  • clickhouse

    Partition expressions

    Partition by hour

    formatDateTime(toDateTime(subString(logTime,1,19)),'%F %H')

    Partition by day

    toDate(_cw_raw_time/1000)

    toDate(subString(transDt,1,10))
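    For reference, a minimal sketch of how the hourly expression above plugs into a table definition; the table and column names (demo.log_by_hour, logTime, msg) are hypothetical:

    CREATE TABLE demo.log_by_hour (
        `logTime` String,
        `msg` String
    ) ENGINE = MergeTree
    PARTITION BY formatDateTime(toDateTime(subString(logTime,1,19)),'%F %H')
    ORDER BY logTime;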

    Single node, no replicas

    CREATE TABLE upsf_ods_upb.tbl_market_activity_dict_factor (
        `key` String,
        `value` String,
        `_partition_day` Date MATERIALIZED today(),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    ) ENGINE = MergeTree
    PARTITION BY _partition_day
    ORDER BY (key, value)
    SETTINGS index_granularity = 8192;
    -- `_cw_distributed_db`.upsf_ods_upb_tbl_market_activity_dict_factor source
    
    CREATE TABLE _cw_distributed_db.upsf_ods_upb_tbl_market_activity_dict_factor (
        `key` String,
        `value` String,
        `_partition_day` Date MATERIALIZED today(),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    ) ENGINE = Distributed(test_shard_localhost, upsf_ods_upb, tbl_market_activity_dict_factor, rand());
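    A quick usage sketch (the sample values are made up): inserts go through the Distributed table, the MATERIALIZED columns are filled in automatically, and they are only returned when named explicitly rather than via SELECT *:

    INSERT INTO _cw_distributed_db.upsf_ods_upb_tbl_market_activity_dict_factor (key, value) VALUES ('act_001', 'enabled');

    SELECT key, value, _partition_day, _cw_insert_time, _cw_uuid
    FROM upsf_ods_upb.tbl_market_activity_dict_factor;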

    Cluster with replicas

    -- shard1.upsf_ods_upb_tbl_market_activity_dict_factor_replica definition

    CREATE TABLE shard1.upsf_ods_upb_tbl_market_activity_dict_factor_replica
    (
        `key` String,
        `value` String,
        `_partition_day` Date MATERIALIZED today(),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    )
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/upsf_ods_upb/tbl_market_activity_dict_factor', 'replica1')
    PARTITION BY _partition_day
    ORDER BY (key, value)
    SETTINGS index_granularity = 8192;
    -- `_cw_distributed_db`.upsf_ods_upb_tbl_market_activity_dict_factor source

    CREATE TABLE _cw_distributed_db.upsf_ods_upb_tbl_market_activity_dict_factor
    (
        `key` String,
        `value` String,
        `_partition_day` Date MATERIALIZED today(),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    )
    ENGINE = Distributed('up_ck_cluster', '', 'upsf_ods_upb_tbl_market_activity_dict_factor_replica', rand());
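    A common variant, shown here only as a hedged sketch: instead of hardcoding the shard number and replica name, per-host {shard} and {replica} macros (assumed to be defined in each server's <macros> config) let the same CREATE be run on every replica:

    CREATE TABLE upsf_ods_upb.tbl_market_activity_dict_factor_replica
    (
        `key` String,
        `value` String,
        `_partition_day` Date MATERIALIZED today()
    )
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/upsf_ods_upb/tbl_market_activity_dict_factor', '{replica}')
    PARTITION BY _partition_day
    ORDER BY (key, value);

    This does not apply verbatim to a cross-replicated layout like the shard1/shard2/shard3 databases on this page, where each replica table needs its own distinct ZooKeeper path.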
    /data/app/clickhouse2/bin/clickhouse-client --multiquery -h 127.0.0.1 --port 19000 --user default --password eb22d98eaf028b1ca5c36548193d604a
    
    
    cd /data/app/clickhouse2/
    
    cd /data/appData/clickhouse2/
    
    select * from system.clusters;
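    To check just the topology, a narrower query over the same table:

    SELECT cluster, shard_num, replica_num, host_name, port, is_local
    FROM system.clusters;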
    
    /data/app/clickhouse2/bin/clickhouse-server restart
    
    service clickhouse-server start
    service clickhouse-server stop
    service clickhouse-server restart
    
    
    
    [root@node4 ~]# ps -ef|grep clickhouse
    root      2499     1  0 Jun26 ?       00:00:29 /data/app/clickhouse2/bin/clickhouse-server --daemon --config-file=/data/app/clickhouse2/etc/clickhouse-server/config.xml --log-file=/data/logs/clickhouse2/clickhouse-server.log --pid-file=/data/app/clickhouse2/clickhouse2.pid
    root     18940     1  0 10:46 ?        00:00:01 /data/app/clickhouse2/bin/clickhouse-server status
    root     24491 21557  0 10:56 pts/2    00:00:00 grep --color=auto clickhouse
    
    DROP TABLE IF EXISTS `shard1`.`test_tbl_market_activity_dict_hostname_replica`;
    DROP TABLE IF EXISTS `shard2`.`test_tbl_market_activity_dict_hostname_replica`;
    DROP TABLE IF EXISTS `shard3`.`test_tbl_market_activity_dict_hostname_replica`;
    DROP TABLE IF EXISTS `_cw_distributed_db`.`test_tbl_market_activity_log_info_ulsp`;
    DROP TABLE IF EXISTS `test`.`tbl_market_activity_log_info_ulsp`;
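    With the DDL queue configured (see the distributed_ddl block below), a table that exists on every node can also be dropped once for the whole cluster; a hedged sketch using the up_ck_cluster cluster from this page:

    DROP TABLE IF EXISTS `test`.`tbl_market_activity_log_info_ulsp` ON CLUSTER up_ck_cluster;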
    
    ------- View background processes
    
    show processlist;
    select * from system.processes;
    select user,query_id,query,elapsed,memory_usage from system.processes;
    
    
    
    --------- Kill background processes
    -- After getting the process info with the queries above, a query can be killed by its query_id
    KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90';
    -- Kill all queries belonging to the default user
    KILL QUERY WHERE user='default';
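    KILL QUERY is asynchronous by default; the SYNC and TEST modifiers wait for termination or do a dry run, respectively:

    -- wait until the query has actually stopped
    KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90' SYNC;
    -- only list the queries that would be killed, without killing them
    KILL QUERY WHERE user='default' TEST;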
    
    
    cd /data/app/clickhouse2/etc/clickhouse-server
    ls
    cp config.xml config.xmlbak2 
    vi config.xml 
    <distributed_ddl>
      <!-- Path in ZooKeeper to queue with DDL queries -->
      <path>/clickhouse/task_queue/ddl</path>
      <cleanup_delay_period>60</cleanup_delay_period>
      <task_max_lifetime>86400</task_max_lifetime>
      <max_tasks_in_queue>1000</max_tasks_in_queue>
    </distributed_ddl>
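    Any subsequent ON CLUSTER statement is queued under that ZooKeeper path; one way to inspect the queue from a client session (assuming the path configured above):

    SELECT name, value FROM system.zookeeper WHERE path = '/clickhouse/task_queue/ddl';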
    
    -----
    CREATE TABLE test.tbl_market_activity_log_info_ulsp ON CLUSTER up_ck_cluster
    (
        `transDt` String,
        `discountId` String,
        `cardNo` String,
        `mobile` String,
        `userId` String,
        `logId` String,
        `transChnl` Nullable(String),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    )
    ENGINE = ReplicatedMergeTree(up_ck_cluster,test,tbl_market_activity_log_info_instead_ulsp,rand())
    PARTITION BY subString(transDt,1,10)
    ORDER BY (transDt,discountId,cardNo,mobile,userId,logId);
    
    
    SQL Error [80]: ClickHouse exception, code: 80, host: 10.0.12.14, port: 18123; Code: 80, e.displayText() = DB::Exception: Seems like cluster is configured for cross-replication, but zookeeper_path for ReplicatedMergeTree is not specified or contains {uuid} macro. It's not supported for cross replication, because tables must have different UUIDs. Please specify unique zookeeper_path explicitly. (version 20.8.12.2)
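    The error asks for an explicit, unique zookeeper_path instead of cluster-style arguments to ReplicatedMergeTree. A hedged fix, assuming per-host {shard} and {replica} macros and a non-cross-replicated layout (for the cross-replicated shard1/shard2/shard3 layout used earlier, each replica table instead gets its own explicit path, as in the shard1 definition above):

    CREATE TABLE test.tbl_market_activity_log_info_ulsp ON CLUSTER up_ck_cluster
    (
        `transDt` String,
        `discountId` String,
        `cardNo` String,
        `mobile` String,
        `userId` String,
        `logId` String,
        `transChnl` Nullable(String),
        `_cw_insert_time` DateTime MATERIALIZED now(),
        `_cw_uuid` UUID MATERIALIZED generateUUIDv4()
    )
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/test/tbl_market_activity_log_info_ulsp', '{replica}')
    PARTITION BY subString(transDt,1,10)
    ORDER BY (transDt, discountId, cardNo, mobile, userId, logId);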
  • Original article: https://www.cnblogs.com/tonggc1668/p/14932465.html