Deploying a ClickHouse Cluster on a Single Machine

  • Deploy a complete ClickHouse cluster on a single machine.
  • Install and run everything as the clickhouse user, under /app/clickhouse.

sudo useradd -m -r -s /bin/bash -b /app clickhouse
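
A quick sanity check that the account and its home directory came out as intended:

id clickhouse
ls -ld /app/clickhouse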

Deploy the ZooKeeper Cluster

Install the JDK

  • Upload the JDK package

mkdir -p /app/clickhouse/jdk
tar -zxvf jdk-8u411-linux-x64.tar.gz -C /app/clickhouse/jdk/

  • Configure the environment variables

cat >>~/.bashrc <<EOF
JAVA_HOME=/app/clickhouse/jdk/jdk1.8.0_411
PATH=\$JAVA_HOME/bin:\$PATH
CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export JAVA_HOME
export CLASSPATH
export PATH
EOF

  • Apply them to the current shell

source ~/.bashrc
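
If the variables took effect, the JDK should now resolve from PATH:

java -version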

Set up the ZooKeeper installation directories

  • Download the ZooKeeper release

wget https://archive.apache.org/dist/zookeeper/zookeeper-3.8.4/apache-zookeeper-3.8.4-bin.tar.gz

  • Create the installation directories

mkdir -p /app/clickhouse/zookeeper{1,2,3}
mkdir -p /app/clickhouse/zookeeper{1,2,3}/data

  • Extract the tarball into each directory (the -C targets are relative, so change into /app/clickhouse first)

cd /app/clickhouse
tar -zxf apache-zookeeper-3.8.4-bin.tar.gz -C zookeeper1
tar -zxf apache-zookeeper-3.8.4-bin.tar.gz -C zookeeper2
tar -zxf apache-zookeeper-3.8.4-bin.tar.gz -C zookeeper3

  • Create a version-independent symlink in each directory

cd /app/clickhouse/zookeeper1
ln -sfn apache-zookeeper-3.8.4-bin/ apache-zookeeper
cd /app/clickhouse/zookeeper2
ln -sfn apache-zookeeper-3.8.4-bin/ apache-zookeeper
cd /app/clickhouse/zookeeper3
ln -sfn apache-zookeeper-3.8.4-bin/ apache-zookeeper
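
Each instance directory should now point at the release through its symlink:

ls -ld /app/clickhouse/zookeeper{1,2,3}/apache-zookeeper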

Edit the configuration files

  • Edit zk1's zoo.cfg

$ cd /app/clickhouse/zookeeper1/apache-zookeeper/conf/
$ vi zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/app/clickhouse/zookeeper1/data
clientPort=2181

metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
metricsProvider.httpPort=7001
metricsProvider.exportJvmInfo=true

admin.enableServer=true
admin.serverPort=8081

server.1=172.16.1.100:2881:3881
server.2=172.16.1.100:2882:3882
server.3=172.16.1.100:2883:3883
autopurge.purgeInterval=1
autopurge.snapRetainCount=5
  • Edit zk2's zoo.cfg

$ cd /app/clickhouse/zookeeper2/apache-zookeeper/conf/
$ vi zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/app/clickhouse/zookeeper2/data
clientPort=2182

metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
metricsProvider.httpPort=7002
metricsProvider.exportJvmInfo=true

admin.enableServer=true
admin.serverPort=8082

server.1=172.16.1.100:2881:3881
server.2=172.16.1.100:2882:3882
server.3=172.16.1.100:2883:3883
autopurge.purgeInterval=1
autopurge.snapRetainCount=5
  • Edit zk3's zoo.cfg

$ cd /app/clickhouse/zookeeper3/apache-zookeeper/conf/
$ vi zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/app/clickhouse/zookeeper3/data
clientPort=2183

metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
metricsProvider.httpPort=7003
metricsProvider.exportJvmInfo=true

admin.enableServer=true
admin.serverPort=8083

server.1=172.16.1.100:2881:3881
server.2=172.16.1.100:2882:3882
server.3=172.16.1.100:2883:3883
autopurge.purgeInterval=1
autopurge.snapRetainCount=5
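
The three files above differ only in the instance digit embedded in dataDir, clientPort, the metrics port, and the admin port. If editing them by hand feels error-prone, a loop like this sketch writes all three at once with the same values:

for i in 1 2 3; do
  cat > "/app/clickhouse/zookeeper$i/apache-zookeeper/conf/zoo.cfg" <<EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/app/clickhouse/zookeeper$i/data
clientPort=218$i

metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
metricsProvider.httpPort=700$i
metricsProvider.exportJvmInfo=true

admin.enableServer=true
admin.serverPort=808$i

server.1=172.16.1.100:2881:3881
server.2=172.16.1.100:2882:3882
server.3=172.16.1.100:2883:3883
autopurge.purgeInterval=1
autopurge.snapRetainCount=5
EOF
done
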
  • Configure java.env (required on all three nodes). The GC log path below must be writable by the clickhouse user, so create it first:

sudo mkdir -p /var/log/zookeeper
sudo chown clickhouse /var/log/zookeeper
cat << 'EOF' | tee -a /app/clickhouse/zookeeper{1,2,3}/apache-zookeeper/conf/java.env
#!/bin/sh

export JVMFLAGS="-Xms128m \
    -Xmx1g \
    -Xloggc:/var/log/zookeeper/zookeeper-gc.log \
    -XX:+UseGCLogFileRotation \
    -XX:NumberOfGCLogFiles=16 \
    -XX:GCLogFileSize=16M \
    -verbose:gc \
    -XX:+PrintGCTimeStamps \
    -XX:+PrintGCDateStamps \
    -XX:+PrintGCDetails \
    -XX:+PrintTenuringDistribution \
    -XX:+PrintGCApplicationStoppedTime \
    -XX:+PrintGCApplicationConcurrentTime \
    -XX:+PrintSafepointStatistics \
    -XX:+UseG1GC \
    -Djute.maxbuffer=8388608 \
    -XX:MaxGCPauseMillis=50 $JVMFLAGS"
EOF
  • Write each node's myid

echo 1 > /app/clickhouse/zookeeper1/data/myid
echo 2 > /app/clickhouse/zookeeper2/data/myid
echo 3 > /app/clickhouse/zookeeper3/data/myid
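
Confirm each instance got the right id:

grep . /app/clickhouse/zookeeper{1,2,3}/data/myid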

Start the ZooKeeper Cluster

/app/clickhouse/zookeeper1/apache-zookeeper/bin/zkServer.sh start
/app/clickhouse/zookeeper2/apache-zookeeper/bin/zkServer.sh start
/app/clickhouse/zookeeper3/apache-zookeeper/bin/zkServer.sh start

Check Cluster Status

  • Query each node's status

$ /app/clickhouse/zookeeper1/apache-zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /app/clickhouse/zookeeper1/apache-zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
$ /app/clickhouse/zookeeper2/apache-zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /app/clickhouse/zookeeper2/apache-zookeeper/bin/../conf/zoo.cfg
Client port found: 2182. Client address: localhost. Client SSL: false.
Mode: leader
$ /app/clickhouse/zookeeper3/apache-zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /app/clickhouse/zookeeper3/apache-zookeeper/bin/../conf/zoo.cfg
Client port found: 2183. Client address: localhost. Client SSL: false.
Mode: follower
  • Verify via the AdminServer endpoint

$ curl 127.0.0.1:8081/commands/mntr

Deploy ClickHouse

Set up the installation directories

mkdir -p /app/clickhouse/clickhouse{1,2,3}

Install ClickHouse

  • Download and extract

LATEST_VERSION=$(curl -s https://packages.clickhouse.com/tgz/stable/ | \
    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
export LATEST_VERSION

case $(uname -m) in
  x86_64) ARCH=amd64 ;;
  aarch64) ARCH=arm64 ;;
  *) echo "Unknown architecture $(uname -m)"; exit 1 ;;
esac

# Download the packages
for PKG in clickhouse-common-static clickhouse-common-static-dbg clickhouse-server clickhouse-client clickhouse-keeper
do
  curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION-${ARCH}.tgz" \
    || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
done

# Extract them
tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"

tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION.tgz"

tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"

tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-client-$LATEST_VERSION.tgz"

# Copy the unpacked files into each instance's installation directory
for ((i=1;i<=3;i++));do
  CK_HOME="/app/clickhouse/clickhouse$i"

  mkdir -p "$CK_HOME/etc/clickhouse-server/config.d" "$CK_HOME/etc/clickhouse-server/users.d"
  mkdir -p "$CK_HOME/etc/clickhouse-client" "$CK_HOME/log/clickhouse-server"
  mkdir -p "$CK_HOME/bin" "$CK_HOME/run" "$CK_HOME/data/clickhouse"

  cp -rf "clickhouse-common-static-$LATEST_VERSION/usr/bin" "$CK_HOME"
  cp -rf "clickhouse-server-$LATEST_VERSION/usr/bin" "$CK_HOME"
  cp -rf "clickhouse-client-$LATEST_VERSION/usr/bin" "$CK_HOME"
  cp -rf "clickhouse-server-$LATEST_VERSION/etc/clickhouse-server/"* "$CK_HOME/etc/clickhouse-server"
  cp -rf "clickhouse-client-$LATEST_VERSION/etc/clickhouse-client/"* "$CK_HOME/etc/clickhouse-client"

  # Point the log and data paths at this instance
  sed -i "s#/var/log#$CK_HOME/log#g" "$CK_HOME/etc/clickhouse-server/config.xml"
  sed -i "s#/var/lib#$CK_HOME/data#g" "$CK_HOME/etc/clickhouse-server/config.xml"
done
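
Each instance should now have a working binary; a quick check before continuing:

for i in 1 2 3; do
  "/app/clickhouse/clickhouse$i/bin/clickhouse-server" --version
done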

Edit the Configuration Files

Adjust the service ports

  • Change the listen address (on all nodes)

$ vi -O clickhouse[1-3]/etc/clickhouse-server/config.xml
    <listen_host>0.0.0.0</listen_host>

  • Change the listen ports on ck1/ck2/ck3

$ vi clickhouse1/etc/clickhouse-server/config.xml
    <http_port>8121</http_port>
    <tcp_port>9001</tcp_port>
    <mysql_port>9014</mysql_port>
    <postgresql_port>9015</postgresql_port>
    <interserver_http_port>9019</interserver_http_port>
$ vi clickhouse2/etc/clickhouse-server/config.xml
    <http_port>8122</http_port>
    <tcp_port>9002</tcp_port>
    <mysql_port>9024</mysql_port>
    <postgresql_port>9025</postgresql_port>
    <interserver_http_port>9029</interserver_http_port>
$ vi clickhouse3/etc/clickhouse-server/config.xml
    <http_port>8123</http_port>
    <tcp_port>9003</tcp_port>
    <mysql_port>9034</mysql_port>
    <postgresql_port>9035</postgresql_port>
    <interserver_http_port>9039</interserver_http_port>
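
An alternative to editing the stock config.xml: the install step already created a config.d directory per instance, and ClickHouse merges any XML dropped there over the main config at startup. A sketch for ck1 (the file name ports.xml is arbitrary):

cat > /app/clickhouse/clickhouse1/etc/clickhouse-server/config.d/ports.xml <<'EOF'
<clickhouse>
    <listen_host>0.0.0.0</listen_host>
    <http_port>8121</http_port>
    <tcp_port>9001</tcp_port>
    <mysql_port>9014</mysql_port>
    <postgresql_port>9015</postgresql_port>
    <interserver_http_port>9019</interserver_http_port>
</clickhouse>
EOF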

Configure the ZooKeeper Connection

  • Apply the same block on ck1/ck2/ck3

$ vi -O clickhouse[1-3]/etc/clickhouse-server/config.xml
    <zookeeper>
        <node>
            <host>172.16.1.100</host>
            <port>2181</port>
        </node>
        <node>
            <host>172.16.1.100</host>
            <port>2182</port>
        </node>
        <node>
            <host>172.16.1.100</host>
            <port>2183</port>
        </node>
    </zookeeper>
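
The ensemble has been up since the earlier step, so it is worth confirming ClickHouse will be able to reach it; zkCli.sh can run a single command non-interactively:

/app/clickhouse/zookeeper1/apache-zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /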

Configure the Cluster Topology

$ vi -O clickhouse[1-3]/etc/clickhouse-server/config.xml
    <remote_servers>
      <cluster_3S>
        <shard>
          <replica>
              <host>172.16.1.100</host>
              <port>9001</port>
          </replica>
        </shard>
        <shard>
          <replica>
              <host>172.16.1.100</host>
              <port>9002</port>
          </replica>
        </shard>
        <shard>
          <replica>
              <host>172.16.1.100</host>
              <port>9003</port>
          </replica>
        </shard>
      </cluster_3S>
    </remote_servers>

Start the Services

cd /app/clickhouse
clickhouse1/bin/clickhouse-server --config-file=/app/clickhouse/clickhouse1/etc/clickhouse-server/config.xml --daemon
clickhouse2/bin/clickhouse-server --config-file=/app/clickhouse/clickhouse2/etc/clickhouse-server/config.xml --daemon
clickhouse3/bin/clickhouse-server --config-file=/app/clickhouse/clickhouse3/etc/clickhouse-server/config.xml --daemon
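
Each instance should answer on its HTTP port once it is up:

for p in 8121 8122 8123; do
  curl -s "http://127.0.0.1:$p/ping"
done
# Expected: three lines of "Ok."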

Verify

$ ./clickhouse1/bin/clickhouse-client -h 172.16.1.100 --port 9001
feilong-test01 :) show clusters;
┌─cluster────┐
│ cluster_3S │
└────────────┘
feilong-test01 :) SELECT cluster,shard_num,host_address,port FROM system.clusters WHERE cluster = 'cluster_3S' ;
┌─cluster────┬─shard_num─┬─host_address───┬─port─┐
│ cluster_3S │         1 │ 172.16.1.100   │ 9001 │
│ cluster_3S │         2 │ 172.16.1.100   │ 9002 │
│ cluster_3S │         3 │ 172.16.1.100   │ 9003 │
└────────────┴───────────┴────────────────┴──────┘
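
As a further end-to-end check, a small smoke test (a sketch; the names smoke_local and smoke_dist are arbitrary) creates a local table on every shard via ON CLUSTER DDL, fronts it with a Distributed table, and routes an insert through it:

CLIENT="/app/clickhouse/clickhouse1/bin/clickhouse-client -h 172.16.1.100 --port 9001"
$CLIENT -q "CREATE TABLE default.smoke_local ON CLUSTER cluster_3S (id UInt32) ENGINE = MergeTree ORDER BY id"
$CLIENT -q "CREATE TABLE default.smoke_dist ON CLUSTER cluster_3S AS default.smoke_local ENGINE = Distributed(cluster_3S, default, smoke_local, rand())"
$CLIENT -q "INSERT INTO default.smoke_dist SELECT number FROM numbers(100)"
$CLIENT -q "SELECT count() FROM default.smoke_dist"   # expect 100 (distributed inserts flush asynchronously, so retry if lower)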

Tighten the default User After Deployment

  • The default user ships with overly broad privileges, so the following adjustments are needed:

    • Set a password for the default user
    • Restrict it to local access only
    • Enable SQL-driven access control and account management
    • Create an administrator account
    • Afterwards, revoke the default user's privileges
  • Set a password for the default user

    • Use either the plaintext or the SHA-256 form, never both (and comment out the default empty password entry)
    • Generating the SHA-256 hash is covered in the appendix at the end

            <password>123456</password>
            <password_sha256_hex>bd9734cd2335507ead689b8a61f75757bd00331f349d3296c96fc69cd562c89a</password_sha256_hex>
  • In the current release the default user already has SQL-driven account management enabled; if yours does not, set it in users.xml and restart

            <access_management>1</access_management>
  • Add the following additional settings

            <named_collection_control>1</named_collection_control>
            <show_named_collections>1</show_named_collections>
            <show_named_collections_secrets>1</show_named_collections_secrets>
  • Create the administrator account

-- Create the user
CREATE USER ckdba ON CLUSTER cluster_3S IDENTIFIED WITH sha256_hash BY 'bd9734cd2335507ead689b8a61f75757bd00331f349d3296c96fc69cd562c89a';
-- Grant privileges (compare against: SHOW GRANTS FOR default)
GRANT ON CLUSTER cluster_3S SHOW, SELECT, INSERT, ALTER, CREATE, DROP, UNDROP TABLE, TRUNCATE, OPTIMIZE, BACKUP, KILL QUERY, KILL TRANSACTION, MOVE PARTITION BETWEEN SHARDS, ACCESS MANAGEMENT, SYSTEM, dictGet, displaySecretsInShowAndSelect, INTROSPECTION, SOURCES, CLUSTER ON *.* TO ckdba WITH GRANT OPTION;
GRANT ON CLUSTER cluster_3S ALTER NAMED COLLECTION, CREATE NAMED COLLECTION, DROP NAMED COLLECTION, SHOW NAMED COLLECTIONS, NAMED COLLECTION ON * TO ckdba WITH GRANT OPTION;
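
The two remaining checklist items (local-only access for default, then demoting it) are not shown above; one way to finish, sketched under the assumption that the stock readonly profile from users.xml is still present and that ckdba's password is the one hashed in the appendix:

# In each instance's users.xml, under <users><default>:
#   1) allow loopback only:
#          <networks>
#              <ip>::1</ip>
#              <ip>127.0.0.1</ip>
#          </networks>
#   2) once ckdba works, disable its account management and demote it:
#          <access_management>0</access_management>
#          <profile>readonly</profile>
# Restart each server, then confirm the admin account still works:
/app/clickhouse/clickhouse1/bin/clickhouse-client -h 172.16.1.100 --port 9001 \
  --user ckdba --password 'Ckman123456!' -q "SHOW GRANTS FOR ckdba"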

Appendix

Generate a SHA-256 hash

$ echo 'Ckman123456!'; echo -n 'Ckman123456!' | sha256sum | tr -d '-'
# The password
Ckman123456!
# Its SHA-256 hash
bd9734cd2335507ead689b8a61f75757bd00331f349d3296c96fc69cd562c89a
