# Manual Deployment

Clients connect to a ClickHouse cluster much like they connect to Redis: list the addresses and ports of all nodes in the client configuration.
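For example, the official clickhouse-java JDBC driver (linked at the end of this guide) accepts a comma-separated endpoint list; the URL below is a hedged sketch using the hostnames configured later in this document (exact URL syntax and failover options vary by driver version, so check the driver docs):

# hypothetical JDBC URL listing every node (hostnames from the /etc/hosts section below)
jdbc:ch://v8-ch-1:8123,v8-ch-2:8123,v8-ch-3:8123/default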

# Environment Preparation

# Official Installation Docs

https://clickhouse.com/docs/en/install#available-installation-options
https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#clickhouse-keeper-user-guide

# Official Download Site

https://packages.clickhouse.com/tgz/

# Server Resources

(screenshot of the server resource inventory)

# System Tuning

https://clickhouse.com/docs/zh/operations/requirements

# Bash Script to Fetch the Latest Official Stable Release

get-clickhouse-latest.sh

#!/usr/bin/env bash

# Resolve the newest stable version number from the official version list
LATEST_VERSION=$(curl -s https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv | \
    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
export LATEST_VERSION

#case $(uname -m) in
#  x86_64) ARCH=amd64 ;;
#  aarch64) ARCH=arm64 ;;
#  *) echo "Unknown architecture $(uname -m)"; exit 1 ;;
#esac

#archList=(amd64 arm64)
archList=(arm64)
for ARCH in "${archList[@]}"; do
    for PKG in clickhouse-common-static clickhouse-server clickhouse-client clickhouse-keeper; do
      curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION-${ARCH}.tgz" \
        || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
    done
done
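Running the script should leave the four packages for each architecture in archList in the current directory:

bash get-clickhouse-latest.sh
ls -lh clickhouse-*.tgz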

Check whether the CPU supports SSE 4.2:

grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
# Do not disable overcommit (0 = heuristic overcommit, the kernel default)
echo 0 | tee /proc/sys/vm/overcommit_memory

# Always disable transparent huge pages; they interfere with the memory allocator and cause a significant performance drop.
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.d/rc.local
echo 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local

# Disable swap
swapoff -a
echo "vm.swappiness = 0" >> /etc/sysctl.conf
sysctl -p

vi /etc/fstab # comment out the swap line
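The fstab edit can also be scripted; a minimal sketch, assuming the swap entry is an uncommented line containing the word swap:

# comment out any active swap entry (verify with cat /etc/fstab afterwards)
sed -ri 's/^([^#].*\bswap\b.*)$/#\1/' /etc/fstab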

# Set up hostname mappings
sed -i '/^192.168.100/d;/^0.0.0/d' /etc/hosts # drop any stale entries first
cat >> /etc/hosts << 'EOF'
192.168.100.181 v8-ch-1
192.168.100.182 v8-ch-2
192.168.100.183 v8-ch-3
EOF

# Install JDK or OpenJDK

A JDK is required only when running a standalone ZooKeeper. This guide deploys clickhouse-keeper instead, which is a native binary with no Java dependency, so no JDK installation is needed.

# If you do need one, use the distribution's OpenJDK
dnf install -y java-11-openjdk

# Install extra tools
dnf install -y nc lsof

# Install ClickHouse

# RPM: Online Installation

yum install -y yum-utils
yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo

yum install -y clickhouse-keeper clickhouse-server clickhouse-client

# Create the data directory
mkdir -p /data/clickhouse/lib
# Create the log directory
mkdir -p /data/clickhouse/log
# Set ownership
chown -R clickhouse:clickhouse /data/clickhouse
chmod 775 /data/clickhouse

# For production, the official recommendation is dedicated servers running clickhouse-keeper instead of zookeeper
# clickhouse-keeper is bundled with clickhouse; no separate install is needed unless keeper runs standalone
# clickhouse-keeper requires clickhouse 21.8+
# yum install -y clickhouse-keeper

# RPM: Offline Installation

# https://packages.clickhouse.com

# Newer releases bundle clickhouse-keeper, so there is no need to download clickhouse-keeper-23.9.1.1854.x86_64.rpm
wget https://packages.clickhouse.com/rpm/stable/clickhouse-client-23.9.1.1854.x86_64.rpm
wget https://packages.clickhouse.com/rpm/stable/clickhouse-common-static-23.9.1.1854.x86_64.rpm
wget https://packages.clickhouse.com/rpm/stable/clickhouse-server-23.9.1.1854.x86_64.rpm
# wget https://packages.clickhouse.com/rpm/stable/clickhouse-common-static-dbg-23.9.1.1854.x86_64.rpm

rpm -ivh clickhouse-common-static-23.9.1.1854.x86_64.rpm
rpm -ivh clickhouse-client-23.9.1.1854.x86_64.rpm
rpm -ivh clickhouse-server-23.9.1.1854.x86_64.rpm

systemctl enable clickhouse-server
systemctl start clickhouse-server
systemctl status clickhouse-server
systemctl restart clickhouse-server
clickhouse-client # or "clickhouse-client --password" if you set up a password.

# RPM: Adjust config.xml

# Configuration adjustments - config.xml
# Back up the original configuration files
cp /etc/clickhouse-server/users.xml ~
cp /etc/clickhouse-server/config.xml ~

# Change the directory configuration
## Relax permissions
chmod 666 /etc/clickhouse-server/config.xml
chmod 666 /etc/clickhouse-server/users.xml
## Replace the log directory
sed -i 's?/var/log/clickhouse-server?/data/clickhouse/log?g' /etc/clickhouse-server/config.xml
## Replace the data directory
sed -i 's?/var/lib/clickhouse?/data/clickhouse/lib?g' /etc/clickhouse-server/config.xml

# Adjust the following parameters manually as needed
# background_pool_size: default 16; can be raised to 2x the CPU core count
grep -n background_pool_size /etc/clickhouse-server/config.xml
# max_concurrent_queries: default 100; can be raised to 200 or 300
grep -n max_concurrent_queries /etc/clickhouse-server/config.xml
# set to 0.0.0.0
grep -n listen_host /etc/clickhouse-server/config.xml
# set to 0.0.0.0
grep -n interserver_listen_host /etc/clickhouse-server/config.xml
# set to Asia/Shanghai
grep -n timezone /etc/clickhouse-server/config.xml
# Edit the values located above by hand

# config.xml is installed read-only, so force-save with :wq!
vim /etc/clickhouse-server/config.xml
#    <listen_host>0.0.0.0</listen_host>
#    <interserver_listen_host>0.0.0.0</interserver_listen_host>
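These vim edits can also be scripted, mirroring the sed approach used in the script-debugging section later in this document; a hedged sketch, assuming the stock config.xml still carries these entries in their default commented-out form:

sed -i 's|<!-- <listen_host>::</listen_host> -->|<listen_host>0.0.0.0</listen_host>|' /etc/clickhouse-server/config.xml
sed -i 's|<!-- <interserver_listen_host>::</interserver_listen_host> -->|<interserver_listen_host>0.0.0.0</interserver_listen_host>|' /etc/clickhouse-server/config.xml
sed -i 's|<!-- <timezone>UTC</timezone> -->|<timezone>Asia/Shanghai</timezone>|' /etc/clickhouse-server/config.xml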

# RPM: Adjust users.xml

Use the same configuration on all nodes.

# Configuration adjustments - users.xml
# Generate a random password
PASSWORD=$(base64 < /dev/urandom | head -c12); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
# Plaintext password: NWsY27FSKbPi
# SHA-256 hash:       ccc3f1152ffe4f3ea1807e204a18e5fbc078b800ed6b5a19a78f4747d0046f45

# Or pick your own password; i4Seeyon is used throughout this guide
# echo -n "i4Seeyon" | sha256sum | tr -d '-'
# 0814c1a2c87f56c848dbfcc886d9bc915e4e944183836d41d255bd83b79b3941

vim /etc/clickhouse-server/users.xml
            <!-- <password></password> -->
            <password_sha256_hex>0814c1a2c87f56c848dbfcc886d9bc915e4e944183836d41d255bd83b79b3941</password_sha256_hex>
# Force-save with :wq!

# RPM: Service Management

# clickhouse-server
systemctl enable clickhouse-server
systemctl start clickhouse-server
systemctl status clickhouse-server
clickhouse-client # or "clickhouse-client --password" if you set up a password.
systemctl restart clickhouse-server

# TGZ: Online Installation

LATEST_VERSION=$(curl -s https://packages.clickhouse.com/tgz/stable/ | \
    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
export LATEST_VERSION

case $(uname -m) in
  x86_64) ARCH=amd64 ;;
  aarch64) ARCH=arm64 ;;
  *) echo "Unknown architecture $(uname -m)"; exit 1 ;;
esac

for PKG in clickhouse-common-static clickhouse-common-static-dbg clickhouse-server clickhouse-client clickhouse-keeper
do
  curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION-${ARCH}.tgz" \
    || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
done

tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"
	"clickhouse-common-static-$LATEST_VERSION/install/doinst.sh"

tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION.tgz"
  "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh"

tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"
  "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure
  /etc/init.d/clickhouse-server start

tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \
  || tar -xzvf "clickhouse-client-$LATEST_VERSION.tgz"
	"clickhouse-client-$LATEST_VERSION/install/doinst.sh"

# RPM: Create clickhouse-keeper.xml

Create the configuration file clickhouse-keeper.xml on every cluster node (reference: https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#clickhouse-keeper-user-guide). The <server_id> value must be unique on each node (1, 2, 3, ...). The hostnames below (my-db01 through my-db03) are examples and must resolve on every node; substitute your own, e.g. the v8-ch-* entries added to /etc/hosts earlier.

cat > /etc/clickhouse-server/config.d/clickhouse-keeper.xml << 'EOF'
<clickhouse>
<keeper_server>
    <tcp_port>9181</tcp_port>
    <server_id>1</server_id>
    <log_storage_path>/data/clickhouse/lib/coordination/log</log_storage_path>
    <snapshot_storage_path>/data/clickhouse/lib/coordination/snapshots</snapshot_storage_path>

    <coordination_settings>
        <operation_timeout_ms>10000</operation_timeout_ms>
        <session_timeout_ms>30000</session_timeout_ms>
        <raft_logs_level>warning</raft_logs_level>
    </coordination_settings>

    <raft_configuration>
        <server>
            <id>1</id>
            <hostname>my-db01</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>2</id>
            <hostname>my-db02</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>3</id>
            <hostname>my-db03</hostname>
            <port>9444</port>
        </server>
    </raft_configuration>
</keeper_server>
    <zookeeper>
        <node>
            <host>my-db01</host>
            <port>9181</port>
        </node>
        <node>
            <host>my-db02</host>
            <port>9181</port>
        </node>
        <node>
            <host>my-db03</host>
            <port>9181</port>
        </node>
    </zookeeper>
</clickhouse>
EOF

chown clickhouse:clickhouse /etc/clickhouse-server/config.d/clickhouse-keeper.xml
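The file above wires up coordination only; the ON CLUSTER statements later in this guide also assume a cluster named my_cluster defined under <remote_servers>. A minimal sketch, assuming the single-shard/three-replica layout discussed at the end of this document (file name, cluster name, and macro values are illustrative; the macro values must differ per node):

cat > /etc/clickhouse-server/config.d/remote-servers.xml << 'EOF'
<clickhouse>
    <remote_servers>
        <my_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica><host>my-db01</host><port>9000</port></replica>
                <replica><host>my-db02</host><port>9000</port></replica>
                <replica><host>my-db03</host><port>9000</port></replica>
            </shard>
        </my_cluster>
    </remote_servers>
    <macros>
        <!-- illustrative; set per node, e.g. replica = the node's own hostname -->
        <shard>01</shard>
        <replica>my-db01</replica>
    </macros>
</clickhouse>
EOF
chown clickhouse:clickhouse /etc/clickhouse-server/config.d/remote-servers.xml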

# Cluster Verification

The official guide at https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#clickhouse-keeper-user-guide contains many more examples for reference.

echo ruok | nc localhost 9181; echo
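Besides ruok, keeper answers other ZooKeeper-style four-letter-word commands:

echo mntr | nc localhost 9181; echo   # monitoring counters, including zk_server_state (leader/follower)
echo stat | nc localhost 9181; echo   # brief status and connected clients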

clickhouse-client --user default --password i4Seeyon --query "SELECT * FROM system.clusters"

clickhouse-client --password i4Seeyon
select * from system.zookeeper WHERE path IN ('/', '/clickhouse');
select * from system.clusters;

# Create a database (on current versions the default engine is Atomic, not Ordinary)
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster_name];
CREATE DATABASE IF NOT EXISTS seeyon_prod ON CLUSTER my_cluster;

clickhouse-client --user default --password i4Seeyon --query "show databases"


# Copy a table structure across databases
create table if not exists new_db.table_name as default.table_name engine = TinyLog;
# Check cluster status
select * from system.clusters;
# Inspect partitions
select partition_id,name,table,database,disk_name from system.parts;
# Inspect the remote keeper/zookeeper tree
select name,value,czxid,mzxid from system.zookeeper where path='/clickhouse';
# Show this node's macros
select * from system.macros;
# Delete rows
alter table ck.point_data_replica DELETE WHERE point_code='EC01';
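The macros listed above are what replicated tables interpolate. A hedged sketch of a replicated-plus-distributed table pair, assuming the my_cluster definition and the {shard}/{replica} macros from the keeper section (database from the earlier CREATE DATABASE step; table and column names are illustrative):

clickhouse-client --password i4Seeyon --multiquery << 'EOF'
CREATE TABLE IF NOT EXISTS seeyon_prod.events_local ON CLUSTER my_cluster
(
    event_time DateTime,
    point_code String,
    value      Float64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/events_local', '{replica}')
ORDER BY (point_code, event_time);

-- query entry point that fans out to every shard
CREATE TABLE IF NOT EXISTS seeyon_prod.events ON CLUSTER my_cluster
AS seeyon_prod.events_local
ENGINE = Distributed(my_cluster, seeyon_prod, events_local, rand());
EOF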


clickhouse-client --host 172.18.2.72 --port 9000 --user default --password i4Seeyon --query "SELECT * FROM system.clusters"
# Or run the following after logging in to any node with clickhouse-client
show clusters;

Browse to http://192.168.100.181:8123 or http://192.168.100.181:8123/play; enter the authentication password at the top right and type your query in the input box.
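The same HTTP interface can be exercised with curl:

# liveness probe, no auth required
curl http://192.168.100.181:8123/ping
# run a query over HTTP
echo 'SELECT version()' | curl 'http://192.168.100.181:8123/?user=default&password=i4Seeyon' --data-binary @-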

# Frontend Proxies

# chproxy

Chproxy is an HTTP proxy and load balancer for ClickHouse.

https://www.chproxy.org
https://github.com/ContentSquare/chproxy

# nginx

# Place both blocks in the nginx stream {} context (TCP load balancing):
# the native protocol on 9000 is plain TCP and cannot be proxied from the http {} context.
upstream clickhouse-cluster-web-ui {
    server 192.168.100.181:8123 weight=1;
    server 192.168.100.182:8123 weight=1;
    server 192.168.100.183:8123 weight=1;
}
server {
    listen 8123;
    proxy_pass clickhouse-cluster-web-ui;
}

upstream clickhouse-cluster-app-client {
    server 192.168.100.181:9000 weight=1;
    server 192.168.100.182:9000 weight=1;
    server 192.168.100.183:9000 weight=1;
}
server {
    listen 9000;
    proxy_pass clickhouse-cluster-app-client;
}
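Assuming nginx runs on a separate host (otherwise ports 8123/9000 collide with a local ClickHouse instance), a quick check through the proxy:

curl http://<nginx-host>:8123/ping
clickhouse-client --host <nginx-host> --port 9000 --password i4Seeyon --query "SELECT 1"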

# Port Reference

Port 8123: the HTTP interface. Queries are sent as plain HTTP requests; the /play UI and most BI tools use this port.

Port 9000: the native TCP protocol port. clickhouse-client and application drivers connect here for queries, inserts, and other database operations.

Port 9004: the MySQL protocol emulation port; ClickHouse speaks the MySQL wire protocol here, so a stock mysql client can connect directly.

Port 9005: the PostgreSQL protocol emulation port, the PostgreSQL counterpart of 9004.

Port 9009: the interserver HTTP port, used by replicas to exchange data parts during replication.

This guide additionally uses 9181 (keeper client port) and 9444 (keeper Raft port). All of these are defaults and can be changed in the configuration; a deployment may open further ports for specific features or components.
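On the dnf-based systems used in this guide, the ports above can be opened with firewalld; a hedged sketch (skip if firewalld is not running):

firewall-cmd --permanent --add-port={8123,9000,9009,9181,9444}/tcp
firewall-cmd --reload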

# Common Commands

# Show the version
clickhouse-client --version

# Uninstall

https://blog.51cto.com/u_11529070/9174782

#1. List the installed packages
rpm -qa | grep clickhouse
#2. Remove the clickhouse packages
#1) via yum
sudo yum remove -y clickhouse-client.noarch
sudo yum remove -y clickhouse-common-static.x86_64
sudo yum remove -y clickhouse-server.noarch
#2) or via rpm
rpm -e clickhouse-client-20.5.4.40-2.noarch --nodeps
rpm -e clickhouse-server-20.5.4.40-2.noarch --nodeps
rpm -e clickhouse-common-static-20.5.4.40-2.x86_64 --nodeps
#3. Remove the related directories and data
# data directory
rm -rf /var/lib/clickhouse
# cluster configuration file
rm -rf /etc/metrika.xml
# configuration files
rm -rf /etc/clickhouse-*
# log files
rm -rf /var/log/clickhouse-server
#4. Search the whole filesystem for leftover clickhouse files and directories, and delete any that remain
find / -name clickhouse

# 20240424 Script Debugging

mkdir -p /seeyon/scripts && cd /seeyon/scripts
tar -xf software/amd64/clickhouse-client-24.3.2.23-amd64.tgz
tar -xf software/amd64/clickhouse-common-static-24.3.2.23-amd64.tgz
tar -xf software/amd64/clickhouse-server-24.3.2.23-amd64.tgz

groupadd clickhouse; useradd -r -g clickhouse -s /sbin/nologin -M clickhouse
mkdir -p /data/{clickhouse,keeper}/{data,logs}
# mkdir -p /apps/clickhouse
chown -R clickhouse:clickhouse /data/{clickhouse,keeper}
# chown -R clickhouse:clickhouse /apps/clickhouse

dnf install -y expect

# Define the install directory
# installDir="/apps/clickhouse"
# Define the bin directory
# binDir="${installDir}/bin"
# Define the config directory - clickhouse
# etcDirC="${installDir}/etc/clickhouse-server"
# Define the config directory - keeper
# etcDirK="${installDir}/etc/clickhouse-keeper"

# Define the data directory - clickhouse
dataDirC="/data/clickhouse/data"
# Define the data directory - keeper
dataDirK="/data/keeper/data"
# Define the log directory - clickhouse
logDirC="/data/clickhouse/logs"
# Define the log directory - keeper
logDirK="/data/keeper/logs"

# Patch the doinst.sh installer
# Change the ch etc directory - clickhouse-server-24.3.2.23/install/doinst.sh
# sed -i "s|CLICKHOUSE_CONFDIR=\${CLICKHOUSE_CONFDIR:-/etc/clickhouse-server}|CLICKHOUSE_CONFDIR=\${CLICKHOUSE_CONFDIR:-${etcDirC}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the ch bin directory - clickhouse-server-24.3.2.23/install/doinst.sh
# sed -i "s|CLICKHOUSE_BINDIR=\${CLICKHOUSE_BINDIR:-/usr/bin}|CLICKHOUSE_BINDIR=\${CLICKHOUSE_BINDIR:-${binDir}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the ch data directory - clickhouse-server-24.3.2.23/install/doinst.sh
sed -i "s|CLICKHOUSE_DATADIR=\${CLICKHOUSE_DATADIR:-/var/lib/clickhouse}|CLICKHOUSE_DATADIR=\${CLICKHOUSE_DATADIR:-${dataDirC}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the ch log directory - clickhouse-server-24.3.2.23/install/doinst.sh
sed -i "s|CLICKHOUSE_LOGDIR=\${CLICKHOUSE_LOGDIR:-/var/log/clickhouse-server}|CLICKHOUSE_LOGDIR=\${CLICKHOUSE_LOGDIR:-${logDirC}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the keeper etc directory - clickhouse-server-24.3.2.23/install/doinst.sh
# sed -i "s|KEEPER_CONFDIR=\${KEEPER_CONFDIR:-/etc/clickhouse-keeper}|KEEPER_CONFDIR=\${KEEPER_CONFDIR:-${etcDirK}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the keeper data directory - clickhouse-server-24.3.2.23/install/doinst.sh
sed -i "s|KEEPER_DATADIR=\${KEEPER_DATADIR:-/var/lib/clickhouse}|KEEPER_DATADIR=\${KEEPER_DATADIR:-${dataDirK}}|g" clickhouse-server-24.3.2.23/install/doinst.sh
# Change the keeper log directory - clickhouse-server-24.3.2.23/install/doinst.sh
sed -i "s|KEEPER_LOGDIR=\${KEEPER_LOGDIR:-/var/log/clickhouse-keeper}|KEEPER_LOGDIR=\${KEEPER_LOGDIR:-${logDirK}}|g" clickhouse-server-24.3.2.23/install/doinst.sh

# Change permissions
# chmod 666 /etc/clickhouse-server/config.xml
# chmod 666 /etc/clickhouse-server/users.xml

# chmod 666 clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# chmod 666 clickhouse-server-24.3.2.23/etc/clickhouse-server/users.xml

# Patch config.xml
# Replace the data directory
sed -i "s|/var/lib/clickhouse|${dataDirC}|g" clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# Replace the log directory - clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
sed -i "s|/var/log/clickhouse-server|${logDirC}|g" clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# Set the listen address; :: listens on all available IPv6 and IPv4 addresses, 0.0.0.0 is IPv4 only
sed -i 's|<!-- <listen_host>::</listen_host> -->|<listen_host>::</listen_host>|g' clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# Set the intra-cluster (interserver) listen address; same semantics as above
sed -i 's|<!-- <interserver_listen_host>::</interserver_listen_host> -->|<interserver_listen_host>::</interserver_listen_host>|g' clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# Set the default timezone to Asia/Shanghai
tz="Asia/Shanghai"
sed -i 's|<!-- <timezone>UTC</timezone> -->|<timezone>'"${tz}"'</timezone>|g' clickhouse-server-24.3.2.23/etc/clickhouse-server/config.xml
# Patch users.xml: replace the empty password with the SHA-256 hash generated earlier
sha256str="0814c1a2c87f56c848dbfcc886d9bc915e4e944183836d41d255bd83b79b3941"
sed -i 's|<password></password>|<password_sha256_hex>'"${sha256str}"'</password_sha256_hex>|g' clickhouse-server-24.3.2.23/etc/clickhouse-server/users.xml

# Patch keeper_config.xml
# Replace the keeper data directory - clickhouse-keeper/keeper_config.xml
sed -i "s|/var/lib/clickhouse|${dataDirK}|g" clickhouse-server-24.3.2.23/etc/clickhouse-keeper/keeper_config.xml
# Replace the keeper log directory - clickhouse-keeper/keeper_config.xml
sed -i "s|/var/log/clickhouse-keeper|${logDirK}|g" clickhouse-server-24.3.2.23/etc/clickhouse-keeper/keeper_config.xml


# Run the installers
clickhouse-common-static-24.3.2.23/install/doinst.sh 
clickhouse-client-24.3.2.23/install/doinst.sh 
# clickhouse-server-24.3.2.23/install/doinst.sh

chPass="i4Seeyon"
expect<<-END
spawn clickhouse-server-24.3.2.23/install/doinst.sh
set timeout -1
while 1 {
    expect {
        "*Allow server to accept connections from the network*" {
            send "y\n"
        }
        "*Enter password for the default user*" {
            send "${chPass}\n"
        }
        eof {
            break
        }
    }
}
exit
END

systemctl enable --now clickhouse-server

# log in and verify (the SQL below runs inside clickhouse-client)
clickhouse-client --password "${chPass}"
select * from system.clusters;

create database if not exists v8prod;
create database if not exists v8dev;
create database if not exists v8test;

show databases;

drop database if exists v8prod;
drop database if exists v8dev;
drop database if exists v8test;

# ClickHouse Driver for DBeaver

https://github.com/ClickHouse/clickhouse-java

With only three nodes, the recommended shard/replica layout depends on your requirements and available resources. Common options:

High availability first: if availability is the top priority, make each node its own shard and give every shard two replicas. Each node then holds data for two shards: its own primary shard plus a replica of a neighbouring node's shard, so data stays available when a single node fails (a configuration sketch of this layout follows below).
Shards: 3 (one per node)
Replicas: 2 (two replicas per shard)

Performance first: if the main goal is query performance, make all three nodes replicas of a single shard. Every node holds the full dataset and can serve any query, which improves throughput at the cost of storage (each node stores everything).
Shards: 1 (all nodes hold the same data)
Replicas: 3 (one per node)

Balanced: to trade off availability against performance, split the data into two shards with three replicas each. Every node holds both shards' data, keeping high redundancy and availability.
Shards: 2 (data split into two parts)
Replicas: 3 (three replicas per shard, six replica instances spread across the three nodes)

In practice, also weigh write and query patterns, network bandwidth, storage capacity, and I/O performance; the best layout usually needs case-by-case tuning. A ClickHouse cluster's shard and replica counts can be adjusted later, so the layout can evolve with your requirements and load.
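For reference, a hedged <remote_servers> sketch of the HA-first layout above, assuming the v8-ch-* hostnames and a circular replica placement (each shard's second replica lives on the next node):

<my_cluster>
    <shard> <!-- shard 1: replicas on v8-ch-1 and v8-ch-2 -->
        <internal_replication>true</internal_replication>
        <replica><host>v8-ch-1</host><port>9000</port></replica>
        <replica><host>v8-ch-2</host><port>9000</port></replica>
    </shard>
    <shard> <!-- shard 2: replicas on v8-ch-2 and v8-ch-3 -->
        <internal_replication>true</internal_replication>
        <replica><host>v8-ch-2</host><port>9000</port></replica>
        <replica><host>v8-ch-3</host><port>9000</port></replica>
    </shard>
    <shard> <!-- shard 3: replicas on v8-ch-3 and v8-ch-1 -->
        <internal_replication>true</internal_replication>
        <replica><host>v8-ch-3</host><port>9000</port></replica>
        <replica><host>v8-ch-1</host><port>9000</port></replica>
    </shard>
</my_cluster>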
Author: yangfc