feat(distributed): create single docker-compose.yaml and CH configuration (#1803)
* feat: setup for distributed clickhouse

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Co-authored-by: Ankit Nayan <ankit@signoz.io>
This commit is contained in:
parent
7f77bcca2b
commit
88fa3b7699
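The compose changes below repeatedly use Compose extension fields (top-level `x-...` keys) with YAML anchors so that one ClickHouse service definition can be shared across several replicas. A minimal self-contained sketch of the pattern, with hypothetical service names rather than lines taken from this commit:

version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  # shared keys; the `x-` prefix makes Compose ignore this as a service
  image: clickhouse/clickhouse-server:22.8.8-alpine
  restart: on-failure
  tty: true

services:
  clickhouse:
    <<: *clickhouse-defaults      # merge the shared keys into this service
    hostname: clickhouse
  clickhouse-2:
    <<: *clickhouse-defaults      # same defaults, its own identity
    hostname: clickhouse-2

Keys set directly on a service override merged anchor keys, which is why each service below can still pin its own `container_name`, `ports`, and `volumes`.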
Makefile: 4 changed lines
@@ -127,8 +127,8 @@ down-x86:
 
 clear-standalone-data:
 	@docker run --rm -v "$(PWD)/$(STANDALONE_DIRECTORY)/data:/pwd" busybox \
-		sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+		sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
 
 clear-swarm-data:
 	@docker run --rm -v "$(PWD)/$(SWARM_DIRECTORY)/data:/pwd" busybox \
-		sh -c "cd /pwd && rm -rf alertmanager/* clickhouse/* signoz/*"
+		sh -c "cd /pwd && rm -rf alertmanager/* clickhous*/* signoz/* zookeeper-*/*"
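With the broadened globs, `make clear-standalone-data` and `make clear-swarm-data` now also wipe the new ZooKeeper state under `zookeeper-*/`, and `clickhous*/*` matches the `clickhouse/` data directory as well as per-replica directories such as `clickhouse-2/` and `clickhouse-3/`.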
clickhouse-config.xml:

@@ -236,8 +236,8 @@
     <openSSL>
         <server> <!-- Used for https server AND secure tcp port -->
             <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
-            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
-            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
+            <!-- <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> -->
+            <!-- <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> -->
             <!-- dhparams are optional. You can delete the <dhParamsFile> element.
                  To generate dhparams, use the following command:
                    openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
@@ -618,148 +618,6 @@
         </jdbc_bridge>
     -->
 
-    <!-- Configuration of clusters that could be used in Distributed tables.
-         https://clickhouse.com/docs/en/operations/table_engines/distributed/
-      -->
-    <remote_servers>
-        <!-- Test only shard config for testing distributed storage -->
-        <test_shard_localhost>
-            <!-- Inter-server per-cluster secret for Distributed queries
-                 default: no secret (no authentication will be performed)
-
-                 If set, then Distributed queries will be validated on shards, so at least:
-                 - such cluster should exist on the shard,
-                 - such cluster should have the same secret.
-
-                 And also (and which is more important), the initial_user will
-                 be used as current user for the query.
-
-                 Right now the protocol is pretty simple and it only takes into account:
-                 - cluster name
-                 - query
-
-                 Also it will be nice if the following will be implemented:
-                 - source hostname (see interserver_http_host), but then it will depends from DNS,
-                   it can use IP address instead, but then the you need to get correct on the initiator node.
-                 - target hostname / ip address (same notes as for source hostname)
-                 - time-based security tokens
-            -->
-            <!-- <secret></secret> -->
-
-            <shard>
-                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
-                <!-- <internal_replication>false</internal_replication> -->
-                <!-- Optional. Shard weight when writing data. Default: 1. -->
-                <!-- <weight>1</weight> -->
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
-                    <!-- <priority>1</priority> -->
-                </replica>
-            </shard>
-        </test_shard_localhost>
-        <test_cluster_one_shard_three_replicas_localhost>
-            <shard>
-                <internal_replication>false</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.3</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <!--shard>
-                <internal_replication>false</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-                <replica>
-                    <host>127.0.0.3</host>
-                    <port>9000</port>
-                </replica>
-            </shard-->
-        </test_cluster_one_shard_three_replicas_localhost>
-        <test_cluster_two_shards_localhost>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards_localhost>
-        <test_cluster_two_shards>
-            <shard>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards>
-        <test_cluster_two_shards_internal_replication>
-            <shard>
-                <internal_replication>true</internal_replication>
-                <replica>
-                    <host>127.0.0.1</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <internal_replication>true</internal_replication>
-                <replica>
-                    <host>127.0.0.2</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </test_cluster_two_shards_internal_replication>
-        <test_shard_localhost_secure>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9440</port>
-                    <secure>1</secure>
-                </replica>
-            </shard>
-        </test_shard_localhost_secure>
-        <test_unavailable_shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-            <shard>
-                <replica>
-                    <host>localhost</host>
-                    <port>1</port>
-                </replica>
-            </shard>
-        </test_unavailable_shard>
-    </remote_servers>
-
     <!-- The list of hosts allowed to use in URL-related storage engines and table functions.
         If this section is not present in configuration, all hosts are allowed.
      -->
@@ -786,29 +644,6 @@
         Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
       -->
 
-    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
-         Optional. If you don't use replicated tables, you could omit that.
-
-         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-      -->
-
-    <!--
-    <zookeeper>
-        <node>
-            <host>example1</host>
-            <port>2181</port>
-        </node>
-        <node>
-            <host>example2</host>
-            <port>2181</port>
-        </node>
-        <node>
-            <host>example3</host>
-            <port>2181</port>
-        </node>
-    </zookeeper>
-    -->
-
     <!-- Substitutions for parameters of replicated tables.
          Optional. If you don't use replicated tables, you could omit that.
 
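Removing the sample `<remote_servers>` and `<zookeeper>` blocks from the base config makes room for the cluster topology to be supplied as a drop-in file instead: the compose files below mount `clickhouse-cluster.xml` (or `clickhouse-distributed.xml` in the distributed setup) into `/etc/clickhouse-server/config.d/`, and ClickHouse merges everything under `config.d/` over the main config at startup.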
docker-compose.yaml (Swarm variant, compose file format 3.9):

@@ -1,30 +1,133 @@
 version: "3.9"
 
+x-clickhouse-defaults: &clickhouse-defaults
+  image: clickhouse/clickhouse-server:22.8.8-alpine
+  tty: true
+  deploy:
+    restart_policy:
+      condition: on-failure
+  depends_on:
+    - zookeeper-1
+    # - zookeeper-2
+    # - zookeeper-3
+  logging:
+    options:
+      max-size: 50m
+      max-file: "3"
+  healthcheck:
+    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
+    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
+    interval: 30s
+    timeout: 5s
+    retries: 3
+  ulimits:
+    nproc: 65535
+    nofile:
+      soft: 262144
+      hard: 262144
+
+x-clickhouse-depend: &clickhouse-depend
+  depends_on:
+    - clickhouse
+    # - clickhouse-2
+    # - clickhouse-3
+
 services:
+  zookeeper-1:
+    image: bitnami/zookeeper:3.7.0
+    container_name: zookeeper-1
+    hostname: zookeeper-1
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888:2888"
+      - "3888:3888"
+    volumes:
+      - ./data/zookeeper-1:/bitnami/zookeeper
+    environment:
+      - ZOO_SERVER_ID=1
+      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+      - ALLOW_ANONYMOUS_LOGIN=yes
+      - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-2:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: zookeeper-2
+  #   hostname: zookeeper-2
+  #   user: root
+  #   ports:
+  #     - "2182:2181"
+  #     - "2889:2888"
+  #     - "3889:3888"
+  #   volumes:
+  #     - ./data/zookeeper-2:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=2
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-3:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: zookeeper-3
+  #   hostname: zookeeper-3
+  #   user: root
+  #   ports:
+  #     - "2183:2181"
+  #     - "2890:2888"
+  #     - "3890:3888"
+  #   volumes:
+  #     - ./data/zookeeper-3:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=3
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
   clickhouse:
-    image: clickhouse/clickhouse-server:22.8.8-alpine
+    <<: *clickhouse-defaults
+    container_name: clickhouse
+    hostname: clickhouse
   # ports:
   #   - "9000:9000"
   #   - "8123:8123"
-    tty: true
+  #   - "9181:9181"
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
-    deploy:
-      restart_policy:
-        condition: on-failure
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    healthcheck:
-      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
+
+  # clickhouse-2:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-2
+  #   hostname: clickhouse-2
+  #   ports:
+  #     - "9001:9000"
+  #     - "8124:8123"
+  #     - "9182:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
+
+  # clickhouse-3:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-3
+  #   hostname: clickhouse-3
+  #   ports:
+  #     - "9002:9000"
+  #     - "8125:8123"
+  #     - "9183:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
 
   alertmanager:
     image: signoz/alertmanager:0.23.0-0.2
@@ -66,8 +169,7 @@ services:
     deploy:
       restart_policy:
         condition: on-failure
-    depends_on:
-      - clickhouse
+    <<: *clickhouse-depend
 
   frontend:
     image: signoz/frontend:0.11.4
@@ -107,8 +209,7 @@ services:
       mode: global
       restart_policy:
        condition: on-failure
-    depends_on:
-      - clickhouse
+    <<: *clickhouse-depend
 
   otel-collector-metrics:
     image: signoz/signoz-otel-collector:0.63.0
@@ -123,8 +224,7 @@ services:
     deploy:
       restart_policy:
         condition: on-failure
-    depends_on:
-      - clickhouse
+    <<: *clickhouse-depend
 
   hotrod:
     image: jaegertracing/example-hotrod:1.30
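Note that the Swarm variant keeps `deploy.restart_policy` rather than `restart:` and leaves `x-clickhouse-depend` in plain list form: compose file format 3.x, which `docker stack deploy` consumes, does not accept `condition: service_healthy` under `depends_on`, so startup ordering there is best-effort rather than health-gated.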
deploy/docker/clickhouse-setup/docker-compose-distributed.yaml (new file, 250 lines):

version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  image: clickhouse/clickhouse-server:22.8.8-alpine
  tty: true
  depends_on:
    - zookeeper-1
    - zookeeper-2
    - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-clickhouse-depend: &clickhouse-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    clickhouse-2:
      condition: service_healthy
    clickhouse-3:
      condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.0
    container_name: zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  zookeeper-2:
    image: bitnami/zookeeper:3.7.0
    container_name: zookeeper-2
    hostname: zookeeper-2
    user: root
    ports:
      - "2182:2181"
      - "2889:2888"
      - "3889:3888"
    volumes:
      - ./data/zookeeper-2:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=2
      - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  zookeeper-3:
    image: bitnami/zookeeper:3.7.0
    container_name: zookeeper-3
    hostname: zookeeper-3
    user: root
    ports:
      - "2183:2181"
      - "2890:2888"
      - "3890:3888"
    volumes:
      - ./data/zookeeper-3:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=3
      - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-distributed.xml:/etc/clickhouse-server/config.d/distributed.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/

  clickhouse-2:
    <<: *clickhouse-defaults
    container_name: clickhouse-2
    hostname: clickhouse-2
    ports:
      - "9001:9000"
      - "8124:8123"
      - "9182:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-distributed.xml:/etc/clickhouse-server/config.d/distributed.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse-2/:/var/lib/clickhouse/

  clickhouse-3:
    <<: *clickhouse-defaults
    container_name: clickhouse-3
    hostname: clickhouse-3
    ports:
      - "9002:9000"
      - "8125:8123"
      - "9183:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./clickhouse-distributed.xml:/etc/clickhouse-server/config.d/distributed.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse-3/:/var/lib/clickhouse/

  alertmanager:
    image: signoz/alertmanager:0.23.0-0.2
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:distributed-test-1
    container_name: query-service
    command: ["-config=/root/config/prometheus.yml"]
    # ports:
    #   - "6060:6060"     # pprof port
    #   - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *clickhouse-depend

  frontend:
    image: signoz/frontend:distributed-test-1
    container_name: frontend
    restart: on-failure
    depends_on:
      - alertmanager
      - query-service
    ports:
      - "3301:3301"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/signoz-otel-collector:distributed-test-1
    command: ["--config=/etc/otel-collector-config.yaml"]
    # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317"       # OTLP gRPC receiver
      - "4318:4318"       # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    restart: on-failure
    <<: *clickhouse-depend

  otel-collector-metrics:
    image: signoz/signoz-otel-collector:distributed-test-1
    command: ["--config=/etc/otel-collector-metrics-config.yaml"]
    volumes:
      - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
    # ports:
    #   - "1777:1777"     # pprof extension
    #   - "8888:8888"     # OtelCollector internal metrics
    #   - "13133:13133"   # Health check extension
    #   - "55679:55679"   # zPages extension
    restart: on-failure
    <<: *clickhouse-depend

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
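Assuming the standard SigNoz layout, the distributed stack would come up with `docker-compose -f docker-compose-distributed.yaml up -d` from `deploy/docker/clickhouse-setup/`; once the three ClickHouse containers report healthy, running `SELECT * FROM system.clusters` inside any of them should list the shards defined in `clickhouse-distributed.xml`.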
docker-compose.yaml (standalone variant, compose file format 2.4):

@@ -1,28 +1,135 @@
 version: "2.4"
 
+x-clickhouse-defaults: &clickhouse-defaults
+  restart: on-failure
+  image: clickhouse/clickhouse-server:22.8.8-alpine
+  tty: true
+  depends_on:
+    - zookeeper-1
+    # - zookeeper-2
+    # - zookeeper-3
+  logging:
+    options:
+      max-size: 50m
+      max-file: "3"
+  healthcheck:
+    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
+    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
+    interval: 30s
+    timeout: 5s
+    retries: 3
+  ulimits:
+    nproc: 65535
+    nofile:
+      soft: 262144
+      hard: 262144
+
+x-clickhouse-depend: &clickhouse-depend
+  depends_on:
+    clickhouse:
+      condition: service_healthy
+    # clickhouse-2:
+    #   condition: service_healthy
+    # clickhouse-3:
+    #   condition: service_healthy
+
 services:
+  zookeeper-1:
+    image: bitnami/zookeeper:3.7.0
+    container_name: zookeeper-1
+    hostname: zookeeper-1
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888:2888"
+      - "3888:3888"
+    volumes:
+      - ./data/zookeeper-1:/bitnami/zookeeper
+    environment:
+      - ZOO_SERVER_ID=1
+      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+      - ALLOW_ANONYMOUS_LOGIN=yes
+      - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-2:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: zookeeper-2
+  #   hostname: zookeeper-2
+  #   user: root
+  #   ports:
+  #     - "2182:2181"
+  #     - "2889:2888"
+  #     - "3889:3888"
+  #   volumes:
+  #     - ./data/zookeeper-2:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=2
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-3:
+  #   image: bitnami/zookeeper:3.7.0
+  #   container_name: zookeeper-3
+  #   hostname: zookeeper-3
+  #   user: root
+  #   ports:
+  #     - "2183:2181"
+  #     - "2890:2888"
+  #     - "3890:3888"
+  #   volumes:
+  #     - ./data/zookeeper-3:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=3
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
   clickhouse:
-    image: clickhouse/clickhouse-server:22.8.8-alpine
-  # ports:
-  #   - "9000:9000"
-  #   - "8123:8123"
-    tty: true
+    <<: *clickhouse-defaults
+    container_name: clickhouse
+    hostname: clickhouse
+    ports:
+      - "9000:9000"
+      - "8123:8123"
+      - "9181:9181"
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
       # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
-    restart: on-failure
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    healthcheck:
-      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
+
+  # clickhouse-2:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-2
+  #   hostname: clickhouse-2
+  #   ports:
+  #     - "9001:9000"
+  #     - "8124:8123"
+  #     - "9182:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
+
+  # clickhouse-3:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-3
+  #   hostname: clickhouse-3
+  #   ports:
+  #     - "9002:9000"
+  #     - "8125:8123"
+  #     - "9183:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
 
   alertmanager:
     image: signoz/alertmanager:0.23.0-0.2
@@ -64,9 +171,7 @@ services:
       interval: 30s
       timeout: 5s
       retries: 3
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+    <<: *clickhouse-depend
 
   frontend:
     image: signoz/frontend:0.11.4
@@ -83,7 +188,7 @@ services:
   otel-collector:
     image: signoz/signoz-otel-collector:0.63.0
     command: ["--config=/etc/otel-collector-config.yaml"]
-    user: root # required for reading docker container logs
+    # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
@@ -102,9 +207,7 @@ services:
       # - "55678:55678" # OpenCensus receiver
       # - "55679:55679" # zPages extension
     restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+    <<: *clickhouse-depend
 
   otel-collector-metrics:
     image: signoz/signoz-otel-collector:0.63.0
@@ -117,9 +220,7 @@ services:
      # - "13133:13133" # Health check extension
      # - "55679:55679" # zPages extension
     restart: on-failure
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+    <<: *clickhouse-depend
 
   hotrod:
     image: jaegertracing/example-hotrod:1.30
docker-compose.yaml (second standalone variant, compose file format 2.4):

@@ -1,32 +1,138 @@
 version: "2.4"
 
+x-clickhouse-defaults: &clickhouse-defaults
+  restart: on-failure
+  image: clickhouse/clickhouse-server:22.8.8-alpine
+  tty: true
+  depends_on:
+    - zookeeper-1
+    # - zookeeper-2
+    # - zookeeper-3
+  logging:
+    options:
+      max-size: 50m
+      max-file: "3"
+  healthcheck:
+    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
+    test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
+    interval: 30s
+    timeout: 5s
+    retries: 3
+  ulimits:
+    nproc: 65535
+    nofile:
+      soft: 262144
+      hard: 262144
+
+x-clickhouse-depends: &clickhouse-depends
+  depends_on:
+    clickhouse:
+      condition: service_healthy
+    # clickhouse-2:
+    #   condition: service_healthy
+    # clickhouse-3:
+    #   condition: service_healthy
+
 services:
-  clickhouse:
-    image: clickhouse/clickhouse-server:22.8.8-alpine
-    tty: true
+  zookeeper-1:
+    image: bitnami/zookeeper:3.7.0
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888:2888"
+      - "3888:3888"
     volumes:
-      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
-      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
-      - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
-    restart: on-failure
-    logging:
-      options:
-        max-size: 50m
-        max-file: "3"
-    healthcheck:
-      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
-      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
-      interval: 30s
-      timeout: 5s
-      retries: 3
+      - ./data/zookeeper-1:/bitnami/zookeeper
+    environment:
+      - ZOO_SERVER_ID=1
+      - ZOO_SERVERS=0.0.0.0:2888:3888
+      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+      - ALLOW_ANONYMOUS_LOGIN=yes
+      - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-2:
+  #   image: bitnami/zookeeper:3.7.0
+  #   user: root
+  #   ports:
+  #     - "2182:2181"
+  #     - "2889:2888"
+  #     - "3889:3888"
+  #   volumes:
+  #     - ./data/zookeeper-2:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=2
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  # zookeeper-3:
+  #   image: bitnami/zookeeper:3.7.0
+  #   user: root
+  #   ports:
+  #     - "2183:2181"
+  #     - "2890:2888"
+  #     - "3890:3888"
+  #   volumes:
+  #     - ./data/zookeeper-3:/bitnami/zookeeper
+  #   environment:
+  #     - ZOO_SERVER_ID=3
+  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+  #     - ALLOW_ANONYMOUS_LOGIN=yes
+  #     - ZOO_AUTOPURGE_INTERVAL=1
+
+  clickhouse:
+    <<: *clickhouse-defaults
+    container_name: clickhouse
+    hostname: clickhouse
     ports:
       - "9000:9000"
       - "8123:8123"
+      - "9181:9181"
+    volumes:
+      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+      - ./data/clickhouse/:/var/lib/clickhouse/
+
+  # clickhouse-2:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-2
+  #   hostname: clickhouse-2
+  #   ports:
+  #     - "9001:9000"
+  #     - "8124:8123"
+  #     - "9182:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
+
+  # clickhouse-3:
+  #   <<: *clickhouse-defaults
+  #   container_name: clickhouse-3
+  #   hostname: clickhouse-3
+  #   ports:
+  #     - "9002:9000"
+  #     - "8125:8123"
+  #     - "9183:9181"
+  #   volumes:
+  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
+  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
+  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
+  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
 
   alertmanager:
     image: signoz/alertmanager:0.23.0-0.2
+    volumes:
+      - ./data/alertmanager:/data
     depends_on:
-      - query-service
+      query-service:
+        condition: service_healthy
     restart: on-failure
     command:
       - --queryService.url=http://query-service:8085
@@ -38,12 +144,13 @@ services:
     image: signoz/query-service:latest
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
-    ports:
-      - "8180:8080"
+    # ports:
+    #   - "6060:6060"     # pprof port
+    #   - "8080:8080"     # query-service port
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
-      - ./data:/var/lib/signoz
+      - ./data/signoz/:/var/lib/signoz/
     environment:
       - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
@@ -52,49 +159,62 @@ services:
       - STORAGE=clickhouse
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
+      - DEPLOYMENT_TYPE=docker-standalone-amd
+    restart: on-failure
     healthcheck:
       test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
       interval: 30s
       timeout: 5s
       retries: 3
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+    <<: *clickhouse-depends
 
   otel-collector:
     image: signoz/signoz-otel-collector:0.63.0
     command: ["--config=/etc/otel-collector-config.yaml"]
-    user: root # required for reading docker container logs
+    # required for reading docker container logs
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
     environment:
       - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
     ports:
-      - "4317:4317" # OTLP GRPC receiver
-    restart: always
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+      # - "1777:1777"     # pprof extension
+      - "4317:4317"       # OTLP gRPC receiver
+      - "4318:4318"       # OTLP HTTP receiver
+      # - "8888:8888"     # OtelCollector internal metrics
+      # - "8889:8889"     # signoz spanmetrics exposed by the agent
+      # - "9411:9411"     # Zipkin port
+      # - "13133:13133"   # health check extension
+      # - "14250:14250"   # Jaeger gRPC
+      # - "14268:14268"   # Jaeger thrift HTTP
+      # - "55678:55678"   # OpenCensus receiver
+      # - "55679:55679"   # zPages extension
+    restart: on-failure
+    <<: *clickhouse-depends
 
   otel-collector-metrics:
     image: signoz/signoz-otel-collector:0.63.0
     command: ["--config=/etc/otel-collector-metrics-config.yaml"]
     volumes:
       - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
-    depends_on:
-      clickhouse:
-        condition: service_healthy
+    # ports:
+    #   - "1777:1777"     # pprof extension
+    #   - "8888:8888"     # OtelCollector internal metrics
+    #   - "13133:13133"   # Health check extension
+    #   - "55679:55679"   # zPages extension
+    restart: on-failure
+    <<: *clickhouse-depends
 
   hotrod:
     image: jaegertracing/example-hotrod:1.30
     container_name: hotrod
     logging:
       options:
         max-size: 50m
         max-file: "3"
     command: ["all"]
     environment:
       - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
 
   load-hotrod:
     image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
@@ -109,4 +229,4 @@ services:
       QUIET_MODE: "${QUIET_MODE:-false}"
       LOCUST_OPTS: "--headless -u 10 -r 1"
     volumes:
-      - ../../../../deploy/docker/common/locust-scripts:/locust
+      - ../common/locust-scripts:/locust