diff --git a/.dockerignore b/.dockerignore
index 028b1e410b..6e985a3fd8 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,4 +3,7 @@
.vscode
README.md
deploy
-sample-apps
\ No newline at end of file
+sample-apps
+
+# frontend
+node_modules
\ No newline at end of file
diff --git a/.github/workflows/releaser.yaml b/.github/workflows/releaser.yaml
index 304cc511dc..4d267802da 100644
--- a/.github/workflows/releaser.yaml
+++ b/.github/workflows/releaser.yaml
@@ -7,6 +7,7 @@ on:
jobs:
detect:
+ if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
runs-on: ubuntu-latest
outputs:
release_type: ${{ steps.find.outputs.release_type }}
@@ -22,6 +23,7 @@ jobs:
fi
echo "release_type=${release_type}" >> "$GITHUB_OUTPUT"
charts:
+ if: ${{ !startsWith(github.event.release.tag_name, 'histogram-quantile/') }}
uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
secrets: inherit
needs: [detect]
diff --git a/.gitignore b/.gitignore
index feb3e8f535..33ad6ab250 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,7 +52,7 @@ ee/query-service/tests/test-deploy/data/
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
bin/
-
+.local/
*/query-service/queries.active
# e2e
@@ -70,5 +70,9 @@ vendor/
# git-town
.git-branches.toml
+
# goreleaser
dist/
+
+# ignore the user_scripts directory that is fetched by init-clickhouse
+deploy/common/clickhouse/user_scripts/
diff --git a/.gitpod.yml b/.gitpod.yml
index 1771de8779..660e186b9a 100644
--- a/.gitpod.yml
+++ b/.gitpod.yml
@@ -3,16 +3,10 @@
tasks:
- - name: Run Script to Comment ut required lines
- init: |
- cd ./.scripts
- sh commentLinesForSetup.sh
-
- name: Run Docker Images
init: |
- cd ./deploy
- sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
- # command:
+ cd ./deploy/docker
+ sudo docker compose up -d
- name: Run Frontend
init: |
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bed34bfd86..cbb5c69c37 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -141,9 +141,9 @@ Depending upon your area of expertise & interest, you can choose one or more to
# 3. Develop Frontend 🌚
-**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/frontend](https://github.com/SigNoz/signoz/tree/develop/frontend)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)**
-Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/develop/frontend/README.md) sections for more info on how to setup SigNoz frontend locally (with and without Docker).
+Also, have a look at the [Frontend README.md](https://github.com/SigNoz/signoz/blob/main/frontend/README.md) for more info on how to set up the SigNoz frontend locally (with and without Docker).
## 3.1 Contribute to Frontend with Docker installation of SigNoz
@@ -151,14 +151,14 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
```
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
-- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)

- run `cd deploy` to move to deploy directory,
- Install signoz locally **without** the frontend,
- - Add / Uncomment the below configuration to query-service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L47)
+ - Add / Uncomment the below configuration to query-service section at [`deploy/docker/docker-compose.yaml#L47`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L47)
```
ports:
- "8080:8080"
@@ -167,9 +167,10 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
- Next run,
```
- sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
+ cd deploy/docker
+ sudo docker compose up -d
```
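+- (Optional) check that the services have come up with `sudo docker compose ps`; if one of them has not, `sudo docker compose logs <service-name>` is the quickest way to see why,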
-- `cd ../frontend` and change baseURL in file [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts#L2) and for that, you need to create a `.env` file in the `frontend` directory with the following environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
+- `cd ../frontend` and change the `baseURL` in [`frontend/src/constants/env.ts#L2`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts#L2). To do that, create a `.env` file in the `frontend` directory with the environment variable (`FRONTEND_API_ENDPOINT`) matching your configuration.
If you have backend api exposed via frontend nginx:
```
@@ -186,11 +187,6 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/
yarn dev
```
-### Important Notes:
-The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
-
- **[`^top^`](#contributing-guidelines)**
-
## 3.2 Contribute to Frontend without installing SigNoz backend
If you don't want to install the SigNoz backend just for doing frontend development, we can provide you with test environments that you can use as the backend.
@@ -216,7 +212,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
# 4. Contribute to Backend (Query-Service) 🌑
-**Need to Update: [https://github.com/SigNoz/signoz/tree/develop/pkg/query-service](https://github.com/SigNoz/signoz/tree/develop/pkg/query-service)**
+**Need to Update: [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service)**
## 4.1 Prerequisites
@@ -242,13 +238,13 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
git clone https://github.com/SigNoz/signoz.git && cd signoz
```
- run `sudo make dev-setup` to configure local setup to run query-service,
-- Comment out `frontend` service section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L68)
+- Comment out `frontend` service section at [`deploy/docker/docker-compose.yaml#L68`](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L68)
-- Comment out `query-service` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml#L41)
+- Comment out `query-service` section at [`deploy/docker/docker-compose.yaml#L41`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml#L41)
-- add below configuration to `clickhouse` section at [`deploy/docker/clickhouse-setup/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/develop/deploy/docker/clickhouse-setup/docker-compose.yaml)
+- add below configuration to `clickhouse` section at [`deploy/docker/docker-compose.yaml`,](https://github.com/SigNoz/signoz/blob/main/deploy/docker/docker-compose.yaml)
```
ports:
- 9001:9000
@@ -258,9 +254,9 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi
- run `cd pkg/query-service/` to move to `query-service` directory,
- Then, you need to create a `.env` file with the following environment variable
```
- SIGNOZ_LOCAL_DB_PATH="./signoz.db"
+ SIGNOZ_SQLSTORE_SQLITE_PATH="./signoz.db"
```
-to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/develop/pkg/query-service/constants/constants.go#L38)
+to set your local environment with the right `RELATIONAL_DATASOURCE_PATH` as mentioned in [`./constants/constants.go#L38`,](https://github.com/SigNoz/signoz/blob/main/pkg/query-service/constants/constants.go#L38)
- Now, install SigNoz locally **without** the `frontend` and `query-service`,
- If you are using `x86_64` processors (All Intel/AMD processors) run `sudo make run-x86`
@@ -294,13 +290,10 @@ docker pull signoz/query-service:develop
```
### Important Note:
-The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh)
-
-
**Query Service should now be available at** [`http://localhost:8080`](http://localhost:8080)
-If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/develop/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
+If you want to see how the frontend plays with query service, you can run the frontend also in your local env with the baseURL changed to `http://localhost:8080` in file [`frontend/src/constants/env.ts`](https://github.com/SigNoz/signoz/blob/main/frontend/src/constants/env.ts) as the `query-service` is now running at port `8080`.
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
	@echo "Local Setup completed"
@echo "------------------"
-run-local:
- @docker-compose -f \
- $(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
- up --build -d
-
-down-local:
- @docker-compose -f \
- $(STANDALONE_DIRECTORY)/docker-compose-core.yaml -f $(STANDALONE_DIRECTORY)/docker-compose-local.yaml \
- down -v
-
pull-signoz:
@docker-compose -f $(STANDALONE_DIRECTORY)/docker-compose.yaml pull
diff --git a/README.de-de.md b/README.de-de.md
index 1fdbcbda15..73a853141b 100644
--- a/README.de-de.md
+++ b/README.de-de.md
@@ -13,9 +13,9 @@
diff --git a/README.md b/README.md
index 7622e0c655..b7f656f56a 100644
--- a/README.md
+++ b/README.md
@@ -17,9 +17,9 @@
diff --git a/README.zh-cn.md b/README.zh-cn.md
index 445474f6ba..d8228e49af 100644
--- a/README.zh-cn.md
+++ b/README.zh-cn.md
@@ -12,9 +12,9 @@
diff --git a/deploy/README.md b/deploy/README.md
index 55c3b6e8d4..597dc855f8 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -18,65 +18,64 @@ Now run the following command to install:
### Using Docker Compose
-If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
+If you don't have docker compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.
-For x86 chip (amd):
-
```sh
-docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
+cd deploy/docker
+docker compose up -d
```
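+
+To verify that all containers are up, you can run the following (the exact service names may differ across releases):
+
+```sh
+cd deploy/docker
+docker compose ps
+```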
-Open http://localhost:3301 in your favourite browser. In couple of minutes, you should see
-the data generated from hotrod in SigNoz UI.
+Open http://localhost:3301 in your favourite browser.
-## Kubernetes
-
-### Using Helm
-
-#### Bring up SigNoz cluster
+To start collecting logs and metrics from your infrastructure, run the following command:
```sh
-helm repo add signoz https://charts.signoz.io
-
-kubectl create ns platform
-
-helm -n platform install my-release signoz/signoz
+cd generator/infra
+docker compose up -d
```
-To access the UI, you can `port-forward` the frontend service:
+To start generating sample traces, run the following command:
```sh
-kubectl -n platform port-forward svc/my-release-frontend 3301:3301
+cd generator/hotrod
+docker compose up -d
```
-Open http://localhost:3301 in your favourite browser. Few minutes after you generate load
-from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
+In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
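+
+To stop the sample trace generator when you are done, a `docker compose down` from the same directory should suffice:
+
+```sh
+cd generator/hotrod
+docker compose down
+```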
-#### Test HotROD application with SigNoz
+For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker/).
+
+## Docker Swarm
+
+To install SigNoz using Docker Swarm, run the following command:
```sh
-kubectl create ns sample-application
-
-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
+cd deploy/docker-swarm
+docker stack deploy -c docker-compose.yaml signoz
```
-To generate load:
+Open http://localhost:3301 in your favourite browser.
+
+To start collecting logs and metrics from your infrastructure, run the following command:
```sh
-kubectl -n sample-application run strzal --image=djbingham/curl \
---restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
-'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
+cd generator/infra
+docker stack deploy -c docker-compose.yaml infra
```
-To stop load:
+To start generating sample traces, run the following command:
```sh
-kubectl -n sample-application run strzal --image=djbingham/curl \
- --restart='OnFailure' -i --tty --rm --command -- curl \
- http://locust-master:8089/stop
+cd generator/hotrod
+docker stack deploy -c docker-compose.yaml hotrod
```
+In a couple of minutes, you should see the data generated from hotrod in SigNoz UI.
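+
+To remove a stack later, pass the stack name used above to `docker stack rm`, for example:
+
+```sh
+docker stack rm hotrod
+```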
+
+For more details, please refer to the [SigNoz documentation](https://signoz.io/docs/install/docker-swarm/).
+
## Uninstall/Troubleshoot?
Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
+
diff --git a/deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml b/deploy/common/clickhouse/cluster.ha.xml
similarity index 96%
rename from deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml
rename to deploy/common/clickhouse/cluster.ha.xml
index 0e3ddcdde0..5c208815de 100644
--- a/deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml
+++ b/deploy/common/clickhouse/cluster.ha.xml
@@ -10,14 +10,14 @@
zookeeper-1
2181
[remaining hunk garbled: XML markup lost in extraction; the change also adds a trailing newline at end of file]
diff --git a/deploy/docker/clickhouse-setup/clickhouse-cluster.xml b/deploy/common/clickhouse/cluster.xml
similarity index 99%
rename from deploy/docker/clickhouse-setup/clickhouse-cluster.xml
rename to deploy/common/clickhouse/cluster.xml
index 0e3ddcdde0..8b475ffe88 100644
--- a/deploy/docker/clickhouse-setup/clickhouse-cluster.xml
+++ b/deploy/common/clickhouse/cluster.xml
@@ -72,4 +72,4 @@
-->
[hunk body garbled: XML markup lost in extraction; the change adds a trailing newline at end of file]
diff --git a/deploy/docker/clickhouse-setup/clickhouse-config.xml b/deploy/common/clickhouse/config.xml
similarity index 99%
rename from deploy/docker/clickhouse-setup/clickhouse-config.xml
rename to deploy/common/clickhouse/config.xml
index de47f5a25c..1965ac3b20 100644
--- a/deploy/docker/clickhouse-setup/clickhouse-config.xml
+++ b/deploy/common/clickhouse/config.xml
@@ -370,7 +370,7 @@
/var/lib/clickhouse/tmp/
[changed line garbled: XML markup lost in extraction]
@@ -652,12 +652,12 @@
    See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
    -->
[changed lines garbled: XML markup lost in extraction]
    01
    example01-01-1
[changed lines garbled: XML markup lost in extraction]
@@ -716,7 +716,7 @@
    asynchronous_metrics - send data from table system.asynchronous_metrics
    status_info - send data from different component from CH, ex: Dictionaries status
    -->
[changed line garbled: XML markup lost in extraction]
diff --git a/deploy/docker/clickhouse-setup/custom-function.xml b/deploy/common/clickhouse/custom-function.xml
similarity index 100%
rename from deploy/docker/clickhouse-setup/custom-function.xml
rename to deploy/common/clickhouse/custom-function.xml
diff --git a/deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml b/deploy/common/clickhouse/storage.xml
similarity index 100%
rename from deploy/docker-swarm/clickhouse-setup/clickhouse-storage.xml
rename to deploy/common/clickhouse/storage.xml
diff --git a/deploy/docker-swarm/dashboards/.gitkeep b/deploy/common/clickhouse/user_scripts/.gitkeep
similarity index 100%
rename from deploy/docker-swarm/dashboards/.gitkeep
rename to deploy/common/clickhouse/user_scripts/.gitkeep
diff --git a/deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml b/deploy/common/clickhouse/users.xml
similarity index 100%
rename from deploy/docker-swarm/clickhouse-setup/clickhouse-users.xml
rename to deploy/common/clickhouse/users.xml
diff --git a/deploy/common/dashboards/.gitkeep b/deploy/common/dashboards/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/deploy/docker-swarm/common/locust-scripts/locustfile.py b/deploy/common/locust-scripts/locustfile.py
similarity index 100%
rename from deploy/docker-swarm/common/locust-scripts/locustfile.py
rename to deploy/common/locust-scripts/locustfile.py
diff --git a/deploy/docker/common/nginx-config.conf b/deploy/common/signoz/nginx-config.conf
similarity index 98%
rename from deploy/docker/common/nginx-config.conf
rename to deploy/common/signoz/nginx-config.conf
index c87960d7b2..6826589d66 100644
--- a/deploy/docker/common/nginx-config.conf
+++ b/deploy/common/signoz/nginx-config.conf
@@ -44,7 +44,7 @@ server {
location /api {
proxy_pass http://query-service:8080/api;
# connection will be closed if no data is read for 600s between successive read operations
- proxy_read_timeout 600s;
+ proxy_read_timeout 600s;
}
location /ws {
diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-opamp-config.yaml b/deploy/common/signoz/otel-collector-opamp-config.yaml
similarity index 100%
rename from deploy/docker-swarm/clickhouse-setup/otel-collector-opamp-config.yaml
rename to deploy/common/signoz/otel-collector-opamp-config.yaml
diff --git a/deploy/docker-swarm/clickhouse-setup/prometheus.yml b/deploy/common/signoz/prometheus.yml
similarity index 95%
rename from deploy/docker-swarm/clickhouse-setup/prometheus.yml
rename to deploy/common/signoz/prometheus.yml
index d7c52893c5..683e5e198b 100644
--- a/deploy/docker-swarm/clickhouse-setup/prometheus.yml
+++ b/deploy/common/signoz/prometheus.yml
@@ -12,10 +12,10 @@ alerting:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
+rule_files: []
# - "first_rules.yml"
# - "second_rules.yml"
- - 'alerts.yml'
+ # - 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
diff --git a/deploy/docker-swarm/clickhouse-setup/.gitkeep b/deploy/docker-swarm/clickhouse-setup/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/deploy/docker-swarm/clickhouse-setup/alertmanager.yml b/deploy/docker-swarm/clickhouse-setup/alertmanager.yml
deleted file mode 100644
index d69357f9dd..0000000000
--- a/deploy/docker-swarm/clickhouse-setup/alertmanager.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-global:
- resolve_timeout: 1m
- slack_api_url: 'https://hooks.slack.com/services/xxx'
-
-route:
- receiver: 'slack-notifications'
-
-receivers:
-- name: 'slack-notifications'
- slack_configs:
- - channel: '#alerts'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
-
- *Description:* {{ .Annotations.description }}
-
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
\ No newline at end of file
diff --git a/deploy/docker-swarm/clickhouse-setup/alerts.yml b/deploy/docker-swarm/clickhouse-setup/alerts.yml
deleted file mode 100644
index 810a20750c..0000000000
--- a/deploy/docker-swarm/clickhouse-setup/alerts.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-groups:
-- name: ExampleCPULoadGroup
- rules:
- - alert: HighCpuLoad
- expr: system_cpu_load_average_1m > 0.1
- for: 0m
- labels:
- severity: warning
- annotations:
- summary: High CPU load
- description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml b/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml
deleted file mode 100644
index f285997166..0000000000
--- a/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml
+++ /dev/null
@@ -1,1142 +0,0 @@
[1,142 deleted lines omitted: the ClickHouse server XML configuration in this hunk was garbled during extraction, with markup stripped and only element values and comment text left behind]
diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
deleted file mode 100644
index 65456f0857..0000000000
--- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
+++ /dev/null
@@ -1,251 +0,0 @@
-version: "3.9"
-x-clickhouse-defaults: &clickhouse-defaults
- image: clickhouse/clickhouse-server:24.1.2-alpine
- tty: true
- deploy:
- restart_policy:
- condition: on-failure
- depends_on:
- - zookeeper-1
- # - zookeeper-2
- # - zookeeper-3
- logging:
- options:
- max-size: 50m
- max-file: "3"
- healthcheck:
- # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
- test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
- interval: 30s
- timeout: 5s
- retries: 3
- ulimits:
- nproc: 65535
- nofile:
- soft: 262144
- hard: 262144
-x-db-depend: &db-depend
- depends_on:
- - clickhouse
- - otel-collector-migrator
- # - clickhouse-2
- # - clickhouse-3
-services:
- zookeeper-1:
- image: bitnami/zookeeper:3.7.1
- hostname: zookeeper-1
- user: root
- ports:
- - "2181:2181"
- - "2888:2888"
- - "3888:3888"
- volumes:
- - ./data/zookeeper-1:/bitnami/zookeeper
- environment:
- - ZOO_SERVER_ID=1
- # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- - ALLOW_ANONYMOUS_LOGIN=yes
- - ZOO_AUTOPURGE_INTERVAL=1
- # zookeeper-2:
- # image: bitnami/zookeeper:3.7.0
- # hostname: zookeeper-2
- # user: root
- # ports:
- # - "2182:2181"
- # - "2889:2888"
- # - "3889:3888"
- # volumes:
- # - ./data/zookeeper-2:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=2
- # - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
-
- # zookeeper-3:
- # image: bitnami/zookeeper:3.7.0
- # hostname: zookeeper-3
- # user: root
- # ports:
- # - "2183:2181"
- # - "2890:2888"
- # - "3890:3888"
- # volumes:
- # - ./data/zookeeper-3:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=3
- # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
- clickhouse:
- !!merge <<: *clickhouse-defaults
- hostname: clickhouse
- # ports:
- # - "9000:9000"
- # - "8123:8123"
- # - "9181:9181"
- volumes:
- - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- - ./data/clickhouse/:/var/lib/clickhouse/
- # clickhouse-2:
- # <<: *clickhouse-defaults
- # hostname: clickhouse-2
- # ports:
- # - "9001:9000"
- # - "8124:8123"
- # - "9182:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-2/:/var/lib/clickhouse/
-
- # clickhouse-3:
- # <<: *clickhouse-defaults
- # hostname: clickhouse-3
- # ports:
- # - "9002:9000"
- # - "8125:8123"
- # - "9183:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-3/:/var/lib/clickhouse/
- alertmanager:
- image: signoz/alertmanager:0.23.7
- volumes:
- - ./data/alertmanager:/data
- command:
- - --queryService.url=http://query-service:8085
- - --storage.path=/data
- depends_on:
- - query-service
- deploy:
- restart_policy:
- condition: on-failure
- query-service:
- image: signoz/query-service:0.69.0
- command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
- # ports:
- # - "6060:6060" # pprof port
- # - "8080:8080" # query-service port
- volumes:
- - ./prometheus.yml:/root/config/prometheus.yml
- - ../dashboards:/root/config/dashboards
- - ./data/signoz/:/var/lib/signoz/
- environment:
- - ClickHouseUrl=tcp://clickhouse:9000
- - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- - DASHBOARDS_PATH=/root/config/dashboards
- - STORAGE=clickhouse
- - GODEBUG=netdns=go
- - TELEMETRY_ENABLED=true
- - DEPLOYMENT_TYPE=docker-swarm
- healthcheck:
- test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
- interval: 30s
- timeout: 5s
- retries: 3
- deploy:
- restart_policy:
- condition: on-failure
- !!merge <<: *db-depend
- frontend:
- image: signoz/frontend:0.69.0
- deploy:
- restart_policy:
- condition: on-failure
- depends_on:
- - alertmanager
- - query-service
- ports:
- - "3301:3301"
- volumes:
- - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
- otel-collector:
- image: signoz/signoz-otel-collector:0.111.24
- command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- user: root # required for reading docker container logs
- volumes:
- - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- - /var/lib/docker/containers:/var/lib/docker/containers:ro
- - /:/hostfs:ro
- environment:
- - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}},dockerswarm.service.name={{.Service.Name}},dockerswarm.task.name={{.Task.Name}}
- - LOW_CARDINAL_EXCEPTION_GROUPING=false
- ports:
- # - "1777:1777" # pprof extension
- - "4317:4317" # OTLP gRPC receiver
- - "4318:4318" # OTLP HTTP receiver
- # - "8888:8888" # OtelCollector internal metrics
- # - "8889:8889" # signoz spanmetrics exposed by the agent
- # - "9411:9411" # Zipkin port
- # - "13133:13133" # Health check extension
- # - "14250:14250" # Jaeger gRPC
- # - "14268:14268" # Jaeger thrift HTTP
- # - "55678:55678" # OpenCensus receiver
- # - "55679:55679" # zPages extension
- deploy:
- mode: global
- restart_policy:
- condition: on-failure
- depends_on:
- - clickhouse
- - otel-collector-migrator
- - query-service
- otel-collector-migrator:
- image: signoz/signoz-schema-migrator:0.111.24
- deploy:
- restart_policy:
- condition: on-failure
- delay: 5s
- command:
- - "sync"
- - "--dsn=tcp://clickhouse:9000"
- - "--up="
- depends_on:
- - clickhouse
- # - clickhouse-2
- # - clickhouse-3
- logspout:
- image: "gliderlabs/logspout:v3.2.14"
- volumes:
- - /etc/hostname:/etc/host_hostname:ro
- - /var/run/docker.sock:/var/run/docker.sock
- command: syslog+tcp://otel-collector:2255
- depends_on:
- - otel-collector
- deploy:
- mode: global
- restart_policy:
- condition: on-failure
- hotrod:
- image: jaegertracing/example-hotrod:1.30
- command: ["all"]
- environment:
- - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
- logging:
- options:
- max-size: 50m
- max-file: "3"
- load-hotrod:
- image: "signoz/locust:1.2.3"
- hostname: load-hotrod
- environment:
- ATTACKED_HOST: http://hotrod:8080
- LOCUST_MODE: standalone
- NO_PROXY: standalone
- TASK_DELAY_FROM: 5
- TASK_DELAY_TO: 30
- QUIET_MODE: "${QUIET_MODE:-false}"
- LOCUST_OPTS: "--headless -u 10 -r 1"
- volumes:
- - ../common/locust-scripts:/locust
diff --git a/deploy/docker-swarm/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql b/deploy/docker-swarm/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql
deleted file mode 100644
index f71983c083..0000000000
--- a/deploy/docker-swarm/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql
+++ /dev/null
@@ -1,31 +0,0 @@
-CREATE TABLE IF NOT EXISTS signoz_index (
- timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
- traceID String CODEC(ZSTD(1)),
- spanID String CODEC(ZSTD(1)),
- parentSpanID String CODEC(ZSTD(1)),
- serviceName LowCardinality(String) CODEC(ZSTD(1)),
- name LowCardinality(String) CODEC(ZSTD(1)),
- kind Int32 CODEC(ZSTD(1)),
- durationNano UInt64 CODEC(ZSTD(1)),
- tags Array(String) CODEC(ZSTD(1)),
- tagsKeys Array(String) CODEC(ZSTD(1)),
- tagsValues Array(String) CODEC(ZSTD(1)),
- statusCode Int64 CODEC(ZSTD(1)),
- references String CODEC(ZSTD(1)),
- externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
- externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
- component Nullable(String) CODEC(ZSTD(1)),
- dbSystem Nullable(String) CODEC(ZSTD(1)),
- dbName Nullable(String) CODEC(ZSTD(1)),
- dbOperation Nullable(String) CODEC(ZSTD(1)),
- peerService Nullable(String) CODEC(ZSTD(1)),
- INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
- INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
- INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
- INDEX idx_kind kind TYPE minmax GRANULARITY 4,
- INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
- INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
- INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
-) ENGINE MergeTree()
-PARTITION BY toDate(timestamp)
-ORDER BY (serviceName, -toUnixTimestamp(timestamp))
\ No newline at end of file
diff --git a/deploy/docker-swarm/common/nginx-config.conf b/deploy/docker-swarm/common/nginx-config.conf
deleted file mode 100644
index f7943e21aa..0000000000
--- a/deploy/docker-swarm/common/nginx-config.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-server {
- listen 3301;
- server_name _;
-
- gzip on;
- gzip_static on;
- gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
- gzip_proxied any;
- gzip_vary on;
- gzip_comp_level 6;
- gzip_buffers 16 8k;
- gzip_http_version 1.1;
-
- # to handle uri issue 414 from nginx
- client_max_body_size 24M;
- large_client_header_buffers 8 128k;
-
- location / {
- if ( $uri = '/index.html' ) {
- add_header Cache-Control no-store always;
- }
- root /usr/share/nginx/html;
- index index.html index.htm;
- try_files $uri $uri/ /index.html;
- }
-
- location ~ ^/api/(v1|v3)/logs/(tail|livetail){
- proxy_pass http://query-service:8080;
- proxy_http_version 1.1;
-
- # connection will be closed if no data is read for 600s between successive read operations
- proxy_read_timeout 600s;
-
- # dont buffer the data send it directly to client.
- proxy_buffering off;
- proxy_cache off;
- }
-
- location /api {
- proxy_pass http://query-service:8080/api;
- # connection will be closed if no data is read for 600s between successive read operations
- proxy_read_timeout 600s;
- }
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-}
\ No newline at end of file
diff --git a/deploy/docker-swarm/docker-compose.ha.yaml b/deploy/docker-swarm/docker-compose.ha.yaml
new file mode 100644
index 0000000000..595c979733
--- /dev/null
+++ b/deploy/docker-swarm/docker-compose.ha.yaml
@@ -0,0 +1,265 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ deploy:
+ restart_policy:
+ condition: on-failure
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+x-clickhouse-defaults: &clickhouse-defaults
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ tty: true
+ deploy:
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9363"
+ signoz.io/path: "/metrics"
+ depends_on:
+ - zookeeper-1
+ - zookeeper-2
+ - zookeeper-3
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - 0.0.0.0:8123/ping
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ ulimits:
+ nproc: 65535
+ nofile:
+ soft: 262144
+ hard: 262144
+x-zookeeper-defaults: &zookeeper-defaults
+ <<: *common
+ image: bitnami/zookeeper:3.7.1
+ user: root
+ deploy:
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9141"
+ signoz.io/path: "/metrics"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
+ interval: 30s
+ timeout: 5s
+ retries: 3
+x-db-depend: &db-depend
+ <<: *common
+ depends_on:
+ - clickhouse
+ - clickhouse-2
+ - clickhouse-3
+ - schema-migrator
+services:
+ init-clickhouse:
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
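+    # one-shot job: downloads the histogram-quantile binary and installs it into the shared user_scripts volume as histogramQuantile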
+ command:
+ - bash
+ - -c
+ - |
+ version="v0.0.1"
+ node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
+ node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
+ echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
+ cd /tmp
+ wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
+ tar -xvzf histogram-quantile.tar.gz
+ mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
+ volumes:
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ zookeeper-1:
+ <<: *zookeeper-defaults
+ # ports:
+ # - "2181:2181"
+ # - "2888:2888"
+ # - "3888:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=1
+ - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ zookeeper-2:
+ <<: *zookeeper-defaults
+ # ports:
+ # - "2182:2181"
+ # - "2889:2888"
+ # - "3889:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=2
+ - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ zookeeper-3:
+ <<: *zookeeper-defaults
+ # ports:
+ # - "2183:2181"
+ # - "2890:2888"
+ # - "3890:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=3
+ - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ clickhouse:
+ <<: *clickhouse-defaults
+ # TODO: needed for schema-migrator to work, remove this redundancy once we have a better solution
+ hostname: clickhouse
+ # ports:
+ # - "9000:9000"
+ # - "8123:8123"
+ # - "9181:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ clickhouse-2:
+ <<: *clickhouse-defaults
+ hostname: clickhouse-2
+ # ports:
+ # - "9001:9000"
+ # - "8124:8123"
+ # - "9182:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ clickhouse-3:
+ <<: *clickhouse-defaults
+ hostname: clickhouse-3
+ # ports:
+ # - "9002:9000"
+ # - "8125:8123"
+ # - "9183:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ alertmanager:
+ <<: *common
+ image: signoz/alertmanager:0.23.7
+ command:
+ - --queryService.url=http://query-service:8085
+ - --storage.path=/data
+ volumes:
+ - ./clickhouse-setup/data/alertmanager:/data
+ depends_on:
+ - query-service
+ query-service:
+ <<: *db-depend
+ image: signoz/query-service:0.69.0
+ command:
+ - --config=/root/config/prometheus.yml
+ - --use-logs-new-schema=true
+ - --use-trace-new-schema=true
+ # ports:
+ # - "8080:8080" # signoz port
+ # - "6060:6060" # pprof port
+ volumes:
+ - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+ - ../common/dashboards:/root/config/dashboards
+ - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
+ environment:
+ - ClickHouseUrl=tcp://clickhouse:9000
+ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
+ - STORAGE=clickhouse
+ - GODEBUG=netdns=go
+ - TELEMETRY_ENABLED=true
+ - DEPLOYMENT_TYPE=docker-swarm
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - localhost:8080/api/v1/health
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ frontend:
+ <<: *common
+ image: signoz/frontend:0.69.0
+ depends_on:
+ - alertmanager
+ - query-service
+ ports:
+ - "3301:3301"
+ volumes:
+ - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
+ otel-collector:
+ <<: *db-depend
+ image: signoz/signoz-otel-collector:0.111.24
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ - --manager-config=/etc/manager-config.yaml
+ - --copy-path=/var/tmp/collector-config.yaml
+ - --feature-gates=-pkg.translator.prometheus.NormalizeName
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+ environment:
+ - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
+ - LOW_CARDINAL_EXCEPTION_GROUPING=false
+ ports:
+ # - "1777:1777" # pprof extension
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
+ deploy:
+ replicas: 3
+ depends_on:
+ - clickhouse
+ - schema-migrator
+ - query-service
+ schema-migrator:
+ <<: *common
+ image: signoz/signoz-schema-migrator:0.111.24
+ deploy:
+ restart_policy:
+ condition: on-failure
+ delay: 5s
+ entrypoint: sh
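+    # runs sync migrations first, then async migrations, against the same ClickHouse DSN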
+ command:
+ - -c
+ - "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
+ depends_on:
+ - clickhouse
+
+networks:
+ signoz-net:
+ name: signoz-net
diff --git a/deploy/docker-swarm/docker-compose.yaml b/deploy/docker-swarm/docker-compose.yaml
new file mode 100644
index 0000000000..c0624ed9cf
--- /dev/null
+++ b/deploy/docker-swarm/docker-compose.yaml
@@ -0,0 +1,201 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ deploy:
+ restart_policy:
+ condition: on-failure
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+x-clickhouse-defaults: &clickhouse-defaults
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ tty: true
+ deploy:
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9363"
+ signoz.io/path: "/metrics"
+ depends_on:
+ - init-clickhouse
+ - zookeeper-1
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - 0.0.0.0:8123/ping
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ ulimits:
+ nproc: 65535
+ nofile:
+ soft: 262144
+ hard: 262144
+x-zookeeper-defaults: &zookeeper-defaults
+ <<: *common
+ image: bitnami/zookeeper:3.7.1
+ user: root
+ deploy:
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9141"
+ signoz.io/path: "/metrics"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
+ interval: 30s
+ timeout: 5s
+ retries: 3
+x-db-depend: &db-depend
+ <<: *common
+ depends_on:
+ - clickhouse
+ - schema-migrator
+services:
+ init-clickhouse:
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ command:
+ - bash
+ - -c
+ - |
+ version="v0.0.1"
+ node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
+ node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
+ echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
+ cd /tmp
+ wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
+ tar -xvzf histogram-quantile.tar.gz
+ mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
+ volumes:
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ zookeeper-1:
+ <<: *zookeeper-defaults
+ # ports:
+ # - "2181:2181"
+ # - "2888:2888"
+ # - "3888:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=1
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ clickhouse:
+ <<: *clickhouse-defaults
+    # TODO: needed for clickhouse TCP connection
+ hostname: clickhouse
+ # ports:
+ # - "9000:9000"
+ # - "8123:8123"
+ # - "9181:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ alertmanager:
+ <<: *common
+ image: signoz/alertmanager:0.23.7
+ command:
+ - --queryService.url=http://query-service:8085
+ - --storage.path=/data
+ volumes:
+ - ./clickhouse-setup/data/alertmanager:/data
+ depends_on:
+ - query-service
+ query-service:
+ <<: *db-depend
+ image: signoz/query-service:0.69.0
+ command:
+ - --config=/root/config/prometheus.yml
+ - --use-logs-new-schema=true
+ - --use-trace-new-schema=true
+ # ports:
+ # - "8080:8080" # signoz port
+ # - "6060:6060" # pprof port
+ volumes:
+ - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+ - ../common/dashboards:/root/config/dashboards
+ - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
+ environment:
+ - ClickHouseUrl=tcp://clickhouse:9000
+ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
+ - STORAGE=clickhouse
+ - GODEBUG=netdns=go
+ - TELEMETRY_ENABLED=true
+ - DEPLOYMENT_TYPE=docker-swarm
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - localhost:8080/api/v1/health
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ frontend:
+ <<: *common
+ image: signoz/frontend:0.69.0
+ depends_on:
+ - alertmanager
+ - query-service
+ ports:
+ - "3301:3301"
+ volumes:
+ - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
+ otel-collector:
+ <<: *db-depend
+ image: signoz/signoz-otel-collector:0.111.24
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ - --manager-config=/etc/manager-config.yaml
+ - --copy-path=/var/tmp/collector-config.yaml
+ - --feature-gates=-pkg.translator.prometheus.NormalizeName
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+ environment:
+ - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
+ - LOW_CARDINAL_EXCEPTION_GROUPING=false
+ ports:
+ # - "1777:1777" # pprof extension
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
+ deploy:
+ replicas: 3
+ depends_on:
+ - clickhouse
+ - schema-migrator
+ - query-service
+ schema-migrator:
+ <<: *common
+ image: signoz/signoz-schema-migrator:0.111.24
+ deploy:
+ restart_policy:
+ condition: on-failure
+ delay: 5s
+ entrypoint: sh
+ command:
+ - -c
+ - "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
+ depends_on:
+ - clickhouse
+
+networks:
+ signoz-net:
+ name: signoz-net
diff --git a/deploy/docker-swarm/generator/hotrod/docker-compose.yaml b/deploy/docker-swarm/generator/hotrod/docker-compose.yaml
new file mode 100644
index 0000000000..065dc1352c
--- /dev/null
+++ b/deploy/docker-swarm/generator/hotrod/docker-compose.yaml
@@ -0,0 +1,38 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ extra_hosts:
+ - host.docker.internal:host-gateway
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+ deploy:
+ restart_policy:
+ condition: on-failure
+services:
+ hotrod:
+ <<: *common
+ image: jaegertracing/example-hotrod:1.61.0
+ command: [ "all" ]
+ environment:
+ - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 #
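+      # 4318 is the OTLP HTTP port of the SigNoz otel-collector on the host; update this endpoint if your collector runs elsewhere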
+ load-hotrod:
+ <<: *common
+ image: "signoz/locust:1.2.3"
+ environment:
+ ATTACKED_HOST: http://hotrod:8080
+ LOCUST_MODE: standalone
+ NO_PROXY: standalone
+ TASK_DELAY_FROM: 5
+ TASK_DELAY_TO: 30
+ QUIET_MODE: "${QUIET_MODE:-false}"
+ LOCUST_OPTS: "--headless -u 10 -r 1"
+ volumes:
+ - ../../../common/locust-scripts:/locust
+
+networks:
+ signoz-net:
+ name: signoz-net
+ external: true
diff --git a/deploy/docker-swarm/generator/infra/docker-compose.yaml b/deploy/docker-swarm/generator/infra/docker-compose.yaml
new file mode 100644
index 0000000000..ca7ab66312
--- /dev/null
+++ b/deploy/docker-swarm/generator/infra/docker-compose.yaml
@@ -0,0 +1,69 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ extra_hosts:
+ - host.docker.internal:host-gateway
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+ deploy:
+ mode: global
+ restart_policy:
+ condition: on-failure
+services:
+ otel-agent:
+ <<: *common
+ image: otel/opentelemetry-collector-contrib:0.111.0
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ volumes:
+ - ./otel-agent-config.yaml:/etc/otel-collector-config.yaml
+ - /:/hostfs:ro
+ environment:
+ - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
+ - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
+ # - SIGNOZ_ACCESS_TOKEN=""
+ # Before exposing the ports, make sure the ports are not used by other services
+ # ports:
+ # - "4317:4317"
+ # - "4318:4318"
+ otel-metrics:
+ <<: *common
+ image: otel/opentelemetry-collector-contrib:0.111.0
+ user: 0:0 # If you have security concerns, you can replace this with your `UID:GID` that has necessary permissions to docker.sock
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ volumes:
+ - ./otel-metrics-config.yaml:/etc/otel-collector-config.yaml
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
+ - OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
+ # - SIGNOZ_ACCESS_TOKEN=""
+ # Before exposing the ports, make sure the ports are not used by other services
+ # ports:
+ # - "4317:4317"
+ # - "4318:4318"
+ deploy:
+ mode: replicated
+ replicas: 1
+ placement:
+ constraints:
+ - node.role == manager
+ logspout:
+ <<: *common
+ image: "gliderlabs/logspout:v3.2.14"
+ command: syslog+tcp://otel-agent:2255
+ user: root
+ volumes:
+ - /etc/hostname:/etc/host_hostname:ro
+ - /var/run/docker.sock:/var/run/docker.sock
+ depends_on:
+ - otel-agent
+
+networks:
+ signoz-net:
+ name: signoz-net
+ external: true
diff --git a/deploy/docker-swarm/generator/infra/otel-agent-config.yaml b/deploy/docker-swarm/generator/infra/otel-agent-config.yaml
new file mode 100644
index 0000000000..eb60c1091b
--- /dev/null
+++ b/deploy/docker-swarm/generator/infra/otel-agent-config.yaml
@@ -0,0 +1,102 @@
+receivers:
+ hostmetrics:
+ collection_interval: 30s
+ root_path: /hostfs
+ scrapers:
+ cpu: {}
+ load: {}
+ memory: {}
+ disk: {}
+ filesystem: {}
+ network: {}
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-agent
+ static_configs:
+ - targets:
+ - localhost:8888
+ labels:
+ job_name: otel-agent
+ tcplog/docker:
+ listen_address: "0.0.0.0:2255"
+ operators:
+ - type: regex_parser
+        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
+ timestamp:
+ parse_from: attributes.timestamp
+ layout: '%Y-%m-%dT%H:%M:%S.%LZ'
+ - type: move
+ from: attributes["body"]
+ to: body
+ - type: remove
+ field: attributes.timestamp
+ # please remove names from below if you want to collect logs from them
+ - type: filter
+ id: signoz_logs_filter
+ expr: 'attributes.container_name matches "^(signoz_(logspout|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra_(logspout|otel-agent|otel-metrics)).*"'
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+ resourcedetection:
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ detectors:
+ # - ec2
+ # - gcp
+ # - azure
+ - env
+ - system
+ timeout: 2s
+extensions:
+ health_check:
+ endpoint: 0.0.0.0:13133
+ pprof:
+ endpoint: 0.0.0.0:1777
+exporters:
+ otlp:
+ endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
+ tls:
+ insecure: true
+ headers:
+ signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
+ # debug: {}
+service:
+ telemetry:
+ logs:
+ encoding: json
+ metrics:
+ address: 0.0.0.0:8888
+ extensions:
+ - health_check
+ - pprof
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics:
+ receivers: [otlp]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics/hostmetrics:
+ receivers: [hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics/prometheus:
+ receivers: [prometheus]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp, tcplog/docker]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
diff --git a/deploy/docker-swarm/generator/infra/otel-metrics-config.yaml b/deploy/docker-swarm/generator/infra/otel-metrics-config.yaml
new file mode 100644
index 0000000000..e44bbc6761
--- /dev/null
+++ b/deploy/docker-swarm/generator/infra/otel-metrics-config.yaml
@@ -0,0 +1,103 @@
+receivers:
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-metrics
+ static_configs:
+ - targets:
+ - localhost:8888
+ labels:
+ job_name: otel-metrics
+ # For Docker daemon metrics to be scraped, it must be configured to expose
+ # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
+ # - job_name: docker-daemon
+ # dockerswarm_sd_configs:
+ # - host: unix:///var/run/docker.sock
+ # role: nodes
+ # relabel_configs:
+ # - source_labels: [__meta_dockerswarm_node_address]
+ # target_label: __address__
+ # replacement: $1:9323
+ - job_name: "dockerswarm"
+ dockerswarm_sd_configs:
+ - host: unix:///var/run/docker.sock
+ role: tasks
+ relabel_configs:
+ - action: keep
+ regex: running
+ source_labels:
+ - __meta_dockerswarm_task_desired_state
+ - action: keep
+ regex: true
+ source_labels:
+ - __meta_dockerswarm_service_label_signoz_io_scrape
+ - regex: ([^:]+)(?::\d+)?
+ replacement: $1
+ source_labels:
+ - __address__
+ target_label: swarm_container_ip
+ - separator: .
+ source_labels:
+ - __meta_dockerswarm_service_name
+ - __meta_dockerswarm_task_slot
+ - __meta_dockerswarm_task_id
+ target_label: swarm_container_name
+ - target_label: __address__
+ source_labels:
+ - swarm_container_ip
+ - __meta_dockerswarm_service_label_signoz_io_port
+ separator: ":"
+ - source_labels:
+ - __meta_dockerswarm_service_label_signoz_io_path
+ target_label: __metrics_path__
+ - source_labels:
+ - __meta_dockerswarm_service_label_com_docker_stack_namespace
+ target_label: namespace
+ - source_labels:
+ - __meta_dockerswarm_service_name
+ target_label: service_name
+ - source_labels:
+ - __meta_dockerswarm_task_id
+ target_label: service_instance_id
+ - source_labels:
+ - __meta_dockerswarm_node_hostname
+ target_label: host_name
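+        # the rules above keep only running tasks that opt in via the signoz.io/scrape label and build the scrape address from the task IP and the signoz.io/port label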
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+ resourcedetection:
+ detectors:
+ - env
+ - system
+ timeout: 2s
+extensions:
+ health_check:
+ endpoint: 0.0.0.0:13133
+ pprof:
+ endpoint: 0.0.0.0:1777
+exporters:
+ otlp:
+ endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
+ tls:
+ insecure: true
+ headers:
+ signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
+ # debug: {}
+service:
+ telemetry:
+ logs:
+ encoding: json
+ metrics:
+ address: 0.0.0.0:8888
+ extensions:
+ - health_check
+ - pprof
+ pipelines:
+ metrics:
+ receivers: [prometheus]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
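Scraping with this config is opt-in: the `dockerswarm` job keeps only tasks whose service carries the `signoz.io/scrape` label, and reads the port and metrics path from the sibling labels. A sketch of a swarm service opting in (image and port are placeholders):

```yaml
# Hypothetical swarm service scraped by the dockerswarm job above.
services:
  my-app:
    image: my-app:latest         # placeholder image
    deploy:
      labels:
        signoz.io/scrape: "true"   # required; tasks without it are dropped
        signoz.io/port: "9090"     # port serving Prometheus metrics
        signoz.io/path: "/metrics" # mapped onto __metrics_path__
```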
diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker-swarm/otel-collector-config.yaml
similarity index 56%
rename from deploy/docker/clickhouse-setup/otel-collector-config.yaml
rename to deploy/docker-swarm/otel-collector-config.yaml
index b73acdea11..1daab97e34 100644
--- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker-swarm/otel-collector-config.yaml
@@ -1,85 +1,29 @@
receivers:
- tcplog/docker:
- listen_address: "0.0.0.0:2255"
- operators:
- - type: regex_parser
- regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
- timestamp:
- parse_from: attributes.timestamp
- layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- - type: move
- from: attributes["body"]
- to: body
- - type: remove
- field: attributes.timestamp
- # please remove names from below if you want to collect logs from them
- - type: filter
- id: signoz_logs_filter
- expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
- opencensus:
- endpoint: 0.0.0.0:55678
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
- jaeger:
- protocols:
- grpc:
- endpoint: 0.0.0.0:14250
- thrift_http:
- endpoint: 0.0.0.0:14268
- # thrift_compact:
- # endpoint: 0.0.0.0:6831
- # thrift_binary:
- # endpoint: 0.0.0.0:6832
- hostmetrics:
- collection_interval: 30s
- root_path: /hostfs
- scrapers:
- cpu: {}
- load: {}
- memory: {}
- disk: {}
- filesystem: {}
- network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- # otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
-
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
- # memory_limiter:
- # # 80% of maximum memory up to 2G
- # limit_mib: 1500
- # # 25% of limit up to 2G
- # spike_limit_mib: 512
- # check_interval: 5s
- #
- # # 50% of the maximum memory
- # limit_percentage: 50
- # # 20% of max memory usage spike expected
- # spike_limit_percentage: 20
- # queued_retry:
- # num_workers: 4
- # queue_size: 100
- # retry_on_failure: true
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
- detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
+ detectors: [env, system]
timeout: 2s
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
@@ -106,15 +50,11 @@ processors:
- name: host.name
- name: host.type
- name: container.name
-
extensions:
health_check:
endpoint: 0.0.0.0:13133
- zpages:
- endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
-
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
@@ -132,8 +72,7 @@ exporters:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
- # logging: {}
-
+ # debug: {}
service:
telemetry:
logs:
@@ -142,26 +81,21 @@ service:
address: 0.0.0.0:8888
extensions:
- health_check
- - zpages
- pprof
pipelines:
traces:
- receivers: [jaeger, otlp]
+ receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
- metrics/hostmetrics:
- receivers: [hostmetrics]
- processors: [resourcedetection, batch]
- exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
- receivers: [otlp, tcplog/docker]
+ receivers: [otlp]
processors: [batch]
- exporters: [clickhouselogsexporter]
\ No newline at end of file
+ exporters: [clickhouselogsexporter]
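With the Jaeger, OpenCensus, and `tcplog/docker` receivers removed, the swarm collector now ingests OTLP only; anything still exporting to the old Jaeger ports (14250/14268) must switch. A sketch of an instrumented service pointing at the collector via the standard OTLP exporter environment variables (service name and image are placeholders):

```yaml
# Hypothetical app service exporting OTLP to the swarm otel-collector.
services:
  my-service:
    image: my-service:latest  # placeholder
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4317
      - OTEL_EXPORTER_OTLP_PROTOCOL=grpc
```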
diff --git a/deploy/docker/.env b/deploy/docker/.env
new file mode 100644
index 0000000000..d2f66ad330
--- /dev/null
+++ b/deploy/docker/.env
@@ -0,0 +1 @@
+COMPOSE_PROJECT_NAME=clickhouse-setup
\ No newline at end of file
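Pinning `COMPOSE_PROJECT_NAME` to `clickhouse-setup` keeps the Compose project name (and hence container and network naming) identical to the old `clickhouse-setup` directory layout, so existing installations are not orphaned by the restructure; it is equivalent to passing `-p clickhouse-setup` to every `docker compose` invocation in this directory.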
diff --git a/deploy/docker/clickhouse-setup/.gitkeep b/deploy/docker/clickhouse-setup/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/deploy/docker/clickhouse-setup/alertmanager.yml b/deploy/docker/clickhouse-setup/alertmanager.yml
deleted file mode 100644
index d69357f9dd..0000000000
--- a/deploy/docker/clickhouse-setup/alertmanager.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-global:
- resolve_timeout: 1m
- slack_api_url: 'https://hooks.slack.com/services/xxx'
-
-route:
- receiver: 'slack-notifications'
-
-receivers:
-- name: 'slack-notifications'
- slack_configs:
- - channel: '#alerts'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
-
- *Description:* {{ .Annotations.description }}
-
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
\ No newline at end of file
diff --git a/deploy/docker/clickhouse-setup/alerts.yml b/deploy/docker/clickhouse-setup/alerts.yml
deleted file mode 100644
index 810a20750c..0000000000
--- a/deploy/docker/clickhouse-setup/alerts.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-groups:
-- name: ExampleCPULoadGroup
- rules:
- - alert: HighCpuLoad
- expr: system_cpu_load_average_1m > 0.1
- for: 0m
- labels:
- severity: warning
- annotations:
- summary: High CPU load
- description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/deploy/docker/clickhouse-setup/clickhouse-storage.xml b/deploy/docker/clickhouse-setup/clickhouse-storage.xml
deleted file mode 100644
index 54ec4976f5..0000000000
--- a/deploy/docker/clickhouse-setup/clickhouse-storage.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <storage_configuration>
-        <disks>
-            <default>
-                <keep_free_space_bytes>10485760</keep_free_space_bytes>
-            </default>
-            <s3>
-                <type>s3</type>
-                <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
-                <access_key_id>ACCESS-KEY-ID</access_key_id>
-                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
-            </s3>
-        </disks>
-        <policies>
-            <tiered>
-                <volumes>
-                    <default>
-                        <disk>default</disk>
-                    </default>
-                    <s3>
-                        <disk>s3</disk>
-                        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
-                    </s3>
-                </volumes>
-            </tiered>
-        </policies>
-    </storage_configuration>
-</clickhouse>
diff --git a/deploy/docker/clickhouse-setup/clickhouse-users.xml b/deploy/docker/clickhouse-setup/clickhouse-users.xml
deleted file mode 100644
index f18562071d..0000000000
--- a/deploy/docker/clickhouse-setup/clickhouse-users.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <profiles>
-        <default>
-            <max_memory_usage>10000000000</max_memory_usage>
-            <load_balancing>random</load_balancing>
-        </default>
-        <readonly>
-            <readonly>1</readonly>
-        </readonly>
-    </profiles>
-    <users>
-        <default>
-            <password></password>
-            <networks>
-                <ip>::/0</ip>
-            </networks>
-            <profile>default</profile>
-            <quota>default</quota>
-        </default>
-    </users>
-    <quotas>
-        <default>
-            <interval>
-                <duration>3600</duration>
-                <queries>0</queries>
-                <errors>0</errors>
-                <result_rows>0</result_rows>
-                <read_rows>0</read_rows>
-                <execution_time>0</execution_time>
-            </interval>
-        </default>
-    </quotas>
-</clickhouse>
diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml
deleted file mode 100644
index 826667fabf..0000000000
--- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-version: "2.4"
-include:
- - test-app-docker-compose.yaml
-services:
- zookeeper-1:
- image: bitnami/zookeeper:3.7.1
- container_name: signoz-zookeeper-1
- hostname: zookeeper-1
- user: root
- ports:
- - "2181:2181"
- - "2888:2888"
- - "3888:3888"
- volumes:
- - ./data/zookeeper-1:/bitnami/zookeeper
- environment:
- - ZOO_SERVER_ID=1
- # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- - ALLOW_ANONYMOUS_LOGIN=yes
- - ZOO_AUTOPURGE_INTERVAL=1
- clickhouse:
- image: clickhouse/clickhouse-server:24.1.2-alpine
- container_name: signoz-clickhouse
- # ports:
- # - "9000:9000"
- # - "8123:8123"
- tty: true
- volumes:
- - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- - ./data/clickhouse/:/var/lib/clickhouse/
- - ./user_scripts:/var/lib/clickhouse/user_scripts/
- restart: on-failure
- logging:
- options:
- max-size: 50m
- max-file: "3"
- healthcheck:
- # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
- test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
- interval: 30s
- timeout: 5s
- retries: 3
- alertmanager:
- container_name: signoz-alertmanager
- image: signoz/alertmanager:0.23.7
- volumes:
- - ./data/alertmanager:/data
- depends_on:
- query-service:
- condition: service_healthy
- restart: on-failure
- command:
- - --queryService.url=http://query-service:8085
- - --storage.path=/data
- otel-collector-migrator:
- image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
- container_name: otel-migrator
- command:
- - "sync"
- - "--dsn=tcp://clickhouse:9000"
- - "--up="
- depends_on:
- clickhouse:
- condition: service_healthy
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
- # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
- otel-collector:
- container_name: signoz-otel-collector
- image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
- command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- # user: root # required for reading docker container logs
- volumes:
- - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- - /var/lib/docker/containers:/var/lib/docker/containers:ro
- - /:/hostfs:ro
- environment:
- - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- ports:
- # - "1777:1777" # pprof extension
- - "4317:4317" # OTLP gRPC receiver
- - "4318:4318" # OTLP HTTP receiver
- # - "8888:8888" # OtelCollector internal metrics
- # - "8889:8889" # signoz spanmetrics exposed by the agent
- # - "9411:9411" # Zipkin port
- # - "13133:13133" # health check extension
- # - "14250:14250" # Jaeger gRPC
- # - "14268:14268" # Jaeger thrift HTTP
- # - "55678:55678" # OpenCensus receiver
- # - "55679:55679" # zPages extension
- restart: on-failure
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator:
- condition: service_completed_successfully
- query-service:
- condition: service_healthy
- logspout:
- image: "gliderlabs/logspout:v3.2.14"
- container_name: signoz-logspout
- volumes:
- - /etc/hostname:/etc/host_hostname:ro
- - /var/run/docker.sock:/var/run/docker.sock
- command: syslog+tcp://otel-collector:2255
- depends_on:
- - otel-collector
- restart: on-failure
diff --git a/deploy/docker/clickhouse-setup/docker-compose-local.yaml b/deploy/docker/clickhouse-setup/docker-compose-local.yaml
deleted file mode 100644
index 7a4222ff8c..0000000000
--- a/deploy/docker/clickhouse-setup/docker-compose-local.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-version: "2.4"
-
-services:
- query-service:
- hostname: query-service
- build:
- context: "../../../"
- dockerfile: "./pkg/query-service/Dockerfile"
- args:
- LDFLAGS: ""
- TARGETPLATFORM: "${GOOS}/${GOARCH}"
- container_name: signoz-query-service
- environment:
- - ClickHouseUrl=tcp://clickhouse:9000
- - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- - DASHBOARDS_PATH=/root/config/dashboards
- - STORAGE=clickhouse
- - GODEBUG=netdns=go
- - TELEMETRY_ENABLED=true
- volumes:
- - ./prometheus.yml:/root/config/prometheus.yml
- - ../dashboards:/root/config/dashboards
- - ./data/signoz/:/var/lib/signoz/
- command:
- [
- "-config=/root/config/prometheus.yml",
- "--use-logs-new-schema=true",
- "--use-trace-new-schema=true"
- ]
- ports:
- - "6060:6060"
- - "8080:8080"
- restart: on-failure
- healthcheck:
- test:
- [
- "CMD",
- "wget",
- "--spider",
- "-q",
- "localhost:8080/api/v1/health"
- ]
- interval: 30s
- timeout: 5s
- retries: 3
- depends_on:
- clickhouse:
- condition: service_healthy
-
- frontend:
- build:
- context: "../../../frontend"
- dockerfile: "./Dockerfile"
- args:
- TARGETOS: "${GOOS}"
- TARGETPLATFORM: "${GOARCH}"
- container_name: signoz-frontend
- environment:
- - FRONTEND_API_ENDPOINT=http://query-service:8080
- restart: on-failure
- depends_on:
- - alertmanager
- - query-service
- ports:
- - "3301:3301"
- volumes:
- - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml
deleted file mode 100644
index ae25523fbb..0000000000
--- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml
+++ /dev/null
@@ -1,257 +0,0 @@
-x-clickhouse-defaults: &clickhouse-defaults
- restart: on-failure
- # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
- image: clickhouse/clickhouse-server:24.1.2-alpine
- tty: true
- depends_on:
- - zookeeper-1
- # - zookeeper-2
- # - zookeeper-3
- logging:
- options:
- max-size: 50m
- max-file: "3"
- healthcheck:
- # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
- test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
- interval: 30s
- timeout: 5s
- retries: 3
- ulimits:
- nproc: 65535
- nofile:
- soft: 262144
- hard: 262144
-x-db-depend: &db-depend
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator-sync:
- condition: service_completed_successfully
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
-services:
- zookeeper-1:
- image: bitnami/zookeeper:3.7.1
- container_name: signoz-zookeeper-1
- hostname: zookeeper-1
- user: root
- ports:
- - "2181:2181"
- - "2888:2888"
- - "3888:3888"
- volumes:
- - ./data/zookeeper-1:/bitnami/zookeeper
- environment:
- - ZOO_SERVER_ID=1
- # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- - ALLOW_ANONYMOUS_LOGIN=yes
- - ZOO_AUTOPURGE_INTERVAL=1
- # zookeeper-2:
- # image: bitnami/zookeeper:3.7.0
- # container_name: signoz-zookeeper-2
- # hostname: zookeeper-2
- # user: root
- # ports:
- # - "2182:2181"
- # - "2889:2888"
- # - "3889:3888"
- # volumes:
- # - ./data/zookeeper-2:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=2
- # - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
-
- # zookeeper-3:
- # image: bitnami/zookeeper:3.7.0
- # container_name: signoz-zookeeper-3
- # hostname: zookeeper-3
- # user: root
- # ports:
- # - "2183:2181"
- # - "2890:2888"
- # - "3890:3888"
- # volumes:
- # - ./data/zookeeper-3:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=3
- # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
- clickhouse:
- !!merge <<: *clickhouse-defaults
- container_name: signoz-clickhouse
- hostname: clickhouse
- ports:
- - "9000:9000"
- - "8123:8123"
- - "9181:9181"
- volumes:
- - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- - ./data/clickhouse/:/var/lib/clickhouse/
- - ./user_scripts:/var/lib/clickhouse/user_scripts/
- # clickhouse-2:
- # <<: *clickhouse-defaults
- # container_name: signoz-clickhouse-2
- # hostname: clickhouse-2
- # ports:
- # - "9001:9000"
- # - "8124:8123"
- # - "9182:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-2/:/var/lib/clickhouse/
- # - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
- # clickhouse-3:
- # <<: *clickhouse-defaults
- # container_name: signoz-clickhouse-3
- # hostname: clickhouse-3
- # ports:
- # - "9002:9000"
- # - "8125:8123"
- # - "9183:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-3/:/var/lib/clickhouse/
- # - ./user_scripts:/var/lib/clickhouse/user_scripts/
- alertmanager:
- image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
- container_name: signoz-alertmanager
- volumes:
- - ./data/alertmanager:/data
- depends_on:
- query-service:
- condition: service_healthy
- restart: on-failure
- command:
- - --queryService.url=http://query-service:8085
- - --storage.path=/data
- # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
- query-service:
- image: signoz/query-service:${DOCKER_TAG:-0.69.0}
- container_name: signoz-query-service
- command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
- # ports:
- # - "6060:6060" # pprof port
- # - "8080:8080" # query-service port
- volumes:
- - ./prometheus.yml:/root/config/prometheus.yml
- - ../dashboards:/root/config/dashboards
- - ./data/signoz/:/var/lib/signoz/
- environment:
- - ClickHouseUrl=tcp://clickhouse:9000
- - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- - DASHBOARDS_PATH=/root/config/dashboards
- - STORAGE=clickhouse
- - GODEBUG=netdns=go
- - TELEMETRY_ENABLED=true
- - DEPLOYMENT_TYPE=docker-standalone-amd
- restart: on-failure
- healthcheck:
- test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
- interval: 30s
- timeout: 5s
- retries: 3
- !!merge <<: *db-depend
- frontend:
- image: signoz/frontend:${DOCKER_TAG:-0.69.0}
- container_name: signoz-frontend
- restart: on-failure
- depends_on:
- - alertmanager
- - query-service
- ports:
- - "3301:3301"
- volumes:
- - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
- otel-collector-migrator-sync:
- image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
- container_name: otel-migrator-sync
- command:
- - "sync"
- - "--dsn=tcp://clickhouse:9000"
- - "--up="
- depends_on:
- clickhouse:
- condition: service_healthy
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
- otel-collector-migrator-async:
- image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
- container_name: otel-migrator-async
- command:
- - "async"
- - "--dsn=tcp://clickhouse:9000"
- - "--up="
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator-sync:
- condition: service_completed_successfully
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
- otel-collector:
- image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
- container_name: signoz-otel-collector
- command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- user: root # required for reading docker container logs
- volumes:
- - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- - /var/lib/docker/containers:/var/lib/docker/containers:ro
- - /:/hostfs:ro
- environment:
- - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- - LOW_CARDINAL_EXCEPTION_GROUPING=false
- ports:
- # - "1777:1777" # pprof extension
- - "4317:4317" # OTLP gRPC receiver
- - "4318:4318" # OTLP HTTP receiver
- # - "8888:8888" # OtelCollector internal metrics
- # - "8889:8889" # signoz spanmetrics exposed by the agent
- # - "9411:9411" # Zipkin port
- # - "13133:13133" # health check extension
- # - "14250:14250" # Jaeger gRPC
- # - "14268:14268" # Jaeger thrift HTTP
- # - "55678:55678" # OpenCensus receiver
- # - "55679:55679" # zPages extension
- restart: on-failure
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator-sync:
- condition: service_completed_successfully
- query-service:
- condition: service_healthy
- logspout:
- image: "gliderlabs/logspout:v3.2.14"
- container_name: signoz-logspout
- volumes:
- - /etc/hostname:/etc/host_hostname:ro
- - /var/run/docker.sock:/var/run/docker.sock
- command: syslog+tcp://otel-collector:2255
- depends_on:
- - otel-collector
- restart: on-failure
diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml
deleted file mode 100644
index cf9309a967..0000000000
--- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml
+++ /dev/null
@@ -1,243 +0,0 @@
-version: "2.4"
-include:
- - test-app-docker-compose.yaml
-x-clickhouse-defaults: &clickhouse-defaults
- restart: on-failure
- # addding non LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
- image: clickhouse/clickhouse-server:24.1.2-alpine
- tty: true
- depends_on:
- - zookeeper-1
- # - zookeeper-2
- # - zookeeper-3
- logging:
- options:
- max-size: 50m
- max-file: "3"
- healthcheck:
- # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
- test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"]
- interval: 30s
- timeout: 5s
- retries: 3
- ulimits:
- nproc: 65535
- nofile:
- soft: 262144
- hard: 262144
-x-db-depend: &db-depend
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator:
- condition: service_completed_successfully
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
-services:
- zookeeper-1:
- image: bitnami/zookeeper:3.7.1
- container_name: signoz-zookeeper-1
- hostname: zookeeper-1
- user: root
- ports:
- - "2181:2181"
- - "2888:2888"
- - "3888:3888"
- volumes:
- - ./data/zookeeper-1:/bitnami/zookeeper
- environment:
- - ZOO_SERVER_ID=1
- # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- - ALLOW_ANONYMOUS_LOGIN=yes
- - ZOO_AUTOPURGE_INTERVAL=1
- # zookeeper-2:
- # image: bitnami/zookeeper:3.7.0
- # container_name: signoz-zookeeper-2
- # hostname: zookeeper-2
- # user: root
- # ports:
- # - "2182:2181"
- # - "2889:2888"
- # - "3889:3888"
- # volumes:
- # - ./data/zookeeper-2:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=2
- # - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
-
- # zookeeper-3:
- # image: bitnami/zookeeper:3.7.0
- # container_name: signoz-zookeeper-3
- # hostname: zookeeper-3
- # user: root
- # ports:
- # - "2183:2181"
- # - "2890:2888"
- # - "3890:3888"
- # volumes:
- # - ./data/zookeeper-3:/bitnami/zookeeper
- # environment:
- # - ZOO_SERVER_ID=3
- # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
- # - ALLOW_ANONYMOUS_LOGIN=yes
- # - ZOO_AUTOPURGE_INTERVAL=1
- clickhouse:
- !!merge <<: *clickhouse-defaults
- container_name: signoz-clickhouse
- hostname: clickhouse
- ports:
- - "9000:9000"
- - "8123:8123"
- - "9181:9181"
- volumes:
- - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- - ./data/clickhouse/:/var/lib/clickhouse/
- - ./user_scripts:/var/lib/clickhouse/user_scripts/
- # clickhouse-2:
- # <<: *clickhouse-defaults
- # container_name: signoz-clickhouse-2
- # hostname: clickhouse-2
- # ports:
- # - "9001:9000"
- # - "8124:8123"
- # - "9182:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-2/:/var/lib/clickhouse/
- # - ./user_scripts:/var/lib/clickhouse/user_scripts/
-
- # clickhouse-3:
- # <<: *clickhouse-defaults
- # container_name: signoz-clickhouse-3
- # hostname: clickhouse-3
- # ports:
- # - "9002:9000"
- # - "8125:8123"
- # - "9183:9181"
- # volumes:
- # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- # - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
- # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- # - ./data/clickhouse-3/:/var/lib/clickhouse/
- # - ./user_scripts:/var/lib/clickhouse/user_scripts/
- alertmanager:
- image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
- container_name: signoz-alertmanager
- volumes:
- - ./data/alertmanager:/data
- depends_on:
- query-service:
- condition: service_healthy
- restart: on-failure
- command:
- - --queryService.url=http://query-service:8085
- - --storage.path=/data
- # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
- query-service:
- image: signoz/query-service:${DOCKER_TAG:-0.69.0}
- container_name: signoz-query-service
- command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"]
- # ports:
- # - "6060:6060" # pprof port
- # - "8080:8080" # query-service port
- volumes:
- - ./prometheus.yml:/root/config/prometheus.yml
- - ../dashboards:/root/config/dashboards
- - ./data/signoz/:/var/lib/signoz/
- environment:
- - ClickHouseUrl=tcp://clickhouse:9000
- - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- - DASHBOARDS_PATH=/root/config/dashboards
- - STORAGE=clickhouse
- - GODEBUG=netdns=go
- - TELEMETRY_ENABLED=true
- - DEPLOYMENT_TYPE=docker-standalone-amd
- - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
- restart: on-failure
- healthcheck:
- test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
- interval: 30s
- timeout: 5s
- retries: 3
- !!merge <<: *db-depend
- frontend:
- image: signoz/frontend:${DOCKER_TAG:-0.69.0}
- container_name: signoz-frontend
- restart: on-failure
- depends_on:
- - alertmanager
- - query-service
- ports:
- - "3301:3301"
- volumes:
- - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
- otel-collector-migrator:
- image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
- container_name: otel-migrator
- command:
- - "--dsn=tcp://clickhouse:9000"
- depends_on:
- clickhouse:
- condition: service_healthy
- # clickhouse-2:
- # condition: service_healthy
- # clickhouse-3:
- # condition: service_healthy
- otel-collector:
- image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
- container_name: signoz-otel-collector
- command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- user: root # required for reading docker container logs
- volumes:
- - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- - /var/lib/docker/containers:/var/lib/docker/containers:ro
- - /:/hostfs:ro
- environment:
- - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- - LOW_CARDINAL_EXCEPTION_GROUPING=false
- ports:
- # - "1777:1777" # pprof extension
- - "4317:4317" # OTLP gRPC receiver
- - "4318:4318" # OTLP HTTP receiver
- # - "8888:8888" # OtelCollector internal metrics
- # - "8889:8889" # signoz spanmetrics exposed by the agent
- # - "9411:9411" # Zipkin port
- # - "13133:13133" # health check extension
- # - "14250:14250" # Jaeger gRPC
- # - "14268:14268" # Jaeger thrift HTTP
- # - "55678:55678" # OpenCensus receiver
- # - "55679:55679" # zPages extension
- restart: on-failure
- depends_on:
- clickhouse:
- condition: service_healthy
- otel-collector-migrator:
- condition: service_completed_successfully
- query-service:
- condition: service_healthy
- logspout:
- image: "gliderlabs/logspout:v3.2.14"
- container_name: signoz-logspout
- volumes:
- - /etc/hostname:/etc/host_hostname:ro
- - /var/run/docker.sock:/var/run/docker.sock
- command: syslog+tcp://otel-collector:2255
- depends_on:
- - otel-collector
- restart: on-failure
diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml
deleted file mode 100644
index 251ca25344..0000000000
--- a/deploy/docker/clickhouse-setup/docker-compose.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-include:
- - test-app-docker-compose.yaml
- - docker-compose-minimal.yaml
diff --git a/deploy/docker/clickhouse-setup/keeper_config.xml b/deploy/docker/clickhouse-setup/keeper_config.xml
deleted file mode 100644
index a9a25c1817..0000000000
--- a/deploy/docker/clickhouse-setup/keeper_config.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<clickhouse>
-    <logger>
-        <level>information</level>
-        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
-        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
-        <size>1000M</size>
-        <count>10</count>
-    </logger>
-    <listen_host>0.0.0.0</listen_host>
-    <max_connections>4096</max_connections>
-    <keeper_server>
-        <tcp_port>9181</tcp_port>
-        <server_id>1</server_id>
-        <log_storage_path>/var/lib/clickhouse/coordination/logs</log_storage_path>
-        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
-        <coordination_settings>
-            <operation_timeout_ms>10000</operation_timeout_ms>
-            <min_session_timeout_ms>10000</min_session_timeout_ms>
-            <session_timeout_ms>100000</session_timeout_ms>
-            <raft_logs_level>information</raft_logs_level>
-            <compress_logs>false</compress_logs>
-        </coordination_settings>
-        <hostname_checks_enabled>true</hostname_checks_enabled>
-        <raft_configuration>
-            <server>
-                <id>1</id>
-                <hostname>clickhouses-keeper-1</hostname>
-                <port>9234</port>
-            </server>
-        </raft_configuration>
-    </keeper_server>
-</clickhouse>
\ No newline at end of file
diff --git a/deploy/docker/clickhouse-setup/otel-collector-opamp-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-opamp-config.yaml
deleted file mode 100644
index e408b55ef6..0000000000
--- a/deploy/docker/clickhouse-setup/otel-collector-opamp-config.yaml
+++ /dev/null
@@ -1 +0,0 @@
-server_endpoint: ws://query-service:4320/v1/opamp
diff --git a/deploy/docker/clickhouse-setup/prometheus.yml b/deploy/docker/clickhouse-setup/prometheus.yml
deleted file mode 100644
index d7c52893c5..0000000000
--- a/deploy/docker/clickhouse-setup/prometheus.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# my global config
-global:
- scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
- evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
- # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
- # - "first_rules.yml"
- # - "second_rules.yml"
- - 'alerts.yml'
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs: []
-
-remote_read:
- - url: tcp://clickhouse:9000/signoz_metrics
diff --git a/deploy/docker/clickhouse-setup/test-app-docker-compose.yaml b/deploy/docker/clickhouse-setup/test-app-docker-compose.yaml
deleted file mode 100644
index c043d75d74..0000000000
--- a/deploy/docker/clickhouse-setup/test-app-docker-compose.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-services:
- hotrod:
- image: jaegertracing/example-hotrod:1.30
- container_name: hotrod
- logging:
- options:
- max-size: 50m
- max-file: "3"
- command: [ "all" ]
- environment:
- - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
-
- load-hotrod:
- image: "signoz/locust:1.2.3"
- container_name: load-hotrod
- hostname: load-hotrod
- environment:
- ATTACKED_HOST: http://hotrod:8080
- LOCUST_MODE: standalone
- NO_PROXY: standalone
- TASK_DELAY_FROM: 5
- TASK_DELAY_TO: 30
- QUIET_MODE: "${QUIET_MODE:-false}"
- LOCUST_OPTS: "--headless -u 10 -r 1"
- volumes:
- - ../common/locust-scripts:/locust
diff --git a/deploy/docker/common/locust-scripts/locustfile.py b/deploy/docker/common/locust-scripts/locustfile.py
deleted file mode 100644
index 0b518208cd..0000000000
--- a/deploy/docker/common/locust-scripts/locustfile.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from locust import HttpUser, task, between
-class UserTasks(HttpUser):
- wait_time = between(5, 15)
-
- @task
- def rachel(self):
- self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
- @task
- def trom(self):
- self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
- @task
- def japanese(self):
- self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
- @task
- def coffee(self):
- self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
diff --git a/deploy/docker/docker-compose.ha.yaml b/deploy/docker/docker-compose.ha.yaml
new file mode 100644
index 0000000000..e65ef63c0d
--- /dev/null
+++ b/deploy/docker/docker-compose.ha.yaml
@@ -0,0 +1,283 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ restart: on-failure
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+x-clickhouse-defaults: &clickhouse-defaults
+ <<: *common
+ # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ tty: true
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9363"
+ signoz.io/path: "/metrics"
+ depends_on:
+ init-clickhouse:
+ condition: service_completed_successfully
+ zookeeper-1:
+ condition: service_healthy
+ zookeeper-2:
+ condition: service_healthy
+ zookeeper-3:
+ condition: service_healthy
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - 0.0.0.0:8123/ping
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ ulimits:
+ nproc: 65535
+ nofile:
+ soft: 262144
+ hard: 262144
+x-zookeeper-defaults: &zookeeper-defaults
+ <<: *common
+ image: bitnami/zookeeper:3.7.1
+ user: root
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9141"
+ signoz.io/path: "/metrics"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
+ interval: 30s
+ timeout: 5s
+ retries: 3
+x-db-depend: &db-depend
+ <<: *common
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-sync:
+ condition: service_completed_successfully
+services:
+ init-clickhouse:
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ container_name: signoz-init-clickhouse
+ command:
+ - bash
+ - -c
+ - |
+ version="v0.0.1"
+ node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
+ node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
+ echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
+ cd /tmp
+ wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
+ tar -xvzf histogram-quantile.tar.gz
+ mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
+ volumes:
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ zookeeper-1:
+ <<: *zookeeper-defaults
+ container_name: signoz-zookeeper-1
+ # ports:
+ # - "2181:2181"
+ # - "2888:2888"
+ # - "3888:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=1
+ - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ zookeeper-2:
+ <<: *zookeeper-defaults
+ container_name: signoz-zookeeper-2
+ # ports:
+ # - "2182:2181"
+ # - "2889:2888"
+ # - "3889:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-2:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=2
+ - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ zookeeper-3:
+ <<: *zookeeper-defaults
+ container_name: signoz-zookeeper-3
+ # ports:
+ # - "2183:2181"
+ # - "2890:2888"
+ # - "3890:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-3:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=3
+ - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ clickhouse:
+ <<: *clickhouse-defaults
+ container_name: signoz-clickhouse
+ # ports:
+ # - "9000:9000"
+ # - "8123:8123"
+ # - "9181:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ clickhouse-2:
+ <<: *clickhouse-defaults
+ container_name: signoz-clickhouse-2
+ # ports:
+ # - "9001:9000"
+ # - "8124:8123"
+ # - "9182:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ clickhouse-3:
+ <<: *clickhouse-defaults
+ container_name: signoz-clickhouse-3
+ # ports:
+ # - "9002:9000"
+ # - "8125:8123"
+ # - "9183:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ alertmanager:
+ <<: *common
+ image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
+ container_name: signoz-alertmanager
+ command:
+ - --queryService.url=http://query-service:8085
+ - --storage.path=/data
+ volumes:
+ - ./clickhouse-setup/data/alertmanager:/data
+ depends_on:
+ query-service:
+ condition: service_healthy
+ query-service:
+ <<: *db-depend
+ image: signoz/query-service:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-query-service
+ command:
+ - --config=/root/config/prometheus.yml
+ - --use-logs-new-schema=true
+ - --use-trace-new-schema=true
+ # ports:
+ # - "3301:8080" # signoz port
+ # - "6060:6060" # pprof port
+ volumes:
+ - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+ - ../common/dashboards:/root/config/dashboards
+ - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
+ environment:
+ - ClickHouseUrl=tcp://clickhouse:9000
+ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
+ - STORAGE=clickhouse
+ - GODEBUG=netdns=go
+ - TELEMETRY_ENABLED=true
+ - DEPLOYMENT_TYPE=docker-standalone-amd
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - localhost:8080/api/v1/health
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ frontend:
+ <<: *common
+ image: signoz/frontend:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-frontend
+ depends_on:
+ - alertmanager
+ - query-service
+ ports:
+ - "3301:3301"
+ volumes:
+ - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
+ # TODO: support multiple otel-collector replicas. Nginx/Traefik for load balancing?
+ otel-collector:
+ <<: *db-depend
+ image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
+ container_name: signoz-otel-collector
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ - --manager-config=/etc/manager-config.yaml
+ - --copy-path=/var/tmp/collector-config.yaml
+ - --feature-gates=-pkg.translator.prometheus.NormalizeName
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+ environment:
+ - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+ - LOW_CARDINAL_EXCEPTION_GROUPING=false
+ ports:
+ # - "1777:1777" # pprof extension
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-sync:
+ condition: service_completed_successfully
+ query-service:
+ condition: service_healthy
+ schema-migrator-sync:
+ <<: *common
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-sync
+ command:
+ - sync
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-async:
+ <<: *db-depend
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-async
+ command:
+ - async
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+
+networks:
+ signoz-net:
+ name: signoz-net
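The HA variant differs from the default compose file in topology only: a three-node ZooKeeper ensemble (an odd count, so a quorum survives one node failure), three ClickHouse servers wired together through `cluster.ha.xml`, and the same single otel-collector (the TODO above notes that scaling it out still needs a load balancer). It is started the same way, e.g. `docker compose -f docker-compose.ha.yaml up -d` from `deploy/docker`.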
diff --git a/deploy/docker/docker-compose.testing.yaml b/deploy/docker/docker-compose.testing.yaml
new file mode 100644
index 0000000000..c4bb825273
--- /dev/null
+++ b/deploy/docker/docker-compose.testing.yaml
@@ -0,0 +1,213 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ restart: on-failure
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+x-clickhouse-defaults: &clickhouse-defaults
+ <<: *common
+ # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ tty: true
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9363"
+ signoz.io/path: "/metrics"
+ depends_on:
+ init-clickhouse:
+ condition: service_completed_successfully
+ zookeeper-1:
+ condition: service_healthy
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - 0.0.0.0:8123/ping
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ ulimits:
+ nproc: 65535
+ nofile:
+ soft: 262144
+ hard: 262144
+x-zookeeper-defaults: &zookeeper-defaults
+ <<: *common
+ image: bitnami/zookeeper:3.7.1
+ user: root
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9141"
+ signoz.io/path: "/metrics"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
+ interval: 30s
+ timeout: 5s
+ retries: 3
+x-db-depend: &db-depend
+ <<: *common
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-sync:
+ condition: service_completed_successfully
+services:
+ init-clickhouse:
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ container_name: signoz-init-clickhouse
+ command:
+ - bash
+ - -c
+ - |
+ version="v0.0.1"
+ node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
+ node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
+ echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
+ cd /tmp
+ wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
+ tar -xvzf histogram-quantile.tar.gz
+ mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
+ volumes:
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ zookeeper-1:
+ <<: *zookeeper-defaults
+ container_name: signoz-zookeeper-1
+ ports:
+ - "2181:2181"
+ - "2888:2888"
+ - "3888:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=1
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ clickhouse:
+ <<: *clickhouse-defaults
+ container_name: signoz-clickhouse
+ ports:
+ - "9000:9000"
+ - "8123:8123"
+ - "9181:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ alertmanager:
+ <<: *common
+ image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
+ container_name: signoz-alertmanager
+ command:
+ - --queryService.url=http://query-service:8085
+ - --storage.path=/data
+ volumes:
+ - ./clickhouse-setup/data/alertmanager:/data
+ depends_on:
+ query-service:
+ condition: service_healthy
+ query-service:
+ <<: *db-depend
+ image: signoz/query-service:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-query-service
+ command:
+ - --config=/root/config/prometheus.yml
+ - --gateway-url=https://api.staging.signoz.cloud
+ - --use-logs-new-schema=true
+ - --use-trace-new-schema=true
+ # ports:
+ # - "8080:8080" # signoz port
+ # - "6060:6060" # pprof port
+ volumes:
+ - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+ - ../common/dashboards:/root/config/dashboards
+ - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
+ environment:
+ - ClickHouseUrl=tcp://clickhouse:9000
+ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
+ - STORAGE=clickhouse
+ - GODEBUG=netdns=go
+ - TELEMETRY_ENABLED=true
+ - DEPLOYMENT_TYPE=docker-standalone-amd
+ - KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - localhost:8080/api/v1/health
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ frontend:
+ <<: *common
+ image: signoz/frontend:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-frontend
+ depends_on:
+ - alertmanager
+ - query-service
+ ports:
+ - "3301:3301"
+ volumes:
+ - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
+ otel-collector:
+ <<: *db-depend
+ image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
+ container_name: signoz-otel-collector
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ - --manager-config=/etc/manager-config.yaml
+ - --copy-path=/var/tmp/collector-config.yaml
+ - --feature-gates=-pkg.translator.prometheus.NormalizeName
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+ environment:
+ - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+ - LOW_CARDINAL_EXCEPTION_GROUPING=false
+ ports:
+ # - "1777:1777" # pprof extension
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
+ depends_on:
+ query-service:
+ condition: service_healthy
+ schema-migrator-sync:
+ <<: *common
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-sync
+ command:
+ - sync
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-async:
+ <<: *db-depend
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-async
+ command:
+ - async
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+
+networks:
+ signoz-net:
+ name: signoz-net
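The testing variant tracks the default compose file, with three intentional differences: `query-service` is started with `--gateway-url=https://api.staging.signoz.cloud`, the `KAFKA_SPAN_EVAL` toggle is passed through, and the ZooKeeper/ClickHouse ports are published so test harnesses can reach them directly.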
diff --git a/deploy/docker/docker-compose.yaml b/deploy/docker/docker-compose.yaml
new file mode 100644
index 0000000000..903cece5f8
--- /dev/null
+++ b/deploy/docker/docker-compose.yaml
@@ -0,0 +1,211 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ restart: on-failure
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+x-clickhouse-defaults: &clickhouse-defaults
+ <<: *common
+ # adding non-LTS version due to this fix https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ tty: true
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9363"
+ signoz.io/path: "/metrics"
+ depends_on:
+ init-clickhouse:
+ condition: service_completed_successfully
+ zookeeper-1:
+ condition: service_healthy
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - 0.0.0.0:8123/ping
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ ulimits:
+ nproc: 65535
+ nofile:
+ soft: 262144
+ hard: 262144
+x-zookeeper-defaults: &zookeeper-defaults
+ <<: *common
+ image: bitnami/zookeeper:3.7.1
+ user: root
+ labels:
+ signoz.io/scrape: "true"
+ signoz.io/port: "9141"
+ signoz.io/path: "/metrics"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
+ interval: 30s
+ timeout: 5s
+ retries: 3
+x-db-depend: &db-depend
+ <<: *common
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-sync:
+ condition: service_completed_successfully
+services:
+ init-clickhouse:
+ <<: *common
+ image: clickhouse/clickhouse-server:24.1.2-alpine
+ container_name: signoz-init-clickhouse
+ command:
+ - bash
+ - -c
+ - |
+ version="v0.0.1"
+ node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
+ node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
+ echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
+ cd /tmp
+ wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
+ tar -xvzf histogram-quantile.tar.gz
+ mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
+ volumes:
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ zookeeper-1:
+ <<: *zookeeper-defaults
+ container_name: signoz-zookeeper-1
+ # ports:
+ # - "2181:2181"
+ # - "2888:2888"
+ # - "3888:3888"
+ volumes:
+ - ./clickhouse-setup/data/zookeeper-1:/bitnami/zookeeper
+ environment:
+ - ZOO_SERVER_ID=1
+ - ALLOW_ANONYMOUS_LOGIN=yes
+ - ZOO_AUTOPURGE_INTERVAL=1
+ - ZOO_ENABLE_PROMETHEUS_METRICS=yes
+ - ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
+ clickhouse:
+ <<: *clickhouse-defaults
+ container_name: signoz-clickhouse
+ # ports:
+ # - "9000:9000"
+ # - "8123:8123"
+ # - "9181:9181"
+ volumes:
+ - ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
+ - ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
+ - ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
+ - ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
+ - ../common/clickhouse/cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
+ - ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
+ # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
+ alertmanager:
+ <<: *common
+ image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.7}
+ container_name: signoz-alertmanager
+ command:
+ - --queryService.url=http://query-service:8085
+ - --storage.path=/data
+ volumes:
+ - ./clickhouse-setup/data/alertmanager:/data
+ depends_on:
+ query-service:
+ condition: service_healthy
+ query-service:
+ <<: *db-depend
+ image: signoz/query-service:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-query-service
+ command:
+ - --config=/root/config/prometheus.yml
+ - --use-logs-new-schema=true
+ - --use-trace-new-schema=true
+ # ports:
+ # - "3301:8080" # signoz port
+ # - "6060:6060" # pprof port
+ volumes:
+ - ../common/signoz/prometheus.yml:/root/config/prometheus.yml
+ - ../common/dashboards:/root/config/dashboards
+ - ./clickhouse-setup/data/signoz/:/var/lib/signoz/
+ environment:
+ - ClickHouseUrl=tcp://clickhouse:9000
+ - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
+ - STORAGE=clickhouse
+ - GODEBUG=netdns=go
+ - TELEMETRY_ENABLED=true
+ - DEPLOYMENT_TYPE=docker-standalone-amd
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - --spider
+ - -q
+ - localhost:8080/api/v1/health
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ frontend:
+ <<: *common
+ image: signoz/frontend:${DOCKER_TAG:-0.69.0}
+ container_name: signoz-frontend
+ depends_on:
+ - alertmanager
+ - query-service
+ ports:
+ - "3301:3301"
+ volumes:
+ - ../common/signoz/nginx-config.conf:/etc/nginx/conf.d/default.conf
+ otel-collector:
+ <<: *db-depend
+ image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.24}
+ container_name: signoz-otel-collector
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ - --manager-config=/etc/manager-config.yaml
+ - --copy-path=/var/tmp/collector-config.yaml
+ - --feature-gates=-pkg.translator.prometheus.NormalizeName
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
+ environment:
+ - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
+ - LOW_CARDINAL_EXCEPTION_GROUPING=false
+ ports:
+ # - "1777:1777" # pprof extension
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
+ depends_on:
+ query-service:
+ condition: service_healthy
+ schema-migrator-sync:
+ <<: *common
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-sync
+ command:
+ - sync
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+ depends_on:
+ clickhouse:
+ condition: service_healthy
+ schema-migrator-async:
+ <<: *db-depend
+ image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.24}
+ container_name: schema-migrator-async
+ command:
+ - async
+ - --dsn=tcp://clickhouse:9000
+ - --up=
+
+networks:
+ signoz-net:
+ name: signoz-net
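In the new default compose file, only the frontend (3301) and the collector's OTLP ports (4317/4318) are published; everything else is commented out. For local debugging, re-exposing a port via a Compose override file is less invasive than editing this file, since `docker compose up` merges `docker-compose.override.yaml` automatically. A sketch:

```yaml
# docker-compose.override.yaml (hypothetical) — re-expose ClickHouse locally.
services:
  clickhouse:
    ports:
      - "9000:9000"  # native TCP interface
      - "8123:8123"  # HTTP interface
```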
diff --git a/deploy/docker/generator/hotrod/docker-compose.yaml b/deploy/docker/generator/hotrod/docker-compose.yaml
new file mode 100644
index 0000000000..287d17765d
--- /dev/null
+++ b/deploy/docker/generator/hotrod/docker-compose.yaml
@@ -0,0 +1,39 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ extra_hosts:
+ - host.docker.internal:host-gateway
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+ restart: on-failure
+services:
+ hotrod:
+ <<: *common
+ image: jaegertracing/example-hotrod:1.61.0
+ container_name: hotrod
+ command: [ "all" ]
+ environment:
+ - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318 # In case of external SigNoz or cloud, update the endpoint and access token
+ # - OTEL_OTLP_HEADERS=signoz-access-token=
+ load-hotrod:
+ <<: *common
+ image: "signoz/locust:1.2.3"
+ container_name: load-hotrod
+ environment:
+ ATTACKED_HOST: http://hotrod:8080
+ LOCUST_MODE: standalone
+ NO_PROXY: standalone
+ TASK_DELAY_FROM: 5
+ TASK_DELAY_TO: 30
+ QUIET_MODE: "${QUIET_MODE:-false}"
+ LOCUST_OPTS: "--headless -u 10 -r 1"
+ volumes:
+ - ../../../common/locust-scripts:/locust
+
+networks:
+ signoz-net:
+ name: signoz-net
+ external: true
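The `signoz-net` network is declared `external`, so the main stack in `deploy/docker` must be up before this generator starts. To point HotROD at SigNoz Cloud instead of the local collector, override the two variables hinted at in the comments above — a sketch, with region and token as placeholders:

```yaml
# Hypothetical override for sending HotROD traces to SigNoz Cloud.
services:
  hotrod:
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=https://ingest.<region>.signoz.cloud:443
      - OTEL_OTLP_HEADERS=signoz-access-token=<your-ingestion-key>
```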
diff --git a/deploy/docker/generator/infra/docker-compose.yaml b/deploy/docker/generator/infra/docker-compose.yaml
new file mode 100644
index 0000000000..95c1cbd8cb
--- /dev/null
+++ b/deploy/docker/generator/infra/docker-compose.yaml
@@ -0,0 +1,43 @@
+version: "3"
+x-common: &common
+ networks:
+ - signoz-net
+ extra_hosts:
+ - host.docker.internal:host-gateway
+ logging:
+ options:
+ max-size: 50m
+ max-file: "3"
+ restart: on-failure
+services:
+ otel-agent:
+ <<: *common
+ image: otel/opentelemetry-collector-contrib:0.111.0
+ command:
+ - --config=/etc/otel-collector-config.yaml
+ volumes:
+ - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+ - /:/hostfs:ro
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ - SIGNOZ_COLLECTOR_ENDPOINT=http://host.docker.internal:4317 # In case of external SigNoz or cloud, update the endpoint and access token
+ - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux # Replace signoz-host with the actual hostname
+ # - SIGNOZ_ACCESS_TOKEN=""
+ # Before exposing the ports, make sure the ports are not used by other services
+ # ports:
+ # - "4317:4317"
+ # - "4318:4318"
+ logspout:
+ <<: *common
+ image: "gliderlabs/logspout:v3.2.14"
+ volumes:
+ - /etc/hostname:/etc/host_hostname:ro
+ - /var/run/docker.sock:/var/run/docker.sock
+ command: syslog+tcp://otel-agent:2255
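+    # logspout tails stdout/stderr of all containers on the host via the Docker
+    # socket and forwards them as syslog over TCP to the otel-agent tcplog
+    # receiver on port 2255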
+ depends_on:
+ - otel-agent
+
+networks:
+ signoz-net:
+ name: signoz-net
+ external: true
diff --git a/deploy/docker/generator/infra/otel-collector-config.yaml b/deploy/docker/generator/infra/otel-collector-config.yaml
new file mode 100644
index 0000000000..ea0a3e846f
--- /dev/null
+++ b/deploy/docker/generator/infra/otel-collector-config.yaml
@@ -0,0 +1,139 @@
+receivers:
+ hostmetrics:
+ collection_interval: 30s
+ root_path: /hostfs
+ scrapers:
+ cpu: {}
+ load: {}
+ memory: {}
+ disk: {}
+ filesystem: {}
+ network: {}
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+ prometheus:
+ config:
+ global:
+ scrape_interval: 60s
+ scrape_configs:
+ - job_name: otel-collector
+ static_configs:
+ - targets:
+ - localhost:8888
+ labels:
+ job_name: otel-collector
+ # For Docker daemon metrics to be scraped, it must be configured to expose
+ # Prometheus metrics, as documented here: https://docs.docker.com/config/daemon/prometheus/
+ # - job_name: docker-daemon
+ # static_configs:
+ # - targets:
+ # - host.docker.internal:9323
+ # labels:
+ # job_name: docker-daemon
+ - job_name: docker-container
+ docker_sd_configs:
+ - host: unix:///var/run/docker.sock
+ relabel_configs:
+ - action: keep
+ regex: true
+ source_labels:
+ - __meta_docker_container_label_signoz_io_scrape
+ - regex: true
+ source_labels:
+ - __meta_docker_container_label_signoz_io_path
+ target_label: __metrics_path__
+ - regex: (.+)
+ source_labels:
+ - __meta_docker_container_label_signoz_io_path
+ target_label: __metrics_path__
+ - separator: ":"
+ source_labels:
+ - __meta_docker_network_ip
+ - __meta_docker_container_label_signoz_io_port
+ target_label: __address__
+ - regex: '/(.*)'
+ replacement: '$1'
+ source_labels:
+ - __meta_docker_container_name
+ target_label: container_name
+ - regex: __meta_docker_container_label_signoz_io_(.+)
+ action: labelmap
+ replacement: $1
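+      # e.g. a container started with the labels signoz.io/scrape=true,
+      # signoz.io/port=9104 and signoz.io/path=/metrics would be discovered
+      # and scraped at <container-network-ip>:9104/metrics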
+ tcplog/docker:
+ listen_address: "0.0.0.0:2255"
+ operators:
+ - type: regex_parser
+        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
+ timestamp:
+ parse_from: attributes.timestamp
+ layout: '%Y-%m-%dT%H:%M:%S.%LZ'
+ - type: move
+ from: attributes["body"]
+ to: body
+ - type: remove
+ field: attributes.timestamp
+      # logs from the container names matched below are dropped; remove a name
+      # from the regex if you want to collect logs from that container
+ - type: filter
+ id: signoz_logs_filter
+ expr: 'attributes.container_name matches "^(signoz-(|alertmanager|query-service|otel-collector|clickhouse|zookeeper))|(infra-(logspout|otel-agent)-.*)"'
+processors:
+ batch:
+ send_batch_size: 10000
+ send_batch_max_size: 11000
+ timeout: 10s
+ resourcedetection:
+ # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
+ detectors:
+ # - ec2
+ # - gcp
+ # - azure
+ - env
+ - system
+ timeout: 2s
+extensions:
+ health_check:
+ endpoint: 0.0.0.0:13133
+ pprof:
+ endpoint: 0.0.0.0:1777
+exporters:
+ otlp:
+ endpoint: ${env:SIGNOZ_COLLECTOR_ENDPOINT}
+ tls:
+ insecure: true
+ headers:
+ signoz-access-token: ${env:SIGNOZ_ACCESS_TOKEN}
+ # debug: {}
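+  # to inspect outgoing data, uncomment the debug exporter above and add
+  # "debug" to a pipeline's exporters list below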
+service:
+ telemetry:
+ logs:
+ encoding: json
+ metrics:
+ address: 0.0.0.0:8888
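+      # these internal metrics are scraped by the prometheus receiver's
+      # otel-collector job above (localhost:8888)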
+ extensions:
+ - health_check
+ - pprof
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics:
+ receivers: [otlp]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics/hostmetrics:
+ receivers: [hostmetrics]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ metrics/prometheus:
+ receivers: [prometheus]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
+ logs:
+ receivers: [otlp, tcplog/docker]
+ processors: [resourcedetection, batch]
+ exporters: [otlp]
diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/otel-collector-config.yaml
similarity index 56%
rename from deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
rename to deploy/docker/otel-collector-config.yaml
index 1b81ea214a..1daab97e34 100644
--- a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker/otel-collector-config.yaml
@@ -1,62 +1,21 @@
receivers:
- tcplog/docker:
- listen_address: "0.0.0.0:2255"
- operators:
- - type: regex_parser
-        regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
- timestamp:
- parse_from: attributes.timestamp
- layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- - type: move
- from: attributes["body"]
- to: body
- - type: remove
- field: attributes.timestamp
- # please remove names from below if you want to collect logs from them
- - type: filter
- id: signoz_logs_filter
- expr: 'attributes.container_name matches "^signoz_(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
- opencensus:
- endpoint: 0.0.0.0:55678
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
- jaeger:
- protocols:
- grpc:
- endpoint: 0.0.0.0:14250
- thrift_http:
- endpoint: 0.0.0.0:14268
- # thrift_compact:
- # endpoint: 0.0.0.0:6831
- # thrift_binary:
- # endpoint: 0.0.0.0:6832
- hostmetrics:
- collection_interval: 30s
- root_path: /hostfs
- scrapers:
- cpu: {}
- load: {}
- memory: {}
- disk: {}
- filesystem: {}
- network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- # otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
-
processors:
batch:
send_batch_size: 10000
@@ -64,25 +23,11 @@ processors:
timeout: 10s
resourcedetection:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
- detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
+ detectors: [env, system]
timeout: 2s
- # memory_limiter:
- # # 80% of maximum memory up to 2G
- # limit_mib: 1500
- # # 25% of limit up to 2G
- # spike_limit_mib: 512
- # check_interval: 5s
- #
- # # 50% of the maximum memory
- # limit_percentage: 50
- # # 20% of max memory usage spike expected
- # spike_limit_percentage: 20
- # queued_retry:
- # num_workers: 4
- # queue_size: 100
- # retry_on_failure: true
signozspanmetrics/delta:
metrics_exporter: clickhousemetricswrite
+ metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
@@ -105,7 +50,11 @@ processors:
- name: host.name
- name: host.type
- name: container.name
-
+extensions:
+ health_check:
+ endpoint: 0.0.0.0:13133
+ pprof:
+ endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
@@ -119,44 +68,34 @@ exporters:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
- # logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
-extensions:
- health_check:
- endpoint: 0.0.0.0:13133
- zpages:
- endpoint: 0.0.0.0:55679
- pprof:
- endpoint: 0.0.0.0:1777
-
+ # debug: {}
service:
telemetry:
logs:
encoding: json
metrics:
address: 0.0.0.0:8888
- extensions: [health_check, zpages, pprof]
+ extensions:
+ - health_check
+ - pprof
pipelines:
traces:
- receivers: [jaeger, otlp]
+ receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
- metrics/hostmetrics:
- receivers: [hostmetrics]
- processors: [resourcedetection, batch]
- exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
- receivers: [otlp, tcplog/docker]
+ receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter]
diff --git a/deploy/install.sh b/deploy/install.sh
index f729d3ae7e..b2282100a1 100755
--- a/deploy/install.sh
+++ b/deploy/install.sh
@@ -2,6 +2,11 @@
set -o errexit
+# Variables
+BASE_DIR="$(dirname "$(readlink -f "$0")")"
+DOCKER_STANDALONE_DIR="docker"
+DOCKER_SWARM_DIR="docker-swarm" # TODO: Add docker swarm support
+
# Regular Colors
Black='\033[0;30m' # Black
Red='\033[0;31m'          # Red
@@ -225,7 +230,7 @@ start_docker() {
echo -e "🐳 Starting Docker ...\n"
if [[ $os == "Mac" ]]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
- else
+ else
if ! $sudo_cmd systemctl is-active docker.service > /dev/null; then
echo "Starting docker service"
$sudo_cmd systemctl start docker.service
@@ -257,12 +262,15 @@ wait_for_containers_start() {
}
bye() { # Prints a friendly good bye message and exits the script.
+  # Capture the caller's exit status before popd overwrites it, then
+  # switch back to the original directory
+  exit_status=$?
+  popd > /dev/null 2>&1
-  if [[ "$?" -ne 0 ]]; then
+  if [[ "$exit_status" -ne 0 ]]; then
set +o errexit
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
- echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+ echo -e "cd ${DOCKER_STANDALONE_DIR}"
+ echo -e "$sudo_cmd $docker_compose_cmd ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
@@ -471,9 +479,12 @@ fi
start_docker
+# Switch to the Docker Standalone directory
+pushd "${BASE_DIR}/${DOCKER_STANDALONE_DIR}" > /dev/null 2>&1
+
# check for open ports, if signoz is not installed
if is_command_present docker-compose; then
- if $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
+ if $sudo_cmd $docker_compose_cmd ps | grep "signoz-query-service" | grep -q "healthy" > /dev/null 2>&1; then
echo "SigNoz already installed, skipping the occupied ports check"
else
check_ports_occupied
@@ -482,14 +493,14 @@ fi
echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz.\n"
-$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml pull
+$sudo_cmd $docker_compose_cmd pull
echo ""
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The $docker_compose_cmd command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
-$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
+$sudo_cmd $docker_compose_cmd up --detach --remove-orphans || true
wait_for_containers_start 60
echo ""
@@ -499,7 +510,14 @@ if [[ $status_code -ne 200 ]]; then
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
- echo -e "$sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
+ echo "cd ${DOCKER_STANDALONE_DIR}"
+ echo "$sudo_cmd $docker_compose_cmd ps -a"
+ echo ""
+
+ echo "Try bringing down the containers and retrying the installation"
+ echo "cd ${DOCKER_STANDALONE_DIR}"
+ echo "$sudo_cmd $docker_compose_cmd down -v"
+ echo ""
echo "Please read our troubleshooting guide https://signoz.io/docs/install/troubleshooting/"
echo "or reach us on SigNoz for support https://signoz.io/slack"
@@ -517,10 +535,13 @@ else
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
- echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
+ echo "ℹ️ By default, retention period is set to 15 days for logs and traces, and 30 days for metrics."
echo -e "To change this, navigate to the General tab on the Settings page of SigNoz UI. For more details, refer to https://signoz.io/docs/userguide/retention-period \n"
- echo "ℹ️ To bring down SigNoz and clean volumes : $sudo_cmd $docker_compose_cmd -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
+ echo "ℹ️ To bring down SigNoz and clean volumes:"
+ echo ""
+ echo "cd ${DOCKER_STANDALONE_DIR}"
+ echo "$sudo_cmd $docker_compose_cmd down -v"
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
@@ -535,7 +556,7 @@ else
do
read -rp 'Email: ' email
done
-
+
send_event "identify_successful_installation"
fi
diff --git a/frontend/README.md b/frontend/README.md
index 99a36710cb..ff6edd4488 100644
--- a/frontend/README.md
+++ b/frontend/README.md
@@ -6,7 +6,7 @@
**Building image**
-``docker-compose up`
+`docker compose up`
This will also run the container, not just build the image.
or
@@ -19,7 +19,7 @@ docker tag signoz/frontend:latest 7296823551/signoz:latest
```
```
-docker-compose up
+docker compose up
```
## Without Docker
diff --git a/frontend/docker-compose.yaml b/frontend/docker-compose.yaml
new file mode 100644
index 0000000000..3b871ca848
--- /dev/null
+++ b/frontend/docker-compose.yaml
@@ -0,0 +1,7 @@
+version: "3.9"
+services:
+ web:
+ build: .
+ image: signoz/frontend:latest
+ ports:
+ - "3301:3301"
diff --git a/frontend/docker-compose.yml b/frontend/docker-compose.yml
deleted file mode 100644
index 8bc085de40..0000000000
--- a/frontend/docker-compose.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: "3.9"
-services:
- web:
- build: .
- image: signoz/frontend:latest
- ports:
- - "3301:3301"
diff --git a/pkg/query-service/README.md b/pkg/query-service/README.md
index 0087230d8d..72d9fa2f62 100644
--- a/pkg/query-service/README.md
+++ b/pkg/query-service/README.md
@@ -7,10 +7,10 @@ Query service is the interface between frontend and databases. It is written in
- clickhouse response in the format accepted by Frontend
# Complete the clickhouse setup locally.
-https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#to-run-clickhouse-setup-recommended-for-local-development
+https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#42-to-run-clickhouse-setup-recommended-for-local-development
-- Comment out the query-service and the frontend section in `signoz/deploy/docker/clickhouse-setup/docker-compose.yaml`
-- Change the alertmanager section in `signoz/deploy/docker/clickhouse-setup/docker-compose.yaml` as follows:
+- Comment out the query-service and the frontend section in `signoz/deploy/docker/docker-compose.yaml`
+- Change the alertmanager section in `signoz/deploy/docker/docker-compose.yaml` as follows:
```console
alertmanager:
image: signoz/alertmanager:0.23.7
@@ -30,9 +30,8 @@ alertmanager:
```
- Run the following:
```console
-cd signoz/
-If you are using x86_64 processors (All Intel/AMD processors) run sudo make run-x86
-If you are on arm64 processors (Apple M1 Macs) run sudo make run-arm
+cd deploy/docker
+docker compose up -d
```
#### Backend Configuration
diff --git a/sample-apps/hotrod/README.md b/sample-apps/hotrod/README.md
deleted file mode 100644
index 3498a06bd4..0000000000
--- a/sample-apps/hotrod/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# HotROD Sample Application (Kubernetes)
-
-Follow the steps in this section to install a sample application named HotR.O.D, and generate tracing data.
-
-```console
-kubectl create ns sample-application
-
-kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml
-```
-
-In case, you have installed SigNoz in namespace other than `platform` or selected Helm release name other than `my-release`, follow the steps below:
-
-```console
-export HELM_RELEASE=my-release-2
-export SIGNOZ_NAMESPACE=platform-2
-export HOTROD_NAMESPACE=sample-application-2
-
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash
-```
-
-To delete sample application:
-
-```console
-export HOTROD_NAMESPACE=sample-application-2
-
-curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash
-```
-
-For testing with local scripts, you can use the following commands:
-
-```console
-# To install hotrod
-cat hotrod-install.sh | bash
-
-# To delete hotrod
-cat hotrod-delete.sh | bash
-```
diff --git a/sample-apps/hotrod/hotrod-delete.sh b/sample-apps/hotrod/hotrod-delete.sh
deleted file mode 100755
index a7d88ebc07..0000000000
--- a/sample-apps/hotrod/hotrod-delete.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-cd "$(dirname "${BASH_SOURCE[0]}")";
-
-HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
-
-if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then
- echo "Default k8s namespace and SigNoz namespace must not be deleted"
- echo "Deleting components only"
- kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml)
-else
- echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}"
- kubectl delete namespace "${HOTROD_NAMESPACE}"
-fi
-
-if [ $? -ne 0 ]; then
- echo "❌ Failed to delete HotROD sample application"
-else
- echo "✅ Successfully deleted HotROD sample application"
-fi
diff --git a/sample-apps/hotrod/hotrod-install.sh b/sample-apps/hotrod/hotrod-install.sh
deleted file mode 100755
index b7ba4f6caa..0000000000
--- a/sample-apps/hotrod/hotrod-install.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-cd "$(dirname "${BASH_SOURCE[0]}")";
-
-# Namespace to install sample app
-HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
-SIGNOZ_NAMESPACE="${SIGNOZ_NAMESPACE:-platform}"
-
-# HotROD's docker image
-if [[ -z $HOTROD_IMAGE ]]; then
- HOTROD_REPO="${HOTROD_REPO:-jaegertracing/example-hotrod}"
- HOTROD_TAG="${HOTROD_TAG:-1.30}"
- HOTROD_IMAGE="${HOTROD_REPO}:${HOTROD_TAG}"
-fi
-
-# Locust's docker image
-if [[ -z $LOCUST_IMAGE ]]; then
- LOCUST_REPO="${LOCUST_REPO:-signoz/locust}"
- LOCUST_TAG="${LOCUST_TAG:-1.2.3}"
- LOCUST_IMAGE="${LOCUST_REPO}:${LOCUST_TAG}"
-fi
-
-# Helm release name
-HELM_RELEASE="${HELM_RELEASE:-my-release}"
-
-# Otel Collector service address
-if [[ -z $JAEGER_ENDPOINT ]]; then
- if [[ "$HELM_RELEASE" == *"signoz"* ]]; then
- JAEGER_ENDPOINT="http://${HELM_RELEASE}-otel-collector.${SIGNOZ_NAMESPACE}.svc.cluster.local:14268/api/traces"
- else
- JAEGER_ENDPOINT="http://${HELM_RELEASE}-signoz-otel-collector.${SIGNOZ_NAMESPACE}.svc.cluster.local:14268/api/traces"
- fi
-fi
-
-# Create namespace for sample application if does not exist
-kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/dev/null | kubectl apply -f -
-
-# Setup sample apps into specified namespace
-kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \
- (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \
- HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \
- HOTROD_IMAGE="${HOTROD_IMAGE}" \
- LOCUST_IMAGE="${LOCUST_IMAGE}" \
- JAEGER_ENDPOINT="${JAEGER_ENDPOINT}" \
- envsubst \
- )
-
-if [ $? -ne 0 ]; then
- echo "❌ Failed to deploy HotROD sample application"
-else
- echo "✅ Successfully deployed HotROD sample application"
-fi
diff --git a/sample-apps/hotrod/hotrod-template.yaml b/sample-apps/hotrod/hotrod-template.yaml
deleted file mode 100644
index 472cbe4d52..0000000000
--- a/sample-apps/hotrod/hotrod-template.yaml
+++ /dev/null
@@ -1,202 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: locust-cm
-data:
- ATTACKED_HOST: http://hotrod:8080
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: scripts-cm
-data:
- locustfile.py: |
- from locust import HttpUser, task, between
- class UserTasks(HttpUser):
- wait_time = between(5, 15)
-
- @task
- def rachel(self):
- self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
- @task
- def trom(self):
- self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
- @task
- def japanese(self):
- self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
- @task
- def coffee(self):
- self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- service: hotrod
- name: hotrod
-spec:
- replicas: 1
- selector:
- matchLabels:
- service: hotrod
- strategy: {}
- template:
- metadata:
- labels:
- service: hotrod
- spec:
- containers:
- - args:
- - all
- env:
- - name: JAEGER_ENDPOINT
- value: ${JAEGER_ENDPOINT}
- image: ${HOTROD_IMAGE}
- imagePullPolicy: IfNotPresent
- name: hotrod
- ports:
- - containerPort: 8080
- restartPolicy: Always
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- service: hotrod
- name: hotrod
-spec:
- ports:
- - name: "8080"
- port: 8080
- targetPort: 8080
- selector:
- service: hotrod
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- annotations:
- deployment.kubernetes.io/revision: "1"
- labels:
- role: locust-master
- name: locust-master
-spec:
- replicas: 1
- selector:
- matchLabels:
- role: locust-master
- strategy:
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- role: locust-master
- spec:
- containers:
- - image: ${LOCUST_IMAGE}
- imagePullPolicy: IfNotPresent
- name: locust-master
- env:
- - name: ATTACKED_HOST
- valueFrom:
- configMapKeyRef:
- name: locust-cm
- key: ATTACKED_HOST
- - name: LOCUST_MODE
- value: MASTER
- - name: LOCUST_OPTS
- value: --print-stats
- volumeMounts:
- - mountPath: /locust
- name: locust-scripts
- ports:
- - containerPort: 5557
- name: comm
- - containerPort: 5558
- name: comm-plus-1
- - containerPort: 8089
- name: web-ui
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: locust-scripts
- configMap:
- name: scripts-cm
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- role: locust-master
- name: locust-master
-spec:
- ports:
- - port: 5557
- name: communication
- - port: 5558
- name: communication-plus-1
- - port: 8089
- targetPort: 8089
- name: web-ui
- selector:
- role: locust-master
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- annotations:
- deployment.kubernetes.io/revision: "1"
- labels:
- role: locust-worker
- name: locust-worker
-spec:
- replicas: 1
- selector:
- matchLabels:
- role: locust-worker
- strategy:
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- role: locust-worker
- spec:
- containers:
- - image: ${LOCUST_IMAGE}
- imagePullPolicy: IfNotPresent
- name: locust-worker
- env:
- - name: ATTACKED_HOST
- valueFrom:
- configMapKeyRef:
- name: locust-cm
- key: ATTACKED_HOST
- - name: LOCUST_MODE
- value: WORKER
- - name: LOCUST_MASTER_HOST
- value: locust-master
- volumeMounts:
- - mountPath: /locust
- name: locust-scripts
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: locust-scripts
- configMap:
- name: scripts-cm
diff --git a/sample-apps/hotrod/hotrod.yaml b/sample-apps/hotrod/hotrod.yaml
deleted file mode 100644
index 2dbbaec654..0000000000
--- a/sample-apps/hotrod/hotrod.yaml
+++ /dev/null
@@ -1,202 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: locust-cm
-data:
- ATTACKED_HOST: http://hotrod:8080
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: scripts-cm
-data:
- locustfile.py: |
- from locust import HttpUser, task, between
- class UserTasks(HttpUser):
- wait_time = between(5, 15)
-
- @task
- def rachel(self):
- self.client.get("/dispatch?customer=123&nonse=0.6308392664170006")
- @task
- def trom(self):
- self.client.get("/dispatch?customer=392&nonse=0.015296363321630757")
- @task
- def japanese(self):
- self.client.get("/dispatch?customer=731&nonse=0.8022286220408668")
- @task
- def coffee(self):
- self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593")
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- service: hotrod
- name: hotrod
-spec:
- replicas: 1
- selector:
- matchLabels:
- service: hotrod
- strategy: {}
- template:
- metadata:
- labels:
- service: hotrod
- spec:
- containers:
- - args:
- - all
- env:
- - name: JAEGER_ENDPOINT
- value: http://my-release-signoz-otel-collector.platform.svc.cluster.local:14268/api/traces
- image: jaegertracing/example-hotrod:1.30
- imagePullPolicy: IfNotPresent
- name: hotrod
- ports:
- - containerPort: 8080
- restartPolicy: Always
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- service: hotrod
- name: hotrod
-spec:
- ports:
- - name: "8080"
- port: 8080
- targetPort: 8080
- selector:
- service: hotrod
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- annotations:
- deployment.kubernetes.io/revision: "1"
- labels:
- role: locust-master
- name: locust-master
-spec:
- replicas: 1
- selector:
- matchLabels:
- role: locust-master
- strategy:
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- role: locust-master
- spec:
- containers:
- - image: signoz/locust:1.2.3
- imagePullPolicy: IfNotPresent
- name: locust-master
- env:
- - name: ATTACKED_HOST
- valueFrom:
- configMapKeyRef:
- name: locust-cm
- key: ATTACKED_HOST
- - name: LOCUST_MODE
- value: MASTER
- - name: LOCUST_OPTS
- value: --print-stats
- volumeMounts:
- - mountPath: /locust
- name: locust-scripts
- ports:
- - containerPort: 5557
- name: comm
- - containerPort: 5558
- name: comm-plus-1
- - containerPort: 8089
- name: web-ui
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: locust-scripts
- configMap:
- name: scripts-cm
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- role: locust-master
- name: locust-master
-spec:
- ports:
- - port: 5557
- name: communication
- - port: 5558
- name: communication-plus-1
- - port: 8089
- targetPort: 8089
- name: web-ui
- selector:
- role: locust-master
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- annotations:
- deployment.kubernetes.io/revision: "1"
- labels:
- role: locust-worker
- name: locust-worker
-spec:
- replicas: 1
- selector:
- matchLabels:
- role: locust-worker
- strategy:
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- role: locust-worker
- spec:
- containers:
- - image: signoz/locust:1.2.3
- imagePullPolicy: IfNotPresent
- name: locust-worker
- env:
- - name: ATTACKED_HOST
- valueFrom:
- configMapKeyRef:
- name: locust-cm
- key: ATTACKED_HOST
- - name: LOCUST_MODE
- value: WORKER
- - name: LOCUST_MASTER_HOST
- value: locust-master
- volumeMounts:
- - mountPath: /locust
- name: locust-scripts
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: locust-scripts
- configMap:
- name: scripts-cm