Merge pull request #696 from SigNoz/release/v0.6.0

Release/v0.6.0
This commit is contained in:
Ankit Nayan 2022-02-09 23:32:52 +05:30 committed by GitHub
commit b37bc0620d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
236 changed files with 7055 additions and 7138 deletions

View File

@ -29,6 +29,17 @@ jobs:
- name: Inject the images to the cluster
run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz
- name: Set up HotROD sample-app
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
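# note: the xargs fan-out above waits on each deployment individually --
#   -r skips the wait when the deployment list is empty,
#   -n1 runs one "rollout status" per deployment name,
#   -t prints each command before running it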
- name: Deploy the app
run: |
# add signoz helm repository
@ -47,22 +58,13 @@ jobs:
--set frontend.image.tag=$DOCKER_TAG
# get pods, services and the container images
kubectl describe deploy/frontend -n platform | grep Image
kubectl describe statefulset/query-service -n platform | grep Image
kubectl describe deploy/my-release-frontend -n platform | grep Image
kubectl describe statefulset/my-release-query-service -n platform | grep Image
kubectl get pods -n platform
kubectl get svc -n platform
- name: Kick off a sample-app workload
run: |
# create sample-application namespace
kubectl create ns sample-application
# apply hotrod k8s manifest file
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
# wait for all deployments in sample-application namespace to be READY
kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s
# start the locust swarm
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --rm --command -- curl -X POST -F \

View File

@ -2,13 +2,11 @@ name: push
on:
push:
paths:
- 'pkg/query-service/**'
- 'frontend/**'
branches:
- main
- develop
tags:
- ^v[0-9]+.[0-9]+.[0-9]+$
- v*
jobs:
@ -33,10 +31,18 @@ jobs:
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
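# e.g. for a hypothetical tag "v0.6.0", "${tag:1}" takes the substring from
# index 1, yielding "0.6.0", so images carry the bare version number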
- name: Build and push docker image
env:
DOCKER_TAG: ${{ steps.short-sha.outputs.sha }}
DOCKER_SECOND_TAG: ${{ steps.branch-name.outputs.current_branch }}
run: make build-push-query-service
image-build-and-push-frontend:
@ -69,8 +75,16 @@ jobs:
- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v5.1
- name: Set docker tag environment
run: |
if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
tag="${{ steps.branch-name.outputs.tag }}"
tag="${tag:1}"
echo "DOCKER_TAG=$tag" >> $GITHUB_ENV
elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
echo "DOCKER_TAG=latest" >> $GITHUB_ENV
else
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
env:
DOCKER_TAG: ${{ steps.short-sha.outputs.sha }}
DOCKER_SECOND_TAG: ${{ steps.branch-name.outputs.current_branch }}
run: make build-push-frontend

View File

@ -21,8 +21,8 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59`
- run `cd deploy` to move to deploy directory
- Install signoz locally without the frontend
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d`
- If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d`
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev`
@ -31,14 +31,14 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend)
### Contribute to Frontend without installing SigNoz backend
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `<test environment URL>`
If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `<test environment URL>`
- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Create a file `.env` with `FRONTEND_API_ENDPOINT=<test environment URL>`
- `yarn install`
- `yarn dev`
**_Frontend should now be accessible at `http://localhost:3000/application`_**
**_Frontend should now be accessible at `http://localhost:3301/application`_**
# Contribute to Query-Service
@ -69,11 +69,45 @@ Click the button below. A workspace with all required environments will be created.
> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com/<your-github-username>/signoz`
# Contribute to SigNoz Helm Chart
Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts).
### To run helm chart for local development
- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts`
- it is recommended to use a lightweight Kubernetes (k8s) cluster for local development:
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [k3d](https://k3d.io/#installation)
- [minikube](https://minikube.sigs.k8s.io/docs/start/)
- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
**To load data with HotROD sample app:**
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
**To stop the load generation:**
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
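When you are done, the sample application can be removed by deleting its namespace; a minimal cleanup sketch, assuming nothing else was deployed into `sample-application`:
```sh
kubectl delete ns sample-application
```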
---
## General Instructions
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack).
- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**

View File

@ -1,7 +1,14 @@
#
# Reference Guide - https://www.gnu.org/software/make/manual/make.html
#
# Build variables
BUILD_VERSION ?= $(shell git describe --always --tags)
BUILD_HASH ?= $(shell git rev-parse --short HEAD)
BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
# Internal variables or constants.
#
FRONTEND_DIRECTORY ?= frontend
FLATTENER_DIRECTORY ?= pkg/processors/flattener
QUERY_SERVICE_DIRECTORY ?= pkg/query-service
@ -13,6 +20,15 @@ FRONTEND_DOCKER_IMAGE ?= frontend
QUERY_SERVICE_DOCKER_IMAGE ?= query-service
FLATTERNER_DOCKER_IMAGE ?= flattener-processor
# Build-time Go variables
PACKAGE?=go.signoz.io/query-service
buildVersion=${PACKAGE}/version.buildVersion
buildHash=${PACKAGE}/version.buildHash
buildTime=${PACKAGE}/version.buildTime
gitBranch=${PACKAGE}/version.gitBranch
LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}"
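# The -X flags above make the Go linker overwrite the version package's string
# variables at build time; roughly equivalent to (illustrative values only):
#   go build -ldflags "-X go.signoz.io/query-service/version.buildVersion=v0.6.0" ./...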
all: build-push-frontend build-push-query-service build-push-flattener
# Steps to build and push docker image of frontend
.PHONY: build-frontend-amd64 build-push-frontend
@ -22,22 +38,15 @@ build-frontend-amd64:
@echo "--> Building frontend docker image for amd64"
@echo "------------------"
@cd $(FRONTEND_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" .
# Step to build and push docker image of frontend (used in push pipeline)
build-push-frontend:
@echo "------------------"
@echo "--> Building and pushing frontend docker image"
@echo "------------------"
ifndef DOCKER_SECOND_TAG
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) .
else
@cd $(FRONTEND_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/amd64 . \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \
--tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_SECOND_TAG)
endif
# Steps to build and push docker image of query service
.PHONY: build-query-service-amd64 build-push-query-service
@ -47,22 +56,15 @@ build-query-service-amd64:
@echo "--> Building query-service docker image for amd64"
@echo "------------------"
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64"
docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS)
# Step to build and push docker image of query-service in amd64 and arm64 (used in push pipeline)
build-push-query-service:
@echo "------------------"
@echo "--> Building and pushing query-service docker image"
@echo "------------------"
ifndef DOCKER_SECOND_TAG
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
else
@cd $(QUERY_SERVICE_DIRECTORY) && \
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 . \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \
--tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG)
endif
docker buildx build --file Dockerfile --progress plain --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) .
# Steps to build and push docker image of flattener
.PHONY: build-flattener-amd64 build-push-flattener
@ -91,7 +93,7 @@ dev-setup:
@echo "------------------"
run-x86:
@sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/x86_64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
run-arm:
@sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/arm64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d
@sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d

View File

@ -17,7 +17,7 @@
<a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@ -39,7 +39,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their deployed…
## Join our Slack community
Say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Say hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />
@ -130,7 +130,7 @@ Moreover, SigNoz has more advanced features compared to Jaeger:
We ❤️ contributions to the project, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack Community](https://signoz.io/slack).
<br /><br />
@ -146,7 +146,7 @@ You can find our documentation at https://signoz.io/docs/. If anything is unclear…
## Community
Join the [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.
Join the [Slack Community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.
If you have any ideas, questions, or feedback, feel free to share them with us via our [Github Discussions](https://github.com/SigNoz/signoz/discussions).

View File

@ -18,7 +18,7 @@
<a href="https://github.com/SigNoz/signoz/blob/main/README.zh-cn.md"><b>ReadMe in Chinese</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.de-de.md"><b>ReadMe in German</b></a> &bull;
<a href="https://github.com/SigNoz/signoz/blob/main/README.pt-br.md"><b>ReadMe in Portuguese</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@ -132,7 +132,7 @@ Moreover, SigNoz has a few more advanced features compared to Jaeger:
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz.
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
<br /><br />
@ -148,7 +148,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f
## Community
Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
Join the [slack community](https://signoz.io/slack) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors.
If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)

View File

@ -15,7 +15,7 @@
<h3 align="center">
<a href="https://signoz.io/docs"><b>Documentation</b></a> &bull;
<a href="https://bit.ly/signoz-slack"><b>Slack Community</b></a> &bull;
<a href="https://signoz.io/slack"><b>Slack Community</b></a> &bull;
<a href="https://twitter.com/SigNozHq"><b>Twitter</b></a>
</h3>
@ -38,7 +38,7 @@ SigNoz helps developers monitor applications and troubleshoot problems…
## Join our Slack community
Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />
@ -129,7 +129,7 @@ Moreover, SigNoz has some more advanced features than Jaeger:
We ❤️ contributions big or small. Read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with contributing to SigNoz.
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)
Not sure how to get started? Just ping us on the `#contributing` channel in our [Slack community.](https://signoz.io/slack)
<br /><br />
@ -145,7 +145,7 @@ You can find the documentation at https://signoz.io/docs/. If you have…
## Community
Join the [Slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.
Join the [Slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, or SigNoz, and to connect with other users and contributors.
If you have any ideas, questions, or feedback, please share them on our [Github Discussions](https://github.com/SigNoz/signoz/discussions)

View File

@ -29,7 +29,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in deployed applications. SigNo…
## Join our Slack community
Come say hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋
Come say hi to us on [Slack](https://signoz.io/slack) 👋
<br /><br />
@ -120,7 +120,7 @@ Jaeger only does distributed tracing; SigNoz does both metrics and traces. We…
We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with contributing to SigNoz.
Not sure how to get started? Just ping us on the `#contributing` channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).
Not sure how to get started? Just ping us on the `#contributing` channel in our [slack community](https://signoz.io/slack).
<br /><br />
@ -136,7 +136,7 @@ Jaeger only does distributed tracing; SigNoz does both metrics and traces. We…
## Community
Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.
Join the [slack community](https://signoz.io/slack) to learn more about distributed tracing, observability, and SigNoz, and to connect with other users and contributors.
If you have any ideas, questions, or feedback, please share them with us on [Github Discussions](https://github.com/SigNoz/signoz/discussions).

88
deploy/README.md Normal file
View File

@ -0,0 +1,88 @@
# Deploy
Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz)
and are currently in the `signoz/deploy` folder.
## Docker
If you don't have docker set up, please follow [this guide](https://docs.docker.com/engine/install/)
to set up docker before proceeding with the next steps.
### Using Install Script
Now run the following command to install:
```sh
./install.sh
```
### Using Docker Compose
If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up docker compose before proceeding with the next steps.
For x86 chip (amd):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```
For Mac with Apple chip (arm):
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
```
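Whichever variant you started, a quick status check confirms the containers came up cleanly (swap in the `.arm` compose file on Apple silicon); exited containers usually indicate a startup problem:
```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a
```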
Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from HotROD in the SigNoz UI.
## Kubernetes
### Using Helm
#### Bring up SigNoz cluster
```sh
helm repo add signoz https://charts.signoz.io
kubectl create ns platform
helm -n platform install my-release signoz/signoz
```
To access the UI, you can `port-forward` the frontend service:
```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
```
Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
from the HotROD application, you should see its data in the SigNoz UI.
#### Test HotROD application with SigNoz
```sh
kubectl create ns sample-application
kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
```
To generate load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm
```
To stop load:
```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
--restart='OnFailure' -i --tty --rm --command -- curl \
http://locust-master:8089/stop
```
## Uninstall / Troubleshoot
Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
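For the Kubernetes install, a minimal teardown sketch (assuming the `my-release` name and the `platform` / `sample-application` namespaces used in this guide):
```sh
helm -n platform uninstall my-release
kubectl delete ns platform sample-application
```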

View File

@ -50,7 +50,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;

View File

@ -0,0 +1,98 @@
version: "2.4"
services:
clickhouse:
image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
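# The healthcheck above probes ClickHouse's HTTP ping endpoint, which answers
# "Ok." when healthy. A hedged manual check from deploy/docker/clickhouse-setup
# (service name "clickhouse" as defined above):
#   docker-compose -f docker-compose.arm.yaml exec clickhouse wget -qO- localhost:8123/ping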
alertmanager:
image: signoz/alertmanager:0.5.0
volumes:
- ./alertmanager.yml:/prometheus/alertmanager.yml
- ./data/alertmanager:/data
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
query-service:
image: signoz/query-service:0.6.0
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.6.0
container_name: frontend
depends_on:
- query-service
ports:
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "4317:4317" # OTLP GRPC receiver
mem_limit: 2000m
restart: always
depends_on:
clickhouse:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@ -2,24 +2,16 @@ version: "2.4"
services:
clickhouse:
image: ${clickhouse_image}
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
image: yandex/clickhouse-server:21.12.3.32
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./data/clickhouse/:/var/lib/clickhouse/
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
alertmanager:
image: signoz/alertmanager:0.5.0
@ -29,63 +21,44 @@ services:
command:
- '--config.file=/prometheus/alertmanager.yml'
- '--storage.path=/data'
ports:
- 9093:9093
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:0.5.4
image: signoz/query-service:0.6.0
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.5.4
container_name: frontend
frontend:
image: signoz/frontend:0.6.0
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcontribcol:0.4.2
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
- "8889:8889" # prometheus exporter
mem_limit: 2000m
restart: always
depends_on:
@ -93,29 +66,29 @@ services:
condition: service_healthy
otel-collector-metrics:
image: signoz/otelcontribcol:0.4.2
image: signoz/otelcontribcol:0.5.0
command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
ports:
- "9000:8080"
logging:
options:
max-size: 50m
max-file: "3"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone

View File

@ -1,31 +0,0 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4,
INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4,
INDEX idx_name name TYPE bloom_filter GRANULARITY 4,
INDEX idx_kind kind TYPE minmax GRANULARITY 4,
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))

View File

@ -1 +0,0 @@
clickhouse_image=altinity/clickhouse-server:21.8.12.1.testingarm

View File

@ -1 +0,0 @@
clickhouse_image=yandex/clickhouse-server:21.10

View File

@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;

View File

@ -167,7 +167,8 @@ services:
container_name: query-service
depends_on:
- router
router:
condition: service_healthy
ports:
- "8080:8080"
volumes:
@ -180,10 +181,6 @@ services:
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
@ -193,7 +190,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@ -162,7 +162,8 @@ services:
container_name: query-service
depends_on:
- router
router:
condition: service_healthy
ports:
- "8080:8080"
@ -176,10 +177,6 @@ services:
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
- GODEBUG=netdns=go
depends_on:
router:
condition: service_healthy
frontend:
image: signoz/frontend:0.4.1
container_name: frontend
@ -189,7 +186,7 @@ services:
links:
- "query-service"
ports:
- "3000:3000"
- "3301:3301"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@ -102,7 +102,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|3000|8080"
local ports_pattern="80|3301|8080"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@ -116,15 +116,7 @@ check_ports_occupied() {
fi
if [[ -n $port_check_output ]]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "port_not_available"
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 80, 3301 & 8080 to be open. Please shut down any other service(s) that may be running on these ports."
@ -207,15 +199,7 @@ install_docker_compose() {
echo ""
fi
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "docker_compose_not_found"
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
@ -241,7 +225,7 @@ wait_for_containers_start() {
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
if [[ status_code -eq 200 ]]; then
break
else
@ -272,15 +256,15 @@ bye() { # Prints a friendly good bye message and exits the script.
echo ""
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a"
else
echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
fi
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
echo -e "\n📨 Please share your email to receive support with the installation"
@ -291,16 +275,7 @@ bye() { # Prints a friendly good bye message and exits the script.
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "installation_support"
echo ""
@ -317,10 +292,19 @@ echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
email=""
echo -e "Detecting your OS ..."
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
# Obtain unique installation id
sysinfo="$(uname -a)"
if [ $? -ne 0 ]; then
uuid="$(uuidgen)"
uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}"
SIGNOZ_INSTALLATION_ID="$uuid"
else
SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1)
fi
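# the resulting ID is a stable SHA-1 of the host's "uname -a" output; it can be
# reproduced standalone with: uname -a | shasum | cut -d ' ' -f1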
# echo ""
@ -350,29 +334,78 @@ setup_type='clickhouse'
# Run bye if failure happens
trap bye EXIT
URL="https://api.segment.io/v1/track"
HEADER_1="Content-Type: application/json"
HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
send_event() {
error=""
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
case "$1" in
'install_started')
event="Installation Started"
;;
'os_not_supported')
event="Installation Error"
error="OS Not Supported"
;;
'docker_not_installed')
event="Installation Error"
error="Docker not installed"
;;
'docker_compose_not_found')
event="Installation Error"
error="Docker Compose not found"
;;
'port_not_available')
event="Installation Error"
error="port not available"
;;
'installation_error_checks')
event="Installation Error - Checks"
error="Containers not started"
if [ $setup_type == 'clickhouse' ]; then
others='"data": "some_checks",'
else
supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",'
fi
;;
'installation_support')
event="Installation Support"
others='"email": "'"$email"'",'
;;
'installation_success')
event="Installation Success"
;;
'identify_successful_installation')
event="Identify Successful Installation"
others='"email": "'"$email"'",'
;;
*)
print_error "unknown event type: $1"
exit 1
;;
esac
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
if [ "$error" != "" ]; then
error='"error": "'"$error"'", '
fi
DATA='{ "anonymousId": "'"$SIGNOZ_INSTALLATION_ID"'", "event": "'"$event"'", "properties": { "os": "'"$os"'", '"$error $others"' "setup_type": "'"$setup_type"'" } }'
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1
fi
}
send_event "install_started"
if [[ $desired_os -eq 0 ]]; then
send_event "os_not_supported"
fi
# check_ports_occupied
@ -387,15 +420,8 @@ if ! is_command_present docker; then
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "docker_not_installed"
exit 1
fi
fi
@ -406,7 +432,6 @@ if ! is_command_present docker-compose; then
fi
start_docker
@ -417,9 +442,9 @@ echo ""
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull
else
sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
@ -433,9 +458,9 @@ echo
# script doesn't exit because this command looks like it failed to do its thing.
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true
else
sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
fi
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
@ -449,67 +474,42 @@ if [[ $status_code -ne 200 ]]; then
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "or reach us on SigNoz for support https://signoz.io/slack"
echo "++++++++++++++++++++++++++++++++++++++++"
if [ $setup_type == 'clickhouse' ]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
fi
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "installation_error_checks"
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
send_event "installation_success"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo ""
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo -e "🟢 Your frontend is running on http://localhost:3301"
echo ""
if [ $setup_type == 'clickhouse' ]; then
if is_arm64; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v"
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml down -v"
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v"
fi
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo -e "Join us on Slack https://signoz.io/slack"
echo ""
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
@ -519,16 +519,7 @@ else
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
send_event "identify_successful_installation"
fi
echo -e "\n🙏 Thank you!\n"

View File

@ -1,15 +0,0 @@
dependencies:
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 6.0.0
- name: query-service
repository: file://./signoz-charts/query-service
version: 0.5.4
- name: frontend
repository: file://./signoz-charts/frontend
version: 0.5.4
- name: alertmanager
repository: file://./signoz-charts/alertmanager
version: 0.5.0
digest: sha256:b75aaa30cee8c67d7194fec3543e02389d4df0806982cce55d848b564ae9aad7
generated: "2021-12-24T13:23:16.211336+05:30"

View File

@ -1,37 +0,0 @@
apiVersion: v2
name: signoz-platform
description: SigNoz Observability Platform Helm Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.5.3
dependencies:
- name: zookeeper
repository: "https://charts.bitnami.com/bitnami"
version: 6.0.0
- name: query-service
repository: "file://./signoz-charts/query-service"
version: 0.5.4
- name: frontend
repository: "file://./signoz-charts/frontend"
version: 0.5.4
- name: alertmanager
repository: "file://./signoz-charts/alertmanager"
version: 0.5.0

View File

@ -1,25 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
unittests/

View File

@ -1,7 +0,0 @@
apiVersion: v2
name: alertmanager
description: The Alertmanager handles alerts sent by client applications such as the Prometheus server.
type: application
version: 0.5.0
appVersion: 0.5.0

View File

@ -1,2 +0,0 @@
configmapReload:
enabled: true

View File

@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alertmanager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ .Values.service.port }}:80
{{- end }}

View File

@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "alertmanager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "alertmanager.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "alertmanager.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "alertmanager.labels" -}}
helm.sh/chart: {{ include "alertmanager.chart" . }}
{{ include "alertmanager.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "alertmanager.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alertmanager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "alertmanager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "alertmanager.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@ -1,15 +0,0 @@
{{- if .Values.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
data:
alertmanager.yml: |
{{- toYaml .Values.config | default "{}" | nindent 4 }}
{{- range $key, $value := .Values.templates }}
{{ $key }}: |-
{{- $value | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "alertmanager.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,13 +0,0 @@
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "alertmanager.selectorLabels" . | nindent 6 }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}

View File

@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "alertmanager.serviceAccountName" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,48 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: alertmanager
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "alertmanager.selectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "alertmanager.fullname" . }}-headless
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
spec:
clusterIP: None
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
- port: 9094
targetPort: 9094
protocol: TCP
name: cluster-tcp
- port: 9094
targetPort: 9094
protocol: UDP
name: cluster-udp
{{- end }}
selector:
{{- include "alertmanager.selectorLabels" . | nindent 4 }}

View File

@ -1,152 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- if .Values.statefulSet.annotations }}
annotations:
{{ toYaml .Values.statefulSet.annotations | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "alertmanager.selectorLabels" . | nindent 6 }}
serviceName: {{ include "alertmanager.fullname" . }}-headless
template:
metadata:
labels:
{{- include "alertmanager.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 8 }}
{{- end }}
annotations:
{{- if not .Values.configmapReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "alertmanager.serviceAccountName" . }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
{{- if and (.Values.configmapReload.enabled) (.Values.config) }}
- name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }}
image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
args:
- --volume-dir=/etc/alertmanager
- --webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
resources:
{{- toYaml .Values.configmapReload.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /etc/alertmanager
{{- end }}
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
{{- if .Values.command }}
command:
{{- toYaml .Values.command | nindent 12 }}
{{- end }}
args:
- --storage.path=/alertmanager
- --config.file=/etc/alertmanager/alertmanager.yml
{{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
- --cluster.advertise-address=$(POD_IP):9094
- --cluster.listen-address=0.0.0.0:9094
{{- end }}
{{- if gt .Values.replicaCount 1.0}}
{{- $fullName := include "alertmanager.fullname" . }}
{{- range $i := until (int .Values.replicaCount) }}
- --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:9094
{{- end }}
{{- end }}
{{- if .Values.additionalPeers }}
{{- range $item := .Values.additionalPeers }}
- --cluster.peer={{ $item }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
ports:
- name: http
containerPort: 9093
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
{{- if .Values.config }}
- name: config
mountPath: /etc/alertmanager
{{- end }}
- name: storage
mountPath: /alertmanager
{{- if .Values.config }}
volumes:
- name: config
configMap:
name: {{ include "alertmanager.fullname" . }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
{{- toYaml .Values.persistence.accessModes | nindent 10 }}
resources:
requests:
storage: {{ .Values.persistence.size }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass }}
{{- end }}
{{- end }}
{{- else }}
- name: storage
emptyDir: {}
{{- end -}}
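
With the same two-replica override, the container args rendered by the block above gain the HA cluster flags; assuming the default fullname resolves to "alertmanager", the peers are addressed through the headless service:

helm template alertmanager ./alertmanager -f ha-values.yaml \
  | grep -- '--cluster'
# expected output (illustrative):
#   - --cluster.advertise-address=$(POD_IP):9094
#   - --cluster.listen-address=0.0.0.0:9094
#   - --cluster.peer=alertmanager-0.alertmanager-headless:9094
#   - --cluster.peer=alertmanager-1.alertmanager-headless:9094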

View File

@ -1,48 +0,0 @@
should match snapshot of default values:
1: |
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 1.0.0
helm.sh/chart: alertmanager-1.0.0
name: RELEASE-NAME-alertmanager
spec:
ingressClassName: nginx-test
rules:
- host: alertmanager.domain.com
http:
paths:
- backend:
service:
name: RELEASE-NAME-alertmanager
port:
number: 9093
path: /
pathType: ImplementationSpecific
should match snapshot of default values with old kubernetes ingress:
1: |
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx-test
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 1.0.0
helm.sh/chart: alertmanager-1.0.0
name: RELEASE-NAME-alertmanager
spec:
rules:
- host: alertmanager.domain.com
http:
paths:
- backend:
serviceName: RELEASE-NAME-alertmanager
servicePort: 9093
path: /

View File

@ -1,81 +0,0 @@
suite: test ingress
templates:
- ingress.yaml
tests:
- it: should be empty if ingress is not enabled
asserts:
- hasDocuments:
count: 0
- it: should have apiVersion extensions/v1beta1 for k8s < 1.14
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 13
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: extensions/v1beta1
- it: should have apiVersion networking.k8s.io/v1beta1 for k8s < 1.19
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 18
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: networking.k8s.io/v1beta1
- it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 19
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: networking.k8s.io/v1
- it: should have an ingressClassName for k8s >= 1.19
set:
ingress.enabled: true
ingress.className: nginx-test
capabilities:
majorVersion: 1
minorVersion: 19
asserts:
- hasDocuments:
count: 1
- equal:
path: spec.ingressClassName
value: nginx-test
- it: should match snapshot of default values
set:
ingress.enabled: true
ingress.className: nginx-test
chart:
version: 1.0.0
appVersion: 1.0.0
asserts:
- matchSnapshot: { }
- it: should match snapshot of default values with old kubernetes ingress
set:
ingress.enabled: true
ingress.className: nginx-test
capabilities:
majorVersion: 1
minorVersion: 17
chart:
version: 1.0.0
appVersion: 1.0.0
asserts:
- matchSnapshot: { }
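
This suite follows the helm-unittest plugin format; the capabilities and chart overrides used above are supported by the quintush fork in particular. Assuming that plugin, the tests can be run locally, with -3 selecting Helm 3 rendering:

helm plugin install https://github.com/quintush/helm-unittest  # one-time setup
helm unittest -3 ./alertmanager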

View File

@ -1,189 +0,0 @@
# Default values for alertmanager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/alertmanager
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "0.5.0"
extraArgs: {}
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext:
fsGroup: 65534
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
securityContext:
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
runAsUser: 65534
runAsNonRoot: true
runAsGroup: 65534
additionalPeers: []
service:
annotations: {}
type: ClusterIP
port: 9093
# if you want to force a specific nodePort. Must be used with service.type=NodePort
# nodePort:
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: alertmanager.domain.com
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - alertmanager.domain.com
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run in environments with
# little resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
tolerations: []
affinity: {}
statefulSet:
annotations: {}
podAnnotations: {}
podLabels: {}
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
command: []
persistence:
enabled: true
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 100Mi
config:
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
templates:
- '/etc/alertmanager/*.tmpl'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: '{{ template "slack.title" . }}'
text: '{{ template "slack.text" . }}'
route:
receiver: 'slack-notifications'
## Monitors ConfigMap changes and POSTs to a URL
## Ref: https://github.com/jimmidyson/configmap-reload
##
configmapReload:
## If false, the configmap-reload container will not be deployed
##
enabled: false
## configmap-reload container name
##
name: configmap-reload
## configmap-reload container image
##
image:
repository: jimmidyson/configmap-reload
tag: v0.5.0
pullPolicy: IfNotPresent
## configmap-reload resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
templates:
title.tmpl: |-
{{ define "slack.title" }}
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
{{ end }}
text.tmpl: |-
{{ define "slack.text" }}
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}
{{ end }}
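
Because Helm deep-merges maps but replaces lists wholesale, overriding the Slack receiver means restating the full receivers and route blocks (config.templates survives via the map merge). A sketch with a placeholder webhook URL:

cat > slack-values.yaml <<'EOF'
config:
  global:
    resolve_timeout: 1m
    slack_api_url: 'https://hooks.slack.com/services/REPLACE/WITH/YOURS'
  receivers:
    - name: 'slack-notifications'
      slack_configs:
        - channel: '#alerts'
          send_resolved: true
  route:
    receiver: 'slack-notifications'
EOF
helm upgrade --install alertmanager ./alertmanager -f slack-values.yaml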

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,21 +0,0 @@
apiVersion: v2
name: frontend
description: A Helm chart for SigNoz Frontend Service
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.5.4
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.5.4

View File

@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "frontend.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "frontend.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "frontend.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "frontend.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}

View File

@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "frontend.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "frontend.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "frontend.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "frontend.labels" -}}
helm.sh/chart: {{ include "frontend.chart" . }}
{{ include "frontend.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "frontend.selectorLabels" -}}
app.kubernetes.io/name: {{ include "frontend.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "frontend.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "frontend.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@ -1,40 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.config.name }}
labels:
release: {{ .Release.Name }}
data:
default.conf: |-
server {
listen {{ .Values.service.port }};
server_name _;
gzip on;
gzip_static on;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_proxied any;
gzip_vary on;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager {
proxy_pass http://{{ .Values.config.alertmanagerUrl }}/api/v2;
}
location /api {
proxy_pass http://{{ .Values.config.queryServiceUrl }}/api;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
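
A quick smoke test of the routing above, assuming the rendered ConfigMap backs a frontend service named signoz-frontend on port 3000 in the platform namespace (all three names are placeholders): /api/alertmanager/... is rewritten onto the alertmanager /api/v2 prefix, while plain /api falls through to the query service.

kubectl -n platform port-forward svc/signoz-frontend 3000:3000 &
# proxied to <queryServiceUrl>/api/...
curl -s http://localhost:3000/api/v1/services
# proxied to <alertmanagerUrl>/api/v2/status
curl -s http://localhost:3000/api/alertmanager/status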

View File

@ -1,64 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "frontend.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "frontend.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
volumes:
- name: nginx-config
configMap:
name: {{ .Values.config.name }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
- name: REACT_APP_QUERY_SERVICE_URL
value: {{ .Values.configVars.REACT_APP_QUERY_SERVICE_URL }}
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "frontend.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "frontend.fullname" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "frontend.selectorLabels" . | nindent 4 }}

View File

@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "frontend.serviceAccountName" . }}
labels:
{{- include "frontend.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "frontend.fullname" . }}-test-connection"
labels:
{{- include "frontend.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "frontend.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@ -1,77 +0,0 @@
# Default values for frontend.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/frontend
tag: 0.5.4
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars: {}
config:
name: signoz-nginx-config
queryServiceUrl: signoz-query-service:8080
alertmanagerUrl: alertmanager:9093
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 3000
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run in environments with
# little resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,21 +0,0 @@
apiVersion: v2
name: query-service
description: A Helm chart for running SigNoz Query Service in Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.5.4
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.5.4

View File

@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "query-service.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "query-service.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "query-service.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "query-service.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}

View File

@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "query-service.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "query-service.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "query-service.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "query-service.labels" -}}
helm.sh/chart: {{ include "query-service.chart" . }}
{{ include "query-service.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "query-service.selectorLabels" -}}
app.kubernetes.io/name: {{ include "query-service.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "query-service.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "query-service.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "query-service.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-config
data:
prometheus.yml: |
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
remote_read:
- url: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "query-service.fullname" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "query-service.selectorLabels" . | nindent 4 }}

View File

@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "query-service.serviceAccountName" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,87 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "query-service.fullname" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
spec:
serviceName: query-service
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "query-service.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "query-service.selectorLabels" . | nindent 8 }}
spec:
containers:
- name: {{ .Chart.Name }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["-config=/root/config/prometheus.yml"]
ports:
- name: http
containerPort: 8080
protocol: TCP
env:
- name: DruidClientUrl
value: {{ .Values.configVars.DruidClientUrl }}
- name: DruidDatasource
value: {{ .Values.configVars.DruidDatasource }}
- name: STORAGE
value: {{ .Values.configVars.STORAGE }}
- name: ClickHouseUrl
value: {{ .Values.configVars.ClickHouseUrl }}
- name: GODEBUG
value: netdns=go
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
volumeMounts:
- name: prometheus
mountPath: /root/config
- name: signoz-db
mountPath: /var/lib/signoz/
- name: dashboards
mountPath: /root/config/dashboards
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: prometheus
configMap:
name: prometheus-config
- name: dashboards
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: signoz-db
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "query-service.fullname" . }}-test-connection"
labels:
{{- include "query-service.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "query-service.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@ -1,78 +0,0 @@
# Default values for query-service.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/query-service
tag: 0.5.4
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
STORAGE: clickhouse
POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run in environments with
# little resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: initdb-config
data:
init-db.sql: |-
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
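
Once the table exists, a spot check from inside the ClickHouse pod shows spans flowing in. The pod name below follows the operator's chi-<installation>-<cluster>-<shard>-<replica> convention and is illustrative, as is the namespace:

kubectl -n platform exec -it chi-signoz-cluster-0-0-0 -- clickhouse-client \
  --user clickhouse_operator --password clickhouse_operator_password \
  --query "SELECT serviceName, count() AS spans FROM signoz_index
           GROUP BY serviceName ORDER BY spans DESC LIMIT 5"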

View File

@ -1,107 +0,0 @@
{{ if (eq (.Values.cloud | toString) "gcp" )}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gce-resizable
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
fstype: ext4
replication-type: none
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
---
{{- else if (eq (.Values.cloud | toString) "aws") }}
#
# AWS resizable disk example
#
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gp2-resizable
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
---
{{- end }}
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: signoz
spec:
defaults:
templates:
dataVolumeClaimTemplate: default-volume-claim
# logVolumeClaimTemplate: default-volume-claim
serviceTemplate: chi-service-template
configuration:
zookeeper:
nodes:
- host: signoz-zookeeper
port: 2181
session_timeout_ms: 6000
clusters:
- name: cluster
# Templates are specified for this cluster explicitly
templates:
dataVolumeClaimTemplate: default-volume-claim
# logVolumeClaimTemplate: default-volume-claim
podTemplate: pod-template-with-volume
layout:
shardsCount: 1
replicasCount: 1
templates:
hostTemplates:
- name: port-distribution
portDistribution:
- type: ClusterScopeIndex
spec:
tcpPort: 9000
httpPort: 8123
interserverHTTPPort: 9009
podTemplates:
- name: pod-template-with-volume
spec:
containers:
- name: clickhouse
image: yandex/clickhouse-server:21.7
volumeMounts:
- name: default-volume-claim
mountPath: /var/lib/clickhouse
- name: initdb
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: initdb
configMap:
name: initdb-config
serviceTemplates:
- name: chi-service-template
generateName: signoz-clickhouse
spec:
ports:
- name: http
port: 8123
- name: tcp
port: 9000
type: {{ .Values.clickhouseOperator.serviceType }}
volumeClaimTemplates:
- name: default-volume-claim
reclaimPolicy: Retain
spec:
{{- if (eq (.Values.cloud | toString) "gcp" )}}
storageClassName: gce-resizable
{{- else if (eq (.Values.cloud | toString) "aws") }}
storageClassName: gp2-resizable
{{- else if (eq (.Values.cloud | toString) "hcloud") }}
storageClassName: hcloud-volumes
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.clickhouseOperator.storage | quote }}
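
The cloud value selects which resizable StorageClass is emitted alongside the installation and which storageClassName the volume claim template picks up. A sketch of rendering for AWS; the chart path and size are placeholders:

cat > cloud-values.yaml <<'EOF'
cloud: aws
clickhouseOperator:
  storage: 20Gi
  serviceType: ClusterIP
EOF
helm template signoz ./signoz -f cloud-values.yaml \
  | grep -B 2 -A 6 'kind: StorageClass'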

View File

@ -1,157 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=#
# ROLE_KIND=ClusterRole
# ROLE_NAME=clickhouse-operator-posthog
# ROLE_BINDING_KIND=ClusterRoleBinding
# ROLE_BINDING_NAME=clickhouse-operator-posthog
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: clickhouse-operator-posthog
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- services
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- persistentvolumes
- pods
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- apps
resources:
- replicasets
verbs:
- delete
- get
- patch
- update
- apiGroups:
- apps
resourceNames:
- clickhouse-operator
resources:
- deployments
verbs:
- get
- patch
- update
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations
verbs:
- delete
- get
- patch
- update
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations
- clickhouseinstallationtemplates
- clickhouseoperatorconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/finalizers
- clickhouseinstallationtemplates/finalizers
- clickhouseoperatorconfigurations/finalizers
verbs:
- update
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/status
- clickhouseinstallationtemplates/status
- clickhouseoperatorconfigurations/status
verbs:
- create
- delete
- get
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Setup ClusterRoleBinding between ClusterRole and ServiceAccount.
# ClusterRoleBinding is namespace-less and must have a unique name
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: clickhouse-operator-posthog
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: clickhouse-operator-posthog
subjects:
- kind: ServiceAccount
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
{{- end }}

View File

@ -1,418 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAME=etc-clickhouse-operator-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
config.yaml: |
################################################
##
## Watch Namespaces Section
##
################################################
# List of namespaces where clickhouse-operator watches for events.
# Concurrently running operators should watch on different namespaces
#watchNamespaces: ["dev", "test"]
watchNamespaces: []
################################################
##
## Additional Configuration Files Section
##
################################################
# Path to folder where ClickHouse configuration files common for all instances within CHI are located.
chCommonConfigsPath: config.d
# Path to folder where ClickHouse configuration files unique for each instance (host) within CHI are located.
chHostConfigsPath: conf.d
# Path to folder where ClickHouse configuration files with users settings are located.
# Files are common for all instances within CHI
chUsersConfigsPath: users.d
# Path to folder where ClickHouseInstallation .yaml manifests are located.
# Manifests are applied in sorted alpha-numeric order
chiTemplatesPath: templates.d
################################################
##
## Cluster Create/Update/Delete Objects Section
##
################################################
# How many seconds to wait for created/updated StatefulSet to be Ready
statefulSetUpdateTimeout: 300
# How many seconds to wait between checks for created/updated StatefulSet status
statefulSetUpdatePollPeriod: 5
# What to do in case the created StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
# Possible options:
# 1. abort - do nothing, just break the process and wait for admin
# 2. delete - delete newly created problematic StatefulSet
# 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
onStatefulSetCreateFailureAction: ignore
# What to do in case the updated StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
# Possible options:
# 1. abort - do nothing, just break the process and wait for admin
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
# Pod would be recreated by StatefulSet based on rollback-ed configuration
# 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
onStatefulSetUpdateFailureAction: rollback
################################################
##
## ClickHouse Settings Section
##
################################################
# Default values for ClickHouse user configuration
# 1. user/profile - string
# 2. user/quota - string
# 3. user/networks/ip - multiple strings
# 4. user/password - string
chConfigUserDefaultProfile: default
chConfigUserDefaultQuota: default
chConfigUserDefaultNetworksIP:
- "::1"
- "127.0.0.1"
chConfigUserDefaultPassword: "default"
# Default host_regexp to limit network connectivity from outside
chConfigNetworksHostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
################################################
##
## Access to ClickHouse instances
##
################################################
# ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances
# for:
# 1. Metrics requests
# 2. Schema maintenance
# 3. DROP DNS CACHE
# User with such credentials can be specified in additional ClickHouse .xml config files,
# located in `chUsersConfigsPath` folder
chUsername: "clickhouse_operator"
chPassword: "clickhouse_operator_password"
# Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
# Can be used instead of explicitly specified username and password
chCredentialsSecretNamespace: ""
chCredentialsSecretName: ""
# Port where to connect to ClickHouse instances to
chPort: 8123
################################################
##
## Log parameters
##
################################################
logtostderr: "true"
alsologtostderr: "false"
v: "1"
stderrthreshold: ""
vmodule: ""
log_backtrace_at: ""
################################################
##
## Runtime parameters
##
################################################
# Max number of concurrent reconciles in progress
reconcileThreadsNumber: 10
reconcileWaitExclude: true
reconcileWaitInclude: false
################################################
##
## Labels management parameters
##
################################################
# When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
# exclude labels from the following list:
#excludeFromPropagationLabels:
# - "labelA"
# - "labelB"
# Whether to append *Scope* labels to StatefulSet and Pod.
# Full list of available *scope* labels check in labeler.go
# LabelShardScopeIndex
# LabelReplicaScopeIndex
# LabelCHIScopeIndex
# LabelCHIScopeCycleSize
# LabelCHIScopeCycleIndex
# LabelCHIScopeCycleOffset
# LabelClusterScopeIndex
# LabelClusterScopeCycleSize
# LabelClusterScopeCycleIndex
# LabelClusterScopeCycleOffset
appendScopeLabels: "no"
################################################
##
## Pod management parameters
##
################################################
# Grace period for Pod termination.
# How many seconds to wait between sending
# SIGTERM and SIGKILL during Pod termination process.
# Increase this number in case of slow shutdown.
terminationGracePeriod: 30
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-confd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-confd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-configd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-configd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
<yandex>
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
</yandex>
01-clickhouse-02-logger.xml: |
<yandex>
<logger>
<!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
<level>debug</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
<console>1</console>
</logger>
</yandex>
01-clickhouse-03-query_log.xml: |
<yandex>
<query_log replace="1">
<database>system</database>
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<query_thread_log remove="1"/>
</yandex>
01-clickhouse-04-part_log.xml: |
<yandex>
<part_log replace="1">
<database>system</database>
<table>part_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
</yandex>
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-templatesd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
001-templates.json.example: |
{
"apiVersion": "clickhouse.altinity.com/v1",
"kind": "ClickHouseInstallationTemplate",
"metadata": {
"name": "01-default-volumeclaimtemplate"
},
"spec": {
"templates": {
"volumeClaimTemplates": [
{
"name": "chi-default-volume-claim-template",
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "2Gi"
}
}
}
}
],
"podTemplates": [
{
"name": "chi-default-oneperhost-pod-template",
"distribution": "OnePerHost",
"spec": {
"containers" : [
{
"name": "clickhouse",
"image": "yandex/clickhouse-server:19.3.7",
"ports": [
{
"name": "http",
"containerPort": 8123
},
{
"name": "client",
"containerPort": 9000
},
{
"name": "interserver",
"containerPort": 9009
}
]
}
]
}
}
]
}
}
}
default-pod-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-oneperhost-pod-template"
spec:
templates:
podTemplates:
- name: default-oneperhost-pod-template
distribution: "OnePerHost"
default-storage-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-storage-template-2Gi"
spec:
templates:
volumeClaimTemplates:
- name: default-storage-template-2Gi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
readme: |
Templates in this folder are packaged with an operator and available via 'useTemplate'
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-usersd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-usersd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
01-clickhouse-user.xml: |
<yandex>
<users>
<clickhouse_operator>
<networks>
<ip>127.0.0.1</ip>
<ip>0.0.0.0/0</ip>
<ip>::/0</ip>
</networks>
<password_sha256_hex>716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448</password_sha256_hex>
<profile>clickhouse_operator</profile>
<quota>default</quota>
</clickhouse_operator>
</users>
<profiles>
<clickhouse_operator>
<log_queries>0</log_queries>
<skip_unavailable_shards>1</skip_unavailable_shards>
<http_connection_timeout>10</http_connection_timeout>
</clickhouse_operator>
</profiles>
</yandex>
02-clickhouse-default-profile.xml: |
<yandex>
<profiles>
<default>
<log_queries>1</log_queries>
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
<parallel_view_processing>1</parallel_view_processing>
</default>
</profiles>
</yandex>
03-database-ordinary.xml: |
<!-- Remove it for ClickHouse versions before 20.4 -->
<yandex>
<profiles>
<default>
<default_database_engine>Ordinary</default_database_engine>
</default>
</profiles>
</yandex>
{{- end }}

View File

@ -1,129 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
# OPERATOR_IMAGE=altinity/clickhouse-operator:latest
# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:latest
#
# Setup Deployment for clickhouse-operator
# Deployment would be created in kubectl-specified namespace
kind: Deployment
apiVersion: apps/v1
metadata:
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
spec:
replicas: 1
selector:
matchLabels:
app: clickhouse-operator
template:
metadata:
labels:
app: clickhouse-operator
annotations:
prometheus.io/port: '8888'
prometheus.io/scrape: 'true'
spec:
serviceAccountName: clickhouse-operator
volumes:
- name: etc-clickhouse-operator-folder
configMap:
name: etc-clickhouse-operator-files
- name: etc-clickhouse-operator-confd-folder
configMap:
name: etc-clickhouse-operator-confd-files
- name: etc-clickhouse-operator-configd-folder
configMap:
name: etc-clickhouse-operator-configd-files
- name: etc-clickhouse-operator-templatesd-folder
configMap:
name: etc-clickhouse-operator-templatesd-files
- name: etc-clickhouse-operator-usersd-folder
configMap:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
image: altinity/clickhouse-operator:latest
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
env:
# Pod-specific
# spec.nodeName: ip-172-20-52-62.ec2.internal
- name: OPERATOR_POD_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
- name: OPERATOR_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# metadata.namespace: kube-system
- name: OPERATOR_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# status.podIP: 100.96.3.2
- name: OPERATOR_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
# spec.serviceAccount: clickhouse-operator
# spec.serviceAccountName: clickhouse-operator
- name: OPERATOR_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Container-specific
- name: OPERATOR_CONTAINER_CPU_REQUEST
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: requests.cpu
- name: OPERATOR_CONTAINER_CPU_LIMIT
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: limits.cpu
- name: OPERATOR_CONTAINER_MEM_REQUEST
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: requests.memory
- name: OPERATOR_CONTAINER_MEM_LIMIT
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: limits.memory
- name: metrics-exporter
image: altinity/metrics-exporter:latest
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
#
# Setup ClusterIP Service to provide monitoring metrics for Prometheus
# Service would be created in kubectl-specified namespace
# In order to get access outside of k8s it should be exposed as:
# kubectl --namespace prometheus port-forward service/prometheus 9090
# and point browser to localhost:9090
kind: Service
apiVersion: v1
metadata:
name: clickhouse-operator-metrics
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
spec:
ports:
- port: 8888
name: clickhouse-operator-metrics
selector:
app: clickhouse-operator
{{- end }}

View File

@ -1,15 +0,0 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# COMMENT=
# NAMESPACE=posthog
# NAME=clickhouse-operator
#
# Setup ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
{{- end }}

View File

@ -1,53 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-metrics-conf
labels:
app: opentelemetry
component: otel-collector-metrics-conf
data:
otel-collector-metrics-config: |
receivers:
otlp:
protocols:
grpc:
http:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
scrape_interval: 30s
static_configs:
- targets: ["otel-collector:8889"]
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhousemetricswrite:
endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
service:
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp, prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]

View File

@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: otel-collector-metrics
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
selector:
matchLabels:
app: opentelemetry
component: otel-collector-metrics
minReadySeconds: 5
progressDeadlineSeconds: 120
replicas: 1 #TODO - adjust this to your own requirements
template:
metadata:
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
containers:
- command:
- "/otelcontribcol"
- "--config=/conf/otel-collector-metrics-config.yaml"
# Memory Ballast size should be max 1/3 to 1/2 of memory.
- "--mem-ballast-size-mib=683"
image: signoz/otelcontribcol:0.4.2
name: otel-collector
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
ports:
- containerPort: 55679 # Default endpoint for ZPages.
- containerPort: 55680 # Default endpoint for OpenTelemetry receiver.
- containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver.
- containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
- containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.
volumeMounts:
- name: otel-collector-metrics-config-vol
mountPath: /conf
# - name: otel-collector-secrets
# mountPath: /secrets
livenessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
readinessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
volumes:
- configMap:
name: otel-collector-metrics-conf
items:
- key: otel-collector-metrics-config
path: otel-collector-metrics-config.yaml
name: otel-collector-metrics-config-vol
# - secret:
# name: otel-collector-secrets
# items:
# - key: cert.pem
# path: cert.pem
# - key: key.pem
# path: key.pem

View File

@ -1,31 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: otel-collector-metrics
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
ports:
- name: otlp # Legacy endpoint for OpenTelemetry gRPC receiver.
port: 55680
protocol: TCP
targetPort: 55680
- name: otlp-http-legacy # Legacy endpoint for OpenTelemetry HTTP receiver.
port: 55681
protocol: TCP
targetPort: 55681
- name: otlp-grpc # Default endpoint for OpenTelemetry gRPC receiver.
port: 4317
protocol: TCP
targetPort: 4317
- name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
port: 14250
- name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
port: 14268
- name: zipkin # Default endpoint for Zipkin receiver.
port: 9411
- name: metrics # Default endpoint for querying metrics.
port: 8888
selector:
component: otel-collector-metrics

View File

@ -1,67 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-conf
labels:
app: opentelemetry
component: otel-collector-conf
data:
otel-collector-config: |
receivers:
otlp/spanmetrics:
protocols:
grpc:
endpoint: "localhost:12345"
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
clickhousemetricswrite:
endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: "0.0.0.0:8889"
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]

View File

@ -1,73 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
selector:
matchLabels:
app: opentelemetry
component: otel-collector
minReadySeconds: 5
progressDeadlineSeconds: 120
replicas: 1 #TODO - adjust this to your own requirements
template:
metadata:
labels:
app: opentelemetry
component: otel-collector
spec:
containers:
- command:
- "/otelcontribcol"
- "--config=/conf/otel-collector-config.yaml"
# Memory Ballast size should be max 1/3 to 1/2 of memory.
- "--mem-ballast-size-mib=683"
image: signoz/otelcontribcol:0.4.2
name: otel-collector
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
ports:
- containerPort: 55679 # Default endpoint for ZPages.
- containerPort: 55680 # Legacy endpoint for OpenTelemetry gRPC receiver.
- containerPort: 55681 # Legacy endpoint for OpenTelemetry HTTP receiver.
- containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
- containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.
- containerPort: 8889 # Default endpoint for prometheus exported metrics.
volumeMounts:
- name: otel-collector-config-vol
mountPath: /conf
# - name: otel-collector-secrets
# mountPath: /secrets
livenessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
readinessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
volumes:
- configMap:
name: otel-collector-conf
items:
- key: otel-collector-config
path: otel-collector-config.yaml
name: otel-collector-config-vol
# - secret:
# name: otel-collector-secrets
# items:
# - key: cert.pem
# path: cert.pem
# - key: key.pem
# path: key.pem

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
ports:
- name: otlp # Legacy endpoint for OpenTelemetry gRPC receiver.
port: 55680
protocol: TCP
targetPort: 55680
- name: otlp-http-legacy # Legacy endpoint for OpenTelemetry HTTP receiver.
port: 55681
protocol: TCP
targetPort: 55681
- name: otlp-grpc # Default endpoint for OpenTelemetry gRPC receiver.
port: 4317
protocol: TCP
targetPort: 4317
- name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
port: 14250
- name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
port: 14268
- name: zipkin # Default endpoint for Zipkin receiver.
port: 9411
- name: metrics # Default endpoint for querying metrics.
port: 8888
- name: prometheus-metrics # Default endpoint for querying prometheus metrics.
port: 8889
selector:
component: otel-collector
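
For orientation, a minimal sketch of how an instrumented Node.js service could point at this collector Service. It assumes the 2022-era OpenTelemetry JS packages @opentelemetry/sdk-trace-node and @opentelemetry/exporter-trace-otlp-grpc; only the otel-collector:4317 address comes from the Service above, everything else is illustrative.

import { NodeTracerProvider, BatchSpanProcessor } from '@opentelemetry/sdk-trace-node';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc';

// otel-collector:4317 is the otlp-grpc port published by the Service above
const exporter = new OTLPTraceExporter({ url: 'http://otel-collector:4317' });

const provider = new NodeTracerProvider();
// client-side batching mirrors the collector's batch processor
provider.addSpanProcessor(new BatchSpanProcessor(exporter));
provider.register();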

View File

@ -1,15 +0,0 @@
zookeeper:
autopurge:
purgeInterval: 1
query-service:
configVars:
ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
STORAGE: clickhouse
cloud: aws
clickhouseOperator:
enabled: true
storage: 20Gi
serviceType: ClusterIP

View File

@ -32,6 +32,6 @@ RUN rm -rf /usr/share/nginx/html/*
# Copy from stage 1
COPY --from=builder /frontend/build /usr/share/nginx/html
EXPOSE 3000
EXPOSE 3301
ENTRYPOINT ["nginx", "-g", "daemon off;"]

View File

@ -44,7 +44,7 @@ In the project directory, you can run:
### `yarn start`
Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
The page will reload if you make edits.\
You will also see any lint errors in the console.

View File

@ -1,5 +1,5 @@
server {
listen 3000;
listen 3301;
server_name _;
gzip on;

View File

@ -4,4 +4,4 @@ services:
build: .
image: signoz/frontend:latest
ports:
- "3000:3000"
- "3301:3301"

View File

@ -42,11 +42,14 @@
"d3": "^6.2.0",
"d3-flame-graph": "^3.1.1",
"d3-tip": "^0.9.1",
"dayjs": "^1.10.7",
"dotenv": "8.2.0",
"file-loader": "6.1.1",
"history": "4.10.1",
"html-webpack-plugin": "5.1.0",
"jest": "26.6.0",
"less": "^4.1.2",
"less-loader": "^10.2.0",
"mini-css-extract-plugin": "2.4.5",
"monaco-editor": "^0.30.0",
"react": "17.0.0",
@ -116,6 +119,7 @@
"bundlesize": "^0.18.1",
"compression-webpack-plugin": "^9.0.0",
"copy-webpack-plugin": "^8.1.0",
"critters-webpack-plugin": "^3.0.1",
"cypress": "^8.3.0",
"eslint": "^7.30.0",
"eslint-config-prettier": "^8.3.0",

File diff suppressed because one or more lines are too long

After

Size: 10 KiB

View File

File diff suppressed because one or more lines are too long

Before

Size: 5.6 KiB

After

Size: 5.6 KiB

View File

@ -19,7 +19,7 @@ export const ServiceMapPage = Loadable(
);
export const TraceDetailPages = Loadable(
() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/TraceDetails'),
() => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/Trace'),
);
export const TraceGraphPage = Loadable(

View File

@ -3,14 +3,13 @@ import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/alerts/getGroups';
import convertObjectIntoParams from 'lib/query/convertObjectIntoParams';
const getGroups = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const queryParams = Object.keys(props)
.map((e) => `${e}=${props[e]}`)
.join('&');
const queryParams = convertObjectIntoParams(props);
const response = await AxiosAlertManagerInstance.get(
`/alerts/groups?${queryParams}`,
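
The refactor replaces the inline key=value join with the shared convertObjectIntoParams helper. Its implementation is not part of this hunk; a plausible sketch, consistent with the inline code it replaces:

// lib/query/convertObjectIntoParams (sketch; the committed helper may differ)
const convertObjectIntoParams = (props: Record<string, unknown>): string =>
Object.keys(props)
.map((key) => `${key}=${props[key]}`)
.join('&');

export default convertObjectIntoParams;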

View File

@ -0,0 +1,48 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getFilters';
import omitBy from 'lodash-es/omitBy';
const getFilters = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const duration =
omitBy(props.other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(props.other, (_, key) =>
key.startsWith('duration'),
);
const exclude: string[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const response = await axios.post<PayloadProps>(`/getSpanFilters`, {
start: props.start,
end: props.end,
getFilters: props.getFilters,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude: exclude,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getFilters;
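
A usage sketch for the new getFilters client, with invented values; the field shapes (time range as strings, isFilterExclude as a Map) are inferred from the reads above:

const response = await getFilters({
start: '1644420000000000000',
end: '1644423600000000000',
getFilters: ['serviceName', 'status', 'duration'],
other: { serviceName: ['frontend'] },
isFilterExclude: new Map([['status', false]]),
});

if (response.statusCode === 200) {
console.log(response.payload); // available span-filter values
}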

View File

@ -1,26 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpans';
const getSpans = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/spans?&start=${props.start}&end=${props.end}&kind=${props.kind}&lookback=${props.lookback}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&limit=${props.limit}&tags=${props.tags}`,
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpans;

View File

@ -1,26 +0,0 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate';
const getSpansAggregate = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(
`/spans/aggregates?start=${props.start}&end=${props.end}&aggregation_option=${props.aggregation_option}&dimension=${props.dimension}&kind=${props.kind}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&step=${props.step}&tags=${props.tags}`,
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpansAggregate;

View File

@ -0,0 +1,59 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import omitBy from 'lodash-es/omitBy';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpans';
const getSpans = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Operator: e.Operator,
Values: e.Values,
}));
const exclude: string[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const other = Object.fromEntries(props.selectedFilter);
const duration = omitBy(other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(other, (_, key) => key.startsWith('duration'));
const response = await axios.post<PayloadProps>(
`/getFilteredSpans/aggregates`,
{
start: String(props.start),
end: String(props.end),
function: props.function,
groupBy: props.groupBy,
step: props.step,
tags: updatedSelectedTags,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude,
},
);
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpans;
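
Note that despite the getSpans name, this client now posts to /getFilteredSpans/aggregates. A usage sketch with invented values, shapes inferred from the code above:

const response = await getSpans({
start: '1644420000000000000',
end: '1644423600000000000',
function: 'count',
groupBy: 'serviceName',
step: 60,
selectedTags: [{ Key: ['http.status_code'], Operator: 'in', Values: ['500'] }],
selectedFilter: new Map([['serviceName', ['frontend']]]),
isFilterExclude: new Map([['serviceName', false]]),
});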

View File

@ -0,0 +1,60 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import omitBy from 'lodash-es/omitBy';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate';
import { TraceFilterEnum } from 'types/reducer/trace';
const getSpanAggregate = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const preProps = {
start: String(props.start),
end: String(props.end),
limit: props.limit,
offset: props.offset,
};
const exclude: TraceFilterEnum[] = [];
props.isFilterExclude.forEach((value, key) => {
if (value) {
exclude.push(key);
}
});
const updatedSelectedTags = props.selectedTags.map((e) => ({
Key: e.Key[0],
Operator: e.Operator,
Values: e.Values,
}));
const other = Object.fromEntries(props.selectedFilter);
const duration = omitBy(other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(other, (_, key) => key.startsWith('duration'));
const response = await axios.post<PayloadProps>(`/getFilteredSpans`, {
...preProps,
tags: updatedSelectedTags,
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
exclude,
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getSpanAggregate;
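
getFilters, getSpans, getSpanAggregate and getTagFilters all share the same duration-splitting idiom. Isolated as a sketch (same lodash-es omitBy calls, invented input):

import omitBy from 'lodash-es/omitBy';

const other = { serviceName: ['frontend'], duration: ['5000000', '100000'] };

// keep only the `duration` entry...
const duration = omitBy(other, (_, key) => !key.startsWith('duration'));
// ...and everything except it
const nonDuration = omitBy(other, (_, key) => key.startsWith('duration'));

// in these clients index 0 is sent as maxDuration and index 1 as minDuration
const maxDuration = String((duration['duration'] || [])[0] || '');
const minDuration = String((duration['duration'] || [])[1] || '');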

View File

@ -0,0 +1,38 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { omitBy } from 'lodash-es';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getTagFilters';
const getTagFilters = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const duration =
omitBy(props.other, (_, key) => !key.startsWith('duration')) || [];
const nonDuration = omitBy(props.other, (_, key) =>
key.startsWith('duration'),
);
const response = await axios.post<PayloadProps>(`/getTagFilters`, {
start: String(props.start),
end: String(props.end),
...nonDuration,
maxDuration: String((duration['duration'] || [])[0] || ''),
minDuration: String((duration['duration'] || [])[1] || ''),
});
return {
statusCode: 200,
error: null,
message: 'Success',
payload: response.data,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default getTagFilters;
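
And a matching usage sketch for getTagFilters (values invented; start and end are stringified by the client itself):

const response = await getTagFilters({
start: '1644420000000000000',
end: '1644423600000000000',
other: { serviceName: ['frontend'] },
});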

View File

@ -2,18 +2,18 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getServiceOperation';
import { PayloadProps } from 'types/api/user/getUserPreference';
const getServiceOperation = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
const getPreference = async (): Promise<
SuccessResponse<PayloadProps> | ErrorResponse
> => {
try {
const response = await axios.get(`/service/${props.service}/operations`);
const response = await axios.get(`/userPreferences`);
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@ -21,4 +21,4 @@ const getServiceOperation = async (
}
};
export default getServiceOperation;
export default getPreference;

View File

@ -2,18 +2,18 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps } from 'types/api/trace/getServiceList';
import { PayloadProps } from 'types/api/user/getVersion';
const getServiceList = async (): Promise<
const getVersion = async (): Promise<
SuccessResponse<PayloadProps> | ErrorResponse
> => {
try {
const response = await axios.get('/services/list');
const response = await axios.get(`/version`);
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@ -21,4 +21,4 @@ const getServiceList = async (): Promise<
}
};
export default getServiceList;
export default getVersion;

View File

@ -2,18 +2,20 @@ import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/trace/getTags';
import { PayloadProps, Props } from 'types/api/user/setUserPreference';
const getTags = async (
const setPreference = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const response = await axios.get(`/tags?service=${props.service}`);
const response = await axios.post(`/userPreferences`, {
...props,
});
return {
statusCode: 200,
error: null,
message: 'Success',
message: response.data.status,
payload: response.data,
};
} catch (error) {
@ -21,4 +23,4 @@ const getTags = async (
}
};
export default getTags;
export default setPreference;
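
The renamed preference pair is naturally used together; a sketch in which the payload field is assumed, since its shape is not part of this diff:

const current = await getPreference();

if (current.statusCode === 200) {
// isAnonymous is an assumed field, shown only to illustrate the round trip
await setPreference({ ...current.payload, isAnonymous: true });
}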

View File

@ -9,8 +9,7 @@ const signup = async (
): Promise<SuccessResponse<undefined> | ErrorResponse> => {
try {
const response = await axios.post(`/user`, {
email: props.email,
name: props.name,
...props,
});
return {

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff.