From 1b398039e32210631c80733799e6913929a8d0e8 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Tue, 4 Jan 2022 12:47:16 +0200 Subject: [PATCH 01/81] Swapping images on the "build" GH workflow. (#578) query-service job is currently building flattener and flattener job is currently building query-service. This PR should fix that mix. Signed-off-by: Yoni Bettan --- .github/workflows/build.yaml | 4 ++-- Makefile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d38abc6c2e..1b436a32bb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -61,7 +61,7 @@ jobs: - name: Build query-service image shell: bash run: | - make build-flattener-amd64 + make build-query-service-amd64 build-flattener: runs-on: ubuntu-latest @@ -74,4 +74,4 @@ jobs: - name: Build flattener docker image shell: bash run: | - make build-query-service-amd64 + make build-flattener-amd64 diff --git a/Makefile b/Makefile index 1f2d65709a..085045e339 100644 --- a/Makefile +++ b/Makefile @@ -10,8 +10,8 @@ REPONAME ?= signoz DOCKER_TAG ?= latest FRONTEND_DOCKER_IMAGE ?= frontend -FLATTERNER_DOCKER_IMAGE ?= query-service -QUERY_SERVICE_DOCKER_IMAGE ?= flattener-processor +QUERY_SERVICE_DOCKER_IMAGE ?= query-service +FLATTERNER_DOCKER_IMAGE ?= flattener-processor all: build-push-frontend build-push-query-service build-push-flattener # Steps to build and push docker image of frontend From 63c2e67cfc59861c57eb125bc87346cdb6bb0916 Mon Sep 17 00:00:00 2001 From: Pranay Prateek Date: Thu, 6 Jan 2022 21:35:31 +0530 Subject: [PATCH 02/81] Update CONTRIBUTING.md --- CONTRIBUTING.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 78613cb375..c58a713482 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,13 +1,16 @@ # How to Contribute -There are primarily 3 areas in which you can contribute in SigNoz +There are primarily 2 areas in 
which you can contribute in SigNoz - Frontend ( written in Typescript, React) -- Query Service (written in Go) -- Flattener Processor (written in Go) +- Backend - ( Query Service - written in Go) Depending upon your area of expertise & interest, you can chose one or more to contribute. Below are detailed instructions to contribute in each area +> Please note: If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻 + +> If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted. + # Develop Frontend Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend) @@ -53,11 +56,6 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht > If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080` -# Contribute to Flattener Processor - -Not needed to run for the ClickHouse setup - -more info at [https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener](https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener) ## General Instructions From 9a2aa7bcbdde473c62f76029edbaea46d598cad7 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Sat, 8 Jan 2022 09:14:14 +0200 Subject: [PATCH 03/81] ci: adding 'e2e' GH workflows (#579) The flow contains of multiple steps: * build 'query-service' and 'frontend' images and push them to the image registry * deploy a disposable k3s cluster * deploy the app on the cluster * set a tunnel to allow accessing the UI from the web browser Signed-off-by: Yoni Bettan --- .github/workflows/README.md | 16 ++- .github/workflows/e2e.yaml | 112 ++++++++++++++++++ 
.../frontend/templates/deployment.yaml | 2 +- .../signoz-charts/frontend/values.yaml | 1 + .../query-service/templates/statefulset.yaml | 4 +- .../signoz-charts/query-service/values.yaml | 1 + 6 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/e2e.yaml diff --git a/.github/workflows/README.md b/.github/workflows/README.md index c596e08d91..f1f24a1dd8 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1,6 +1,18 @@ -To run GitHub workflow, a few environment variables needs to add in GitHub secrets +# Github actions -#### Environment Variables +## Testing the UI manually on each PR + +First we need to make sure the UI is ready +* Check the `Start tunnel` step in `e2e-k8s/deploy-on-k3s-cluster` job and make sure you see `your url is: https://pull--signoz.loca.lt` +* This job will run until the PR is merged or closed to keep the local tunneling alive + - github will cancel this job if the PR wasn't merged after 6h + - if the job was cancel, go to the action and press `Re-run all jobs` + +Now you can open your browser at https://pull--signoz.loca.lt and check the UI. 
+ +## Environment Variables + +To run GitHub workflow, a few environment variables needs to add in GitHub secrets diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 0000000000..e39de533ce --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,112 @@ +name: e2e-k8s + +on: + pull_request: + paths: + - 'pkg/query-service/**' + - 'frontend/**' + +jobs: + + image-build-and-push-query-service: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push docker image + env: + DOCKER_TAG: pull-${{ github.event.number }} + run: make build-push-query-service + + image-build-and-push-frontend: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Install dependencies + working-directory: frontend + run: yarn install + - name: Run Prettier + working-directory: frontend + run: npm run prettify + continue-on-error: true + - name: Run ESLint + working-directory: frontend + run: npm run lint + continue-on-error: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push docker image + env: + DOCKER_TAG: pull-${{ github.event.number }} + run: make build-push-frontend + + deploy-on-k3s-cluster: + runs-on: ubuntu-latest + needs: + - image-build-and-push-query-service + - image-build-and-push-frontend + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Create a k3s cluster + uses: AbsaOSS/k3d-action@v2 + 
with: + cluster-name: "test-cluster" + - name: Deploy the app + env: + TAG: pull-${{ github.event.number }} + run: | + helm dependency update deploy/kubernetes/platform + helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ + --set cloud=null \ + --set frontend.service.type=LoadBalancer \ + --set frontend.image.tag=$TAG \ + --set query-service.image.tag=$TAG + kubectl describe deploy/signoz-frontend -n platform | grep Image + kubectl describe statefulset/signoz-query-service -n platform | grep Image + # comment the next 3 lines if you open an SSH connection for debugging + kubectl -n platform get deploy --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl -n platform get statefulset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl -n platform get daemonset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl get pods -n platform + kubectl get svc -n platform + # Uncomment for debugging using SSH + #- name: Setup upterm session + # uses: lhotari/action-upterm@v1 + - name: Kick off a sample-app workload + run: | + kubectl create ns sample-application + kubectl -n sample-application apply -Rf sample-apps/hotrod/ + kubectl -n sample-application run strzal --image=djbingham/curl \ + --restart='OnFailure' -i --rm --command -- curl -X POST -F \ + 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + kubectl -n sample-application get statefulset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + kubectl -n sample-application get daemonset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + - name: Start tunnel + env: + SUBDOMAIN: pull-${{ github.event.number }}-signoz + run: | + npm install -g 
localtunnel + host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) + port=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) + lt -p $port -l $host -s $SUBDOMAIN diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml index 5da3f8cf74..7f0ac67e9e 100644 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml +++ b/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml @@ -28,7 +28,7 @@ spec: - name: {{ .Chart.Name }} securityContext: {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml index a75390c191..7538dcfec6 100644 --- a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml +++ b/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml @@ -6,6 +6,7 @@ replicaCount: 1 image: repository: signoz/frontend + tag: 0.5.4 pullPolicy: IfNotPresent imagePullSecrets: [] diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml index 097e2bbcdc..179bb4d72f 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml @@ -23,7 +23,7 @@ spec: {{- end }} securityContext: {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 
imagePullPolicy: {{ .Values.image.pullPolicy }} args: ["-config=/root/config/prometheus.yml"] ports: @@ -84,4 +84,4 @@ spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: 1Gi \ No newline at end of file + storage: 1Gi diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml index b18bf629ab..82d438b51b 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml @@ -6,6 +6,7 @@ replicaCount: 1 image: repository: signoz/query-service + tag: 0.5.4 pullPolicy: IfNotPresent imagePullSecrets: [] From b3d5d6c281a4d2913cbe6b3064fc3c6c42e8fc3e Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Sat, 8 Jan 2022 09:21:46 +0200 Subject: [PATCH 04/81] ci: requiring the 'ok-to-test' label for running some workflows (#592) * ci: adding 'e2e' GH workflows The flow contains of multiple steps: * build 'query-service' and 'frontend' images and push them to the image registry * deploy a disposable k3s cluster * deploy the app on the cluster * set a tunnel to allow accessing the UI from the web browser Signed-off-by: Yoni Bettan * ci: requiring the 'ok-to-test' label for running some workflows As of now, the 'e2e' workflow will require the 'ok-to-test' label in order to get triggered. In addition to that, on each change to the PR, Github will remove the label from it and it will be required again. 
Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 116 ++++++++++++++++++++++++++++ .github/workflows/remove-label.yaml | 18 +++++ 2 files changed, 134 insertions(+) create mode 100644 .github/workflows/e2e-k3s.yaml create mode 100644 .github/workflows/remove-label.yaml diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml new file mode 100644 index 0000000000..5719f169bd --- /dev/null +++ b/.github/workflows/e2e-k3s.yaml @@ -0,0 +1,116 @@ +name: e2e-k3s + +on: + pull_request: + types: [labeled] + paths: + - 'pkg/query-service/**' + - 'frontend/**' + +jobs: + + image-build-and-push-query-service: + runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push docker image + env: + DOCKER_TAG: pull-${{ github.event.number }} + run: make build-push-query-service + + image-build-and-push-frontend: + runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Install dependencies + working-directory: frontend + run: yarn install + - name: Run Prettier + working-directory: frontend + run: npm run prettify + continue-on-error: true + - name: Run ESLint + working-directory: frontend + run: npm run lint + continue-on-error: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push docker image + env: + DOCKER_TAG: pull-${{ 
github.event.number }} + run: make build-push-frontend + + deploy-on-k3s-cluster: + runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} + needs: + - image-build-and-push-query-service + - image-build-and-push-frontend + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Create a k3s cluster + uses: AbsaOSS/k3d-action@v2 + with: + cluster-name: "test-cluster" + - name: Deploy the app + env: + TAG: pull-${{ github.event.number }} + run: | + helm dependency update deploy/kubernetes/platform + helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ + --set cloud=null \ + --set frontend.service.type=LoadBalancer \ + --set frontend.image.tag=$TAG \ + --set query-service.image.tag=$TAG + kubectl describe deploy/signoz-frontend -n platform | grep Image + kubectl describe statefulset/signoz-query-service -n platform | grep Image + # comment the next 3 lines if you open an SSH connection for debugging + kubectl -n platform get deploy --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl -n platform get statefulset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl -n platform get daemonset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s + kubectl get pods -n platform + kubectl get svc -n platform + # Uncomment for debugging using SSH + #- name: Setup upterm session + # uses: lhotari/action-upterm@v1 + - name: Kick off a sample-app workload + run: | + kubectl create ns sample-application + kubectl -n sample-application apply -Rf sample-apps/hotrod/ + kubectl -n sample-application run strzal --image=djbingham/curl \ + --restart='OnFailure' -i --rm --command -- curl -X POST -F \ + 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + kubectl -n 
sample-application get statefulset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + kubectl -n sample-application get daemonset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + - name: Start tunnel + env: + SUBDOMAIN: pull-${{ github.event.number }}-signoz + run: | + npm install -g localtunnel + host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) + port=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) + lt -p $port -l $host -s $SUBDOMAIN diff --git a/.github/workflows/remove-label.yaml b/.github/workflows/remove-label.yaml new file mode 100644 index 0000000000..597c235f81 --- /dev/null +++ b/.github/workflows/remove-label.yaml @@ -0,0 +1,18 @@ +name: remove-label + +on: + pull_request: + types: [synchronize] + +jobs: + remove: + runs-on: ubuntu-latest + if: github.event.pull_request.head.repo.full_name != 'openshift-psap/special-resource-operator' + steps: + - name: Remove label + uses: buildsville/add-remove-label@v1 + with: + label: ok-to-test + type: remove + token: ${{ secrets.REPO_ACCESS_TOKEN }} + From b433d4ad4a04bf87a910236bc5b369129f003873 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Sat, 8 Jan 2022 13:07:38 +0200 Subject: [PATCH 05/81] Revert "ci: requiring the 'ok-to-test' label for running some workflows (#592)" (#595) This reverts commit b3d5d6c281a4d2913cbe6b3064fc3c6c42e8fc3e. 
--- .github/workflows/e2e-k3s.yaml | 116 ---------------------------- .github/workflows/remove-label.yaml | 18 ----- 2 files changed, 134 deletions(-) delete mode 100644 .github/workflows/e2e-k3s.yaml delete mode 100644 .github/workflows/remove-label.yaml diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml deleted file mode 100644 index 5719f169bd..0000000000 --- a/.github/workflows/e2e-k3s.yaml +++ /dev/null @@ -1,116 +0,0 @@ -name: e2e-k3s - -on: - pull_request: - types: [labeled] - paths: - - 'pkg/query-service/**' - - 'frontend/**' - -jobs: - - image-build-and-push-query-service: - runs-on: ubuntu-latest - if: ${{ github.event.label.name == 'ok-to-test' }} - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - with: - version: latest - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker image - env: - DOCKER_TAG: pull-${{ github.event.number }} - run: make build-push-query-service - - image-build-and-push-frontend: - runs-on: ubuntu-latest - if: ${{ github.event.label.name == 'ok-to-test' }} - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Install dependencies - working-directory: frontend - run: yarn install - - name: Run Prettier - working-directory: frontend - run: npm run prettify - continue-on-error: true - - name: Run ESLint - working-directory: frontend - run: npm run lint - continue-on-error: true - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - with: - version: latest - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker image - env: - DOCKER_TAG: pull-${{ github.event.number }} - run: 
make build-push-frontend - - deploy-on-k3s-cluster: - runs-on: ubuntu-latest - if: ${{ github.event.label.name == 'ok-to-test' }} - needs: - - image-build-and-push-query-service - - image-build-and-push-frontend - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Create a k3s cluster - uses: AbsaOSS/k3d-action@v2 - with: - cluster-name: "test-cluster" - - name: Deploy the app - env: - TAG: pull-${{ github.event.number }} - run: | - helm dependency update deploy/kubernetes/platform - helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ - --set cloud=null \ - --set frontend.service.type=LoadBalancer \ - --set frontend.image.tag=$TAG \ - --set query-service.image.tag=$TAG - kubectl describe deploy/signoz-frontend -n platform | grep Image - kubectl describe statefulset/signoz-query-service -n platform | grep Image - # comment the next 3 lines if you open an SSH connection for debugging - kubectl -n platform get deploy --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s - kubectl -n platform get statefulset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s - kubectl -n platform get daemonset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s - kubectl get pods -n platform - kubectl get svc -n platform - # Uncomment for debugging using SSH - #- name: Setup upterm session - # uses: lhotari/action-upterm@v1 - - name: Kick off a sample-app workload - run: | - kubectl create ns sample-application - kubectl -n sample-application apply -Rf sample-apps/hotrod/ - kubectl -n sample-application run strzal --image=djbingham/curl \ - --restart='OnFailure' -i --rm --command -- curl -X POST -F \ - 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm - kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - kubectl -n sample-application get statefulset 
--output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - kubectl -n sample-application get daemonset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - - name: Start tunnel - env: - SUBDOMAIN: pull-${{ github.event.number }}-signoz - run: | - npm install -g localtunnel - host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) - port=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) - lt -p $port -l $host -s $SUBDOMAIN diff --git a/.github/workflows/remove-label.yaml b/.github/workflows/remove-label.yaml deleted file mode 100644 index 597c235f81..0000000000 --- a/.github/workflows/remove-label.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: remove-label - -on: - pull_request: - types: [synchronize] - -jobs: - remove: - runs-on: ubuntu-latest - if: github.event.pull_request.head.repo.full_name != 'openshift-psap/special-resource-operator' - steps: - - name: Remove label - uses: buildsville/add-remove-label@v1 - with: - label: ok-to-test - type: remove - token: ${{ secrets.REPO_ACCESS_TOKEN }} - From e4ef059d1930ab45b1b938f0b702a019cca2de60 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Fri, 7 Jan 2022 09:48:52 +0200 Subject: [PATCH 06/81] ci: requiring the 'ok-to-test' label for running some workflows As of now, the 'e2e-k3s' workflow will require the 'ok-to-test' label in order to get triggered. In addition to that, on each change to the PR on the relevant files, Github will remove the label from it and it will be required again. 
Signed-off-by: Yoni Bettan --- .github/workflows/{e2e.yaml => e2e-k3s.yaml} | 6 +++++- .github/workflows/remove-label.yaml | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) rename .github/workflows/{e2e.yaml => e2e-k3s.yaml} (95%) create mode 100644 .github/workflows/remove-label.yaml diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e-k3s.yaml similarity index 95% rename from .github/workflows/e2e.yaml rename to .github/workflows/e2e-k3s.yaml index e39de533ce..5719f169bd 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -1,7 +1,8 @@ -name: e2e-k8s +name: e2e-k3s on: pull_request: + types: [labeled] paths: - 'pkg/query-service/**' - 'frontend/**' @@ -10,6 +11,7 @@ jobs: image-build-and-push-query-service: runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} steps: - name: Checkout code uses: actions/checkout@v2 @@ -31,6 +33,7 @@ jobs: image-build-and-push-frontend: runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} steps: - name: Checkout code uses: actions/checkout@v2 @@ -61,6 +64,7 @@ jobs: deploy-on-k3s-cluster: runs-on: ubuntu-latest + if: ${{ github.event.label.name == 'ok-to-test' }} needs: - image-build-and-push-query-service - image-build-and-push-frontend diff --git a/.github/workflows/remove-label.yaml b/.github/workflows/remove-label.yaml new file mode 100644 index 0000000000..427cf9017d --- /dev/null +++ b/.github/workflows/remove-label.yaml @@ -0,0 +1,20 @@ +name: remove-label + +on: + pull_request: + types: [synchronize] + paths: + - 'pkg/query-service/**' + - 'frontend/**' + +jobs: + remove: + runs-on: ubuntu-latest + steps: + - name: Remove label + uses: buildsville/add-remove-label@v1 + with: + label: ok-to-test + type: remove + token: ${{ secrets.REPO_ACCESS_TOKEN }} + From e555e05f586dfed4514e886499041f13138769e6 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Fri, 7 Jan 2022 11:18:35 +0200 Subject: [PATCH 07/81] ci: adding 'push' 
workflow This workflow will push up to 2 images with 4 tags, depending on if they changed since the last image. * query-service: * query-service:master * frontend: * frontend:master Signed-off-by: Yoni Bettan --- .github/workflows/push.yaml | 182 ++++++++---------------------------- Makefile | 14 +++ 2 files changed, 55 insertions(+), 141 deletions(-) diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index facd460d08..288b956d92 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -1,172 +1,72 @@ -name: push-pipeline +name: push + on: push: - branches: - - main - - ^v[0-9]*.[0-9]*.x$ - tags: - - "*" - # pull_request: - # branches: - # - main - # - v* - # paths: - # - 'pkg/**' - # - 'frontend/**' + paths: + - 'pkg/query-service/**' + - 'frontend/**' + jobs: - get-envs: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - shell: bash - run: | - img_tag="" - array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`) - if [ ${array[1]} == "tags" ] - then - echo "tag build" - img_tag=${GITHUB_REF#refs/*/v} - elif [ ${array[1]} == "pull" ] - then - img_tag="pull-${{ github.event.number }}" - else - echo "non tag build" - img_tag="latest" - fi - # This is a condition where image tag looks like "pull/" during pull request build - NEW_IMG_TAG=`echo $img_tag | sed "s/\//-/g"` - echo $NEW_IMG_TAG - echo export IMG_TAG=$NEW_IMG_TAG >> env-vars - echo export FRONTEND_IMAGE="frontend" >> env-vars - echo export QUERY_SERVICE="query-service" >> env-vars - echo export FLATTENER_PROCESSOR="flattener-processor" >> env-vars - - name: Uploading envs - uses: actions/upload-artifact@v2 - with: - name: env_artifact - path: env-vars - - build-and-push-frontend: + image-build-and-push-query-service: runs-on: ubuntu-latest - needs: - - get-envs steps: - name: Checkout code uses: actions/checkout@v2 - - - name: Downloading image artifact - uses: actions/download-artifact@v2 - with: - name: env_artifact - + - name: 
Set up QEMU + uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx - id: buildx uses: docker/setup-buildx-action@v1 with: version: latest - - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - uses: benjlevesque/short-sha@v1.2 + id: short-sha + - name: Get branch name + id: branch-name + uses: tj-actions/branch-names@v5.1 + - name: Build and push docker image + env: + DOCKER_TAG: ${{ steps.short-sha.outputs.sha }} + DOCKER_SECOND_TAG: ${{ steps.branch-name.outputs.current_branch }} + run: make build-push-query-service - - name: Build & Push Frontend Docker Image - shell: bash - env: - FRONTEND_DIRECTORY: "frontend" - REPONAME: ${{ secrets.REPONAME }} - FRONTEND_DOCKER_IMAGE: ${FRONTEND_IMAGE} - DOCKER_TAG: ${IMG_TAG} - run: | - branch=${GITHUB_REF#refs/*/} - array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`) - if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]] - then - source env-vars - make build-push-frontend - fi - - build-and-push-query-service: + image-build-and-push-frontend: runs-on: ubuntu-latest - needs: - - get-envs steps: - name: Checkout code uses: actions/checkout@v2 - - - name: Downloading image artifact - uses: actions/download-artifact@v2 - with: - name: env_artifact - + - name: Install dependencies + working-directory: frontend + run: yarn install + - name: Run Prettier + working-directory: frontend + run: npm run prettify + continue-on-error: true + - name: Run ESLint + working-directory: frontend + run: npm run lint + continue-on-error: true - name: Set up Docker Buildx - id: buildx uses: docker/setup-buildx-action@v1 with: version: latest - - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build & Push Query Service Docker Image - shell: bash - env: - 
QUERY_SERVICE_DIRECTORY: "pkg/query-service" - REPONAME: ${{ secrets.REPONAME }} - QUERY_SERVICE_DOCKER_IMAGE: ${QUERY_SERVICE} - DOCKER_TAG: ${IMG_TAG} - run: | - branch=${GITHUB_REF#refs/*/} - array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`) - if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] ||[[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]] - then - source env-vars - make build-push-query-service - fi - - build-and-push-flattener: - runs-on: ubuntu-latest - needs: - - get-envs - steps: - - name: Checkout code - uses: actions/checkout@v2 - - - name: Downloading image artifact - uses: actions/download-artifact@v2 - with: - name: env_artifact - - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - with: - version: latest - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build & Push Flattener Processor Docker Image - shell: bash - env: - FLATTENER_DIRECTORY: "pkg/processors/flattener" - REPONAME: ${{ secrets.REPONAME }} - FLATTERNER_DOCKER_IMAGE: ${FLATTENER_PROCESSOR} - DOCKER_TAG: ${IMG_TAG} - run: | - branch=${GITHUB_REF#refs/*/} - array=(`echo ${GITHUB_REF} | sed 's/\//\n/g'`) - if [ $branch == "main" ] || [ ${array[1]} == "tags" ] || [ ${array[1]} == "pull" ] || [[ $branch =~ ^v[0-9]*.[0-9]*.x$ ]] - then - source env-vars - make build-push-flattener - fi + - uses: benjlevesque/short-sha@v1.2 + id: short-sha + - name: Get branch name + id: branch-name + uses: tj-actions/branch-names@v5.1 + - name: Build and push docker image + env: + DOCKER_TAG: ${{ steps.short-sha.outputs.sha }} + DOCKER_SECOND_TAG: ${{ steps.branch-name.outputs.current_branch }} + run: make build-push-frontend diff --git a/Makefile b/Makefile index 085045e339..83ba66976b 100644 --- a/Makefile +++ b/Makefile @@ -29,8 +29,15 @@ build-push-frontend: @echo "------------------" @echo "--> Building and pushing frontend docker 
image" @echo "------------------" +ifndef DOCKER_SECOND_TAG @cd $(FRONTEND_DIRECTORY) && \ docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . +else + @cd $(FRONTEND_DIRECTORY) && \ + docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 . \ + --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \ + --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) +endif # Steps to build and push docker image of query service .PHONY: build-query-service-amd64 build-push-query-service @@ -47,8 +54,15 @@ build-push-query-service: @echo "------------------" @echo "--> Building and pushing query-service docker image" @echo "------------------" +ifndef DOCKER_SECOND_TAG @cd $(QUERY_SERVICE_DIRECTORY) && \ docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . +else + @cd $(QUERY_SERVICE_DIRECTORY) && \ + docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 . 
\ + --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \ + --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) +endif # Steps to build and push docker image of flattener .PHONY: build-flattener-amd64 build-push-flattener From e08bf85edfa4f2fc39862232f301498a15d7c124 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Sat, 8 Jan 2022 11:41:13 +0200 Subject: [PATCH 08/81] ci: using --wait helm install flag instead of waiting manually Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 5719f169bd..4a2710c6e7 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -81,21 +81,15 @@ jobs: run: | helm dependency update deploy/kubernetes/platform helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ + --wait --timeout 10m0s \ --set cloud=null \ --set frontend.service.type=LoadBalancer \ --set frontend.image.tag=$TAG \ --set query-service.image.tag=$TAG kubectl describe deploy/signoz-frontend -n platform | grep Image kubectl describe statefulset/signoz-query-service -n platform | grep Image - # comment the next 3 lines if you open an SSH connection for debugging - kubectl -n platform get deploy --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s - kubectl -n platform get statefulset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s - kubectl -n platform get daemonset --output name | xargs -r -n1 -t kubectl -n platform rollout status --timeout=300s kubectl get pods -n platform kubectl get svc -n platform - # Uncomment for debugging using SSH - #- name: Setup upterm session - # uses: lhotari/action-upterm@v1 - name: Kick off a sample-app workload run: | kubectl create ns sample-application From 53045fc58e840df9b8cdc44aa5a21c36945be338 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: 
Tue, 11 Jan 2022 11:12:56 +0200 Subject: [PATCH 09/81] ci: inject local images to k3d instead of publishing them (#600) --- .github/workflows/e2e-k3s.yaml | 74 ++++++---------------------------- 1 file changed, 12 insertions(+), 62 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 4a2710c6e7..402d775d09 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -9,83 +9,33 @@ on: jobs: - image-build-and-push-query-service: + e2e-k3s: runs-on: ubuntu-latest if: ${{ github.event.label.name == 'ok-to-test' }} + env: + DOCKER_TAG: pull-${{ github.event.number }} steps: - name: Checkout code uses: actions/checkout@v2 - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - with: - version: latest - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker image - env: - DOCKER_TAG: pull-${{ github.event.number }} - run: make build-push-query-service - - image-build-and-push-frontend: - runs-on: ubuntu-latest - if: ${{ github.event.label.name == 'ok-to-test' }} - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Install dependencies - working-directory: frontend - run: yarn install - - name: Run Prettier - working-directory: frontend - run: npm run prettify - continue-on-error: true - - name: Run ESLint - working-directory: frontend - run: npm run lint - continue-on-error: true - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - with: - version: latest - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push docker image - env: - DOCKER_TAG: pull-${{ github.event.number }} - run: make build-push-frontend - - deploy-on-k3s-cluster: - runs-on: 
ubuntu-latest - if: ${{ github.event.label.name == 'ok-to-test' }} - needs: - - image-build-and-push-query-service - - image-build-and-push-frontend - steps: - - name: Checkout code - uses: actions/checkout@v2 + - name: Build query-service image + run: make build-query-service-amd64 + - name: Build frontend image + run: make build-frontend-amd64 - name: Create a k3s cluster uses: AbsaOSS/k3d-action@v2 with: - cluster-name: "test-cluster" + cluster-name: "signoz-cluster" + - name: Inject the images to the cluster + run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz-cluster - name: Deploy the app - env: - TAG: pull-${{ github.event.number }} run: | helm dependency update deploy/kubernetes/platform helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ --wait --timeout 10m0s \ --set cloud=null \ --set frontend.service.type=LoadBalancer \ - --set frontend.image.tag=$TAG \ - --set query-service.image.tag=$TAG + --set query-service.image.tag=$DOCKER_TAG \ + --set frontend.image.tag=$DOCKER_TAG kubectl describe deploy/signoz-frontend -n platform | grep Image kubectl describe statefulset/signoz-query-service -n platform | grep Image kubectl get pods -n platform From 0bc82237fc5acdf92f77c6e64ba6cb0cdc57589f Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Wed, 12 Jan 2022 11:27:14 +0200 Subject: [PATCH 10/81] ci: making sure the sample-application is up before running the job (#603) ci: making sure the sample-application is up before running the job * tmp - timeout Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 402d775d09..f765f2988d 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -12,22 +12,28 @@ jobs: e2e-k3s: runs-on: ubuntu-latest if: ${{ github.event.label.name == 'ok-to-test' }} - env: - 
DOCKER_TAG: pull-${{ github.event.number }} steps: - name: Checkout code uses: actions/checkout@v2 - name: Build query-service image + env: + DOCKER_TAG: pull-${{ github.event.number }} run: make build-query-service-amd64 - name: Build frontend image + env: + DOCKER_TAG: pull-${{ github.event.number }} run: make build-frontend-amd64 - name: Create a k3s cluster uses: AbsaOSS/k3d-action@v2 with: cluster-name: "signoz-cluster" - name: Inject the images to the cluster + env: + DOCKER_TAG: pull-${{ github.event.number }} run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz-cluster - name: Deploy the app + env: + DOCKER_TAG: pull-${{ github.event.number }} run: | helm dependency update deploy/kubernetes/platform helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ @@ -44,17 +50,15 @@ jobs: run: | kubectl create ns sample-application kubectl -n sample-application apply -Rf sample-apps/hotrod/ + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s kubectl -n sample-application run strzal --image=djbingham/curl \ --restart='OnFailure' -i --rm --command -- curl -X POST -F \ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm - kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - kubectl -n sample-application get statefulset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - kubectl -n sample-application get daemonset --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - name: Start tunnel env: - SUBDOMAIN: pull-${{ github.event.number }}-signoz + DOCKER_TAG: pull-${{ github.event.number }} run: | npm install -g localtunnel host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) port=$(kubectl get svc -n platform | grep 
signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) - lt -p $port -l $host -s $SUBDOMAIN + lt -p $port -l $host -s "$DOCKER_TAG" From 51dc54bcb9627dbd91dd4b74d34c5119ffc54389 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Wed, 12 Jan 2022 11:45:09 +0200 Subject: [PATCH 11/81] ci: removing the timeout from the rollout command It makes the flow fail for some reason. Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index f765f2988d..b54b08e8d1 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -50,7 +50,7 @@ jobs: run: | kubectl create ns sample-application kubectl -n sample-application apply -Rf sample-apps/hotrod/ - kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status kubectl -n sample-application run strzal --image=djbingham/curl \ --restart='OnFailure' -i --rm --command -- curl -X POST -F \ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm From 68d68c2b5794ee86a013ae13774ff33d4b7a5866 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sun, 16 Jan 2022 14:55:14 +0530 Subject: [PATCH 12/81] =?UTF-8?q?fix(frontend):=20=F0=9F=93=8C=20=20pin=20?= =?UTF-8?q?mini-css-extract-plugin=20version=20to=202.4.5=20to=20fix=20bre?= =?UTF-8?q?aking=20builds=20(#612)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/package.json b/frontend/package.json index a48595c124..46ced969cf 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -47,7 +47,7 @@ "history": "4.10.1", "html-webpack-plugin": "5.1.0", "jest": "26.6.0", - 
"mini-css-extract-plugin": "^2.4.5", + "mini-css-extract-plugin": "2.4.5", "monaco-editor": "^0.30.0", "react": "17.0.0", "react-dom": "17.0.0", From 1eb0013352b761998379da0879345a5a857d7ac3 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Mon, 17 Jan 2022 08:10:17 +0200 Subject: [PATCH 13/81] ci: removing file filtering from some workflows (#610) There are other files that can affect the correctness of the code rather than the src files like the deployment yamls, Makefile etc. Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 3 --- .github/workflows/remove-label.yaml | 3 --- 2 files changed, 6 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index b54b08e8d1..2b0072aa87 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -3,9 +3,6 @@ name: e2e-k3s on: pull_request: types: [labeled] - paths: - - 'pkg/query-service/**' - - 'frontend/**' jobs: diff --git a/.github/workflows/remove-label.yaml b/.github/workflows/remove-label.yaml index 427cf9017d..94156a7c48 100644 --- a/.github/workflows/remove-label.yaml +++ b/.github/workflows/remove-label.yaml @@ -3,9 +3,6 @@ name: remove-label on: pull_request: types: [synchronize] - paths: - - 'pkg/query-service/**' - - 'frontend/**' jobs: remove: From e0e4c7afe66a47f24e7ba336e8d60657e2aca032 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Mon, 17 Jan 2022 11:47:31 +0200 Subject: [PATCH 14/81] ci: filtering 'push' workflow to main and release branches (#614) Signed-off-by: Yoni Bettan --- .github/workflows/push.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 288b956d92..76385569e1 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -5,6 +5,10 @@ on: paths: - 'pkg/query-service/**' - 'frontend/**' + branches: + - main + tags: + - ^v[0-9]+.[0-9]+.[0-9]+$ jobs: From b0b235cbc59325bfb69c57ca1340a701676003cc Mon Sep 17 00:00:00 2001 From: Yoni 
Bettan Date: Mon, 17 Jan 2022 12:29:27 +0200 Subject: [PATCH 15/81] ci: making some improvements to e2e-k3s workflow (#615) Signed-off-by: Yoni Bettan --- .github/workflows/e2e-k3s.yaml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 2b0072aa87..dcd819a570 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -9,28 +9,22 @@ jobs: e2e-k3s: runs-on: ubuntu-latest if: ${{ github.event.label.name == 'ok-to-test' }} + env: + DOCKER_TAG: pull-${{ github.event.number }} steps: - name: Checkout code uses: actions/checkout@v2 - name: Build query-service image - env: - DOCKER_TAG: pull-${{ github.event.number }} run: make build-query-service-amd64 - name: Build frontend image - env: - DOCKER_TAG: pull-${{ github.event.number }} run: make build-frontend-amd64 - name: Create a k3s cluster uses: AbsaOSS/k3d-action@v2 with: - cluster-name: "signoz-cluster" + cluster-name: "signoz" - name: Inject the images to the cluster - env: - DOCKER_TAG: pull-${{ github.event.number }} - run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz-cluster + run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz - name: Deploy the app - env: - DOCKER_TAG: pull-${{ github.event.number }} run: | helm dependency update deploy/kubernetes/platform helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ @@ -47,15 +41,21 @@ jobs: run: | kubectl create ns sample-application kubectl -n sample-application apply -Rf sample-apps/hotrod/ - kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + #TODO: we can remove that sleep once the sample app readiness prob is fixed. 
+ sleep 300 # if we don't wait the curl command return success but doesn't kick-of the requests kubectl -n sample-application run strzal --image=djbingham/curl \ --restart='OnFailure' -i --rm --command -- curl -X POST -F \ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm + sleep 600 # give some time for the data to reach signoz + - name: Get commit short sha + uses: benjlevesque/short-sha@v1.2 + id: short-sha - name: Start tunnel env: - DOCKER_TAG: pull-${{ github.event.number }} + COMMIT_SHA: pr-${{ steps.short-sha.outputs.sha }} run: | npm install -g localtunnel host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) port=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) - lt -p $port -l $host -s "$DOCKER_TAG" + lt -p $port -l $host -s $COMMIT_SHA From 8b47f4af2126d46a794f29180d843b05e1ae1cd7 Mon Sep 17 00:00:00 2001 From: Yoni Bettan Date: Mon, 17 Jan 2022 12:42:18 +0200 Subject: [PATCH 16/81] ci: adding a dummy push to check if the image push workflow works (#609) Signed-off-by: Yoni Bettan --- pkg/query-service/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index a452c26a9a..15cc9ad05c 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -28,7 +28,7 @@ func main() { defer loggerMgr.Sync() // flushes buffer, if any logger := loggerMgr.Sugar() - logger.Debug("START!") + logger.Debug("STARTING!") serverOptions := &app.ServerOptions{ // HTTPHostPort: v.GetString(app.HTTPHostPort), From cac31072a9338f29553ee8fe4b3f9be3f002b03a Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Mon, 17 Jan 2022 21:35:44 +0530 Subject: [PATCH 17/81] ci: use pull_request_target for remove label permission (#618) --- .github/workflows/remove-label.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/remove-label.yaml 
b/.github/workflows/remove-label.yaml index 94156a7c48..6881b9eb77 100644 --- a/.github/workflows/remove-label.yaml +++ b/.github/workflows/remove-label.yaml @@ -1,7 +1,7 @@ name: remove-label on: - pull_request: + pull_request_target: types: [synchronize] jobs: @@ -13,5 +13,4 @@ jobs: with: label: ok-to-test type: remove - token: ${{ secrets.REPO_ACCESS_TOKEN }} - + token: ${{ secrets.GITHUB_TOKEN }} From 8467d6a00c1322a2b6af3c474fde9131b8bad14b Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Thu, 20 Jan 2022 00:16:46 +0530 Subject: [PATCH 18/81] chore: Add migration file path in otel collector config (#628) * chore: Add migration file path in otel collector config * Update otel-collector-config.yaml --- deploy/docker/clickhouse-setup/otel-collector-config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index eeaf7221d7..0519785309 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -45,6 +45,7 @@ extensions: exporters: clickhouse: datasource: tcp://clickhouse:9000 + migrations: /migrations clickhousemetricswrite: endpoint: tcp://clickhouse:9000/?database=signoz_metrics resource_to_telemetry_conversion: @@ -64,4 +65,4 @@ service: exporters: [clickhousemetricswrite] metrics/spanmetrics: receivers: [otlp/spanmetrics] - exporters: [prometheus] \ No newline at end of file + exporters: [prometheus] From 50e8f32291225a6bd0344d150b8e220b07c0ca98 Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Thu, 20 Jan 2022 00:22:21 +0530 Subject: [PATCH 19/81] Revert "chore: Add migration file path in otel collector config (#628)" (#629) This reverts commit 8467d6a00c1322a2b6af3c474fde9131b8bad14b. 
--- deploy/docker/clickhouse-setup/otel-collector-config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index 0519785309..eeaf7221d7 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -45,7 +45,6 @@ extensions: exporters: clickhouse: datasource: tcp://clickhouse:9000 - migrations: /migrations clickhousemetricswrite: endpoint: tcp://clickhouse:9000/?database=signoz_metrics resource_to_telemetry_conversion: @@ -65,4 +64,4 @@ service: exporters: [clickhousemetricswrite] metrics/spanmetrics: receivers: [otlp/spanmetrics] - exporters: [prometheus] + exporters: [prometheus] \ No newline at end of file From af2399e6271cb6c94ce7b50ba2db088131921f9b Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Fri, 21 Jan 2022 00:31:58 +0530 Subject: [PATCH 20/81] Feat/support error tab page (#626) * build: integrate sql migrations for clickhouse * feat: support error/exception attributes for trace * chore: fixing dependencies for docker go client libs * chore: fixing dependencies for go-migrate * feat: move mirgate run from docker to code * fix: migration file 404 issue * feat: error tab APIs * chore: move migrations file * chore: remove SQL migration (shifted to otel collector) * chore: remove sql migration configs from dockerfile Co-authored-by: Ankit Nayan --- .../clickhouse-setup/docker-compose.yaml | 6 +- .../docker-entrypoint-initdb.d/init-db.sql | 31 -- .../app/clickhouseReader/options.go | 4 + .../app/clickhouseReader/reader.go | 84 +++- pkg/query-service/app/druidReader/reader.go | 12 + pkg/query-service/app/http_handler.go | 48 +++ pkg/query-service/app/interface.go | 4 +- pkg/query-service/app/parser.go | 41 ++ pkg/query-service/app/server.go | 2 +- pkg/query-service/config/dashboards/.gitkeep | 0 pkg/query-service/go.mod | 36 +- 
pkg/query-service/go.sum | 382 ++++++++++++------ pkg/query-service/main.go | 3 + pkg/query-service/model/queryParams.go | 11 + pkg/query-service/model/response.go | 35 +- 15 files changed, 531 insertions(+), 168 deletions(-) delete mode 100644 deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql create mode 100644 pkg/query-service/config/dashboards/.gitkeep diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index d6637ee5f6..16851112da 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -11,7 +11,6 @@ services: - 8123:8123 volumes: - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql - ./data/clickhouse/:/var/lib/clickhouse/ healthcheck: @@ -66,7 +65,6 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf - otel-collector: image: signoz/otelcontribcol:0.4.2 command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"] @@ -98,6 +96,7 @@ services: depends_on: clickhouse: condition: service_healthy + hotrod: image: jaegertracing/example-hotrod:latest container_name: hotrod @@ -107,7 +106,6 @@ services: environment: - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces - load-hotrod: image: "grubykarol/locust:1.2.3-python3.9-alpine3.12" container_name: load-hotrod @@ -123,4 +121,4 @@ services: QUIET_MODE: "${QUIET_MODE:-false}" LOCUST_OPTS: "--headless -u 10 -r 1" volumes: - - ../common/locust-scripts:/locust \ No newline at end of file + - ../common/locust-scripts:/locust diff --git a/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql b/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql deleted file mode 100644 index f71983c083..0000000000 --- a/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql +++ /dev/null @@ 
-1,31 +0,0 @@ -CREATE TABLE IF NOT EXISTS signoz_index ( - timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), - traceID String CODEC(ZSTD(1)), - spanID String CODEC(ZSTD(1)), - parentSpanID String CODEC(ZSTD(1)), - serviceName LowCardinality(String) CODEC(ZSTD(1)), - name LowCardinality(String) CODEC(ZSTD(1)), - kind Int32 CODEC(ZSTD(1)), - durationNano UInt64 CODEC(ZSTD(1)), - tags Array(String) CODEC(ZSTD(1)), - tagsKeys Array(String) CODEC(ZSTD(1)), - tagsValues Array(String) CODEC(ZSTD(1)), - statusCode Int64 CODEC(ZSTD(1)), - references String CODEC(ZSTD(1)), - externalHttpMethod Nullable(String) CODEC(ZSTD(1)), - externalHttpUrl Nullable(String) CODEC(ZSTD(1)), - component Nullable(String) CODEC(ZSTD(1)), - dbSystem Nullable(String) CODEC(ZSTD(1)), - dbName Nullable(String) CODEC(ZSTD(1)), - dbOperation Nullable(String) CODEC(ZSTD(1)), - peerService Nullable(String) CODEC(ZSTD(1)), - INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4, - INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4, - INDEX idx_name name TYPE bloom_filter GRANULARITY 4, - INDEX idx_kind kind TYPE minmax GRANULARITY 4, - INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_duration durationNano TYPE minmax GRANULARITY 1 -) ENGINE MergeTree() -PARTITION BY toDate(timestamp) -ORDER BY (serviceName, -toUnixTimestamp(timestamp)) \ No newline at end of file diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index abf66c852d..979d0bf147 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -20,6 +20,7 @@ const ( defaultOperationsTable string = "signoz_operations" defaultIndexTable string = "signoz_index" defaultSpansTable string = "signoz_spans" + defaultErrorTable string = "signoz_error_index" defaultArchiveSpansTable string = "signoz_archive_spans" 
defaultWriteBatchDelay time.Duration = 5 * time.Second defaultWriteBatchSize int = 10000 @@ -45,6 +46,7 @@ type namespaceConfig struct { OperationsTable string IndexTable string SpansTable string + ErrorTable string WriteBatchDelay time.Duration WriteBatchSize int Encoding Encoding @@ -89,6 +91,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s OperationsTable: defaultOperationsTable, IndexTable: defaultIndexTable, SpansTable: defaultSpansTable, + ErrorTable: defaultErrorTable, WriteBatchDelay: defaultWriteBatchDelay, WriteBatchSize: defaultWriteBatchSize, Encoding: defaultEncoding, @@ -105,6 +108,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s OperationsTable: "", IndexTable: "", SpansTable: defaultArchiveSpansTable, + ErrorTable: "", WriteBatchDelay: defaultWriteBatchDelay, WriteBatchSize: defaultWriteBatchSize, Encoding: defaultEncoding, diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 30dd324e6f..2ea149a563 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "crypto/md5" + "database/sql" "encoding/json" "flag" "fmt" @@ -38,9 +39,9 @@ import ( "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/storage/tsdb" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/strutil" - "github.com/prometheus/tsdb" "go.signoz.io/query-service/constants" "go.signoz.io/query-service/model" @@ -72,6 +73,7 @@ type ClickHouseReader struct { localDB *sqlx.DB operationsTable string indexTable string + errorTable string spansTable string queryEngine *promql.Engine remoteStorage *remote.Storage @@ -97,6 +99,7 @@ func NewReader(localDB *sqlx.DB) *ClickHouseReader { operationsTable: 
options.primary.OperationsTable, indexTable: options.primary.IndexTable, spansTable: options.primary.SpansTable, + errorTable: options.primary.ErrorTable, } } @@ -1580,7 +1583,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[ var searchScanReponses []model.SearchSpanReponseItem - query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references FROM %s WHERE traceID=?", r.indexTable) + query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references, events FROM %s WHERE traceID='%s'", r.indexTable, traceId) err := r.db.Select(&searchScanReponses, query, traceId) @@ -1593,7 +1596,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[ searchSpansResult := []model.SearchSpansResult{ model.SearchSpansResult{ - Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References"}, + Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events"}, Events: make([][]interface{}, len(searchScanReponses)), }, } @@ -1877,3 +1880,78 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa return &model.GetTTLResponseItem{TracesTime: parseTTL(db1.EngineFull), MetricsTime: parseTTL(db2.EngineFull)}, nil } + +func (r *ClickHouseReader) GetErrors(ctx context.Context, queryParams *model.GetErrorsParams) (*[]model.Error, *model.ApiError) { + + var getErrorReponses []model.Error + + query := fmt.Sprintf("SELECT exceptionType, exceptionMessage, count() AS exceptionCount, min(timestamp) as firstSeen, max(timestamp) as lastSeen, serviceName FROM %s WHERE timestamp >= ? AND timestamp <= ? 
GROUP BY serviceName, exceptionType, exceptionMessage", r.errorTable) + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} + + err := r.db.Select(&getErrorReponses, query, args...) + + zap.S().Info(query) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + return &getErrorReponses, nil + +} + +func (r *ClickHouseReader) GetErrorForId(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { + + if queryParams.ErrorID == "" { + zap.S().Debug("errorId missing from params") + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("ErrorID missing from params")} + } + var getErrorWithSpanReponse model.ErrorWithSpan + + query := fmt.Sprintf("SELECT spanID, traceID, errorID, timestamp, serviceName, exceptionType, exceptionMessage, excepionStacktrace, exceptionEscaped, olderErrorId, newerErrorId FROM (SELECT *, lagInFrame(errorID) over w as olderErrorId, leadInFrame(errorID) over w as newerErrorId FROM %s window w as (ORDER BY exceptionType, serviceName, timestamp rows between unbounded preceding and unbounded following)) WHERE errorID = ?", r.errorTable) + args := []interface{}{queryParams.ErrorID} + + err := r.db.Get(&getErrorWithSpanReponse, query, args...) 
+ + zap.S().Info(query) + + if err == sql.ErrNoRows { + return nil, nil + } + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + return &getErrorWithSpanReponse, nil + +} + +func (r *ClickHouseReader) GetErrorForType(ctx context.Context, queryParams *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { + + if queryParams.ErrorType == "" || queryParams.ServiceName == "" { + zap.S().Debug("errorType/serviceName missing from params") + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("ErrorType/serviceName missing from params")} + } + var getErrorWithSpanReponse model.ErrorWithSpan + + query := fmt.Sprintf("SELECT spanID, traceID, errorID, timestamp , serviceName, exceptionType, exceptionMessage, excepionStacktrace, exceptionEscaped, newerErrorId, olderErrorId FROM (SELECT *, lagInFrame(errorID) over w as olderErrorId, leadInFrame(errorID) over w as newerErrorId FROM %s WHERE serviceName = ? AND exceptionType = ? window w as (ORDER BY timestamp rows between unbounded preceding and unbounded following)) limit 1", r.errorTable) + args := []interface{}{queryParams.ServiceName, queryParams.ErrorType} + + err := r.db.Get(&getErrorWithSpanReponse, query, args...) + + zap.S().Info(query) + + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + return &getErrorWithSpanReponse, nil + +} diff --git a/pkg/query-service/app/druidReader/reader.go b/pkg/query-service/app/druidReader/reader.go index dc018fc430..db6bac273d 100644 --- a/pkg/query-service/app/druidReader/reader.go +++ b/pkg/query-service/app/druidReader/reader.go @@ -164,3 +164,15 @@ func (druid *DruidReader) SetTTL(_ context.Context, _ *model.TTLParams) (*model. 
func (druid *DruidReader) GetTTL(_ context.Context, _ *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) { return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support setting ttl configuration")} } + +func (druid *DruidReader) GetErrors(_ context.Context, _ *model.GetErrorsParams) (*[]model.Error, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support get error API")} +} + +func (druid *DruidReader) GetErrorForId(_ context.Context, _ *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support get error API")} +} + +func (druid *DruidReader) GetErrorForType(_ context.Context, _ *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support get error API")} +} diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 0f4438739a..f190def948 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -203,6 +203,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) { router.HandleFunc("/api/v1/serviceMapDependencies", aH.serviceMapDependencies).Methods(http.MethodGet) router.HandleFunc("/api/v1/settings/ttl", aH.setTTL).Methods(http.MethodPost) router.HandleFunc("/api/v1/settings/ttl", aH.getTTL).Methods(http.MethodGet) + router.HandleFunc("/api/v1/errors", aH.getErrors).Methods(http.MethodGet) + router.HandleFunc("/api/v1/errorWithId", aH.getErrorForId).Methods(http.MethodGet) + router.HandleFunc("/api/v1/errorWithType", aH.getErrorForType).Methods(http.MethodGet) } func Intersection(a, b []int) (c []int) { @@ -885,6 +888,51 @@ func (aH *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) { } +func (aH *APIHandler) getErrors(w http.ResponseWriter, r *http.Request) { + + query, err := 
parseErrorsRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + result, apiErr := (*aH.reader).GetErrors(context.Background(), query) + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) + +} + +func (aH *APIHandler) getErrorForId(w http.ResponseWriter, r *http.Request) { + + query, err := parseErrorRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + result, apiErr := (*aH.reader).GetErrorForId(context.Background(), query) + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) + +} + +func (aH *APIHandler) getErrorForType(w http.ResponseWriter, r *http.Request) { + + query, err := parseErrorRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + result, apiErr := (*aH.reader).GetErrorForType(context.Background(), query) + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) + +} + func (aH *APIHandler) searchSpansAggregates(w http.ResponseWriter, r *http.Request) { query, err := parseSearchSpanAggregatesRequest(r) diff --git a/pkg/query-service/app/interface.go b/pkg/query-service/app/interface.go index 75483b1b43..01cebe5173 100644 --- a/pkg/query-service/app/interface.go +++ b/pkg/query-service/app/interface.go @@ -37,7 +37,9 @@ type Reader interface { GetServicesList(ctx context.Context) (*[]string, error) GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) GetTTL(ctx context.Context, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) - + GetErrors(ctx context.Context, params *model.GetErrorsParams) (*[]model.Error, *model.ApiError) + GetErrorForId(ctx context.Context, params *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) + GetErrorForType(ctx 
context.Context, params *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) // Search Interfaces SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) SearchSpans(ctx context.Context, query *model.SpanSearchParams) (*[]model.SearchSpansResult, error) diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 2cf98542dd..864b5e9b29 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -480,6 +480,47 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) { return params, nil } +func parseErrorRequest(r *http.Request) (*model.GetErrorParams, error) { + + params := &model.GetErrorParams{} + + serviceName := r.URL.Query().Get("serviceName") + if len(serviceName) != 0 { + params.ServiceName = serviceName + } + + errorType := r.URL.Query().Get("errorType") + if len(errorType) != 0 { + params.ErrorType = errorType + } + + errorId := r.URL.Query().Get("errorId") + if len(errorId) != 0 { + params.ErrorID = errorId + } + + return params, nil +} + +func parseErrorsRequest(r *http.Request) (*model.GetErrorsParams, error) { + + startTime, err := parseTime("start", r) + if err != nil { + return nil, err + } + endTime, err := parseTimeMinusBuffer("end", r) + if err != nil { + return nil, err + } + + params := &model.GetErrorsParams{ + Start: startTime, + End: endTime, + } + + return params, nil +} + func parseTags(param string, r *http.Request) (*[]model.TagQuery, error) { tags := new([]model.TagQuery) diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 8e4088d8d0..8e2b42b28a 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -94,7 +94,7 @@ func createHTTPServer() (*http.Server, error) { posthogClient = posthog.New("H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w") distinctId = uuid.New().String() - localDB, err := 
dashboards.InitDB("/var/lib/signoz/signoz.db") + localDB, err := dashboards.InitDB("./signoz.db") if err != nil { return nil, err } diff --git a/pkg/query-service/config/dashboards/.gitkeep b/pkg/query-service/config/dashboards/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/query-service/go.mod b/pkg/query-service/go.mod index f069568f19..023f231dbe 100644 --- a/pkg/query-service/go.mod +++ b/pkg/query-service/go.mod @@ -3,17 +3,20 @@ module go.signoz.io/query-service go 1.14 require ( + cloud.google.com/go v0.88.0 // indirect github.com/ClickHouse/clickhouse-go v1.4.5 + github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect github.com/aws/aws-sdk-go v1.27.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/containerd/containerd v1.4.12 // indirect + github.com/dhui/dktest v0.3.4 // indirect + github.com/docker/docker v20.10.12+incompatible github.com/frankban/quicktest v1.13.0 // indirect - github.com/go-kit/kit v0.9.0 // indirect github.com/go-kit/log v0.1.0 - github.com/gogo/protobuf v1.2.1 // indirect - github.com/google/uuid v1.1.2 + github.com/golang-migrate/migrate/v4 v4.14.1 + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.3.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/gosimple/slug v1.10.0 @@ -21,21 +24,23 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.0.0 // indirect github.com/hashicorp/go-msgpack v1.1.5 // indirect - github.com/hashicorp/go-multierror v1.0.0 // indirect github.com/hashicorp/go-sockaddr v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/memberlist v0.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // 
indirect github.com/jmoiron/sqlx v1.3.4 github.com/json-iterator/go v1.1.10 github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.0 // indirect github.com/mattn/go-sqlite3 v1.14.8 - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/oklog/oklog v0.3.2 github.com/oklog/run v1.1.0 // indirect - github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/gomega v1.14.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pascaldekloe/goe v0.1.0 // indirect github.com/pierrec/lz4 v2.4.1+incompatible // indirect @@ -45,20 +50,25 @@ require ( github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 github.com/prometheus/procfs v0.0.8 // indirect github.com/prometheus/prometheus v2.5.0+incompatible - github.com/prometheus/tsdb v0.0.0-20181003080831-0ce41118ed20 github.com/rs/cors v1.7.0 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/smartystreets/goconvey v1.6.4 github.com/soheilhy/cmux v0.1.4 github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect go.uber.org/zap v1.16.0 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect + golang.org/x/net v0.0.0-20211013171255-e13a2654a71e // indirect + golang.org/x/sys v0.0.0-20211013075003-97ac67df715c // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/api v0.51.0 // indirect + google.golang.org/genproto 
v0.0.0-20211013025323-ce878158c4d4 // indirect + google.golang.org/grpc v1.41.0 // indirect google.golang.org/grpc/examples v0.0.0-20210803221256-6ba56c814be7 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gotest.tools/v3 v3.0.3 // indirect ) diff --git a/pkg/query-service/go.sum b/pkg/query-service/go.sum index 34097b7357..7daf49653f 100644 --- a/pkg/query-service/go.sum +++ b/pkg/query-service/go.sum @@ -12,6 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.63.0/go.mod h1:GmezbQc7T2snqkEXWfZ0sy0VfkB/ivI2DdtJL2DEmlg= +cloud.google.com/go v0.64.0/go.mod h1:xfORb36jGvE+6EexW71nMEtL025s3x6xvuYUKM4JLv4= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= @@ -20,123 +22,129 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.88.0 h1:MZ2cf9Elnv1wqccq8ooKO2MqHQLc+ChCp/+QWObCpxg= +cloud.google.com/go v0.88.0/go.mod h1:dnKwfYbP9hQhefiUvpbcAyoGSHUrOxR20JVElLiUvEY= cloud.google.com/go/bigquery 
v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.9.0/go.mod h1:xvlEn0NZ5v1iJPYsBnUVRDNvccDxsBTEi16pJRKQVws= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v5.0.0-beta.0.20161028183111-bd73d950fa44+incompatible h1:+5hx+ZckahrubYyxbjTwnq9w5xpnq1CwSL4N54I8/qc= github.com/Azure/azure-sdk-for-go v5.0.0-beta.0.20161028183111-bd73d950fa44+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible h1:u0jVQf+a6k6x8A+sT60l6EY9XZu+kHdnZVPAYqpVRo0= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go v1.4.5 h1:FfhyEnv6/BaWldyjgT2k4gDDmeNwJ9C4NbY/MXxJlXk= github.com/ClickHouse/clickhouse-go v1.4.5/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16/go.mod 
h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/SigNoz/prometheus v1.9.4 h1:HqMPTM/QXgsjzuG5whSS+9l0mVePuya/uZmrAt2H9HQ= -github.com/SigNoz/prometheus v1.9.4/go.mod h1:39/E+7N2hh5Q6NunhoRz9EHCRAyRGj63YK1hE4SiHdk= -github.com/SigNoz/prometheus v1.9.5 h1:ITuK/71BFY3Hxv50/Upsvo6KJUdOxT7o6lCXmVxyLF0= -github.com/SigNoz/prometheus v1.9.5/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.6 h1:G1QYQkwCzTZG961a16r2z/GPK4Esx6PMHa1GF4sVeuo= -github.com/SigNoz/prometheus v1.9.6/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.62 h1:YaemiWBVbaALzgjDHz4qtd6RlJuP2TabLdJZOORWJ/U= -github.com/SigNoz/prometheus v1.9.62/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.63 h1:K/uQMltbLYZEgUGSYxVMK3HEUzPe3VmT2oH91Omnkmo= -github.com/SigNoz/prometheus v1.9.63/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.64 h1:oPB53pbWncTUOAbQxQDq/f8Zz6Ffe6iZSicbsjXKxPQ= -github.com/SigNoz/prometheus v1.9.64/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.65 h1:mbbFs055F3bapnvbreyMLtvMeNRWGH0zTzgBlGVhYJw= -github.com/SigNoz/prometheus v1.9.65/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.66 h1:gmS/wdj+sRQa+PJCRzZQROUqbgC6GWtelSwWJ/iMZhA= -github.com/SigNoz/prometheus v1.9.66/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/SigNoz/prometheus v1.9.67 h1:JYQhs/Y0lwLoiExYzSEa0wQYYe0C96Y4ZPiWaC9MaMU= -github.com/SigNoz/prometheus v1.9.67/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= +github.com/OneOfOne/xxhash v1.2.8 
h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/SigNoz/prometheus v1.9.68 h1:YeouJ2bLUksLtOPVutKFWLVgLDI0CRldosihPZtFCJY= github.com/SigNoz/prometheus v1.9.68/go.mod h1:BHEawFYBYkVr9BjPXsz9Ye6QVVQ+2a99m6r/S8hO/lA= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/aws/aws-sdk-go v1.13.44-0.20180507225419-00862f899353/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v0.0.0-20161118035902-4a94f899c20b/go.mod h1:fX/lfQBkSCDXZSUgv6jVIu/EVA3/JNseAX5asI4c4T4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 h1:dzj1/xcivGjNPwwifh/dWTczkwcuqsXXFHY1X/TZMtw= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= -github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f h1:0FHGBrsIyDci8tF7zujQkHdMTJdCTSIV9esrni2fKQI= github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= +github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.12 h1:V+SHzYmhng/iju6M5nFrpTTusrhidoxKTwdwLw+u4c4= +github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.0.1-0.20161101193935-9ed569b5d1ac+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-bits v0.0.0-20160601073636-2ad8d707cc05 h1:XXlpYVyW5p4t3IvIv1jegXhqjgFMHnDwZ4AKvmwOL7I= github.com/dgryski/go-bits v0.0.0-20160601073636-2ad8d707cc05/go.mod h1:/9UYwwvZuEgp+mQ4960SHWCU1FS+FgdFX+m5ExFByNs= +github.com/dhui/dktest v0.3.3/go.mod 
h1:EML9sP4sqJELHn4jV7B0TY8oF6077nk83/tz7M56jcQ= +github.com/dhui/dktest v0.3.4 h1:VbUEcaSP+U2/yUr9d2JhSThXYEnDlGabRSHe2rIE46E= +github.com/dhui/dktest v0.3.4/go.mod h1:4m4n6lmXlmVfESth7mzdcv8nBI5mOb5UROPqjM02csU= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -145,18 +153,15 @@ github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.21.1 h1:+QXUYsI7Tfxc64oD6R5BxU/Aq+UwGkyjH4W/hMNG7bg= github.com/go-ini/ini v1.21.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.4.1-0.20170517165212-6964666de57c h1:lGtNy7NU/+ytYPPneoErOaNrYkF5DOVCYViUK/7t7XA= github.com/go-kit/kit v0.4.1-0.20170517165212-6964666de57c/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -168,11 +173,16 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-stack/stack v1.5.4/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf 
v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.14.1 h1:qmRd/rNGjM1r3Ve5gHd5ZplytrD02UcItYNxJ3iUHHE= +github.com/golang-migrate/migrate/v4 v4.14.1/go.mod h1:l7Ks0Au6fYHuUIxUhQ0rcVX1uLlJg54C/VvW7tvxSz0= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20210429001901-424d2337a529 h1:2voWjNECnrZRbfwXxHB1/j8wa6xdKn85B5NzgVL/pTU= github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -188,8 +198,8 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -209,12 +219,16 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -228,14 +242,16 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod 
h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -250,12 +266,13 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0 
h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= +github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -265,8 +282,13 @@ github.com/gophercloud/gophercloud v0.0.0-20170607034829-caf34a65f602 h1:Acc1d6m github.com/gophercloud/gophercloud v0.0.0-20170607034829-caf34a65f602/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod 
h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gosimple/slug v1.10.0 h1:3XbiQua1IpCdrvuntWvGBxVm+K99wCSxJjlxkP49GGQ= @@ -278,6 +300,7 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul v1.1.1-0.20180615161029-bed22a81e9fd h1:u6o+bd6FHxDKoCSa8PJ5vrHhAYSKgJtAHQtLO1EYgos= github.com/hashicorp/consul v1.1.1-0.20180615161029-bed22a81e9fd/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -289,8 +312,8 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 
h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= @@ -306,16 +329,40 @@ github.com/hashicorp/memberlist v0.1.0 h1:qSsCiC0WYD39lbSitKNt40e30uorm2Ss/d4JGU github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= github.com/hashicorp/serf v0.8.1-0.20161007004122-1d4fa605f6ff h1:epPiU3hEuHbpThFTQSGbdBBJemXM7aNQIU1thmpucTU= github.com/hashicorp/serf v0.8.1-0.20161007004122-1d4fa605f6ff/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/influxdata/influxdb v1.2.3-0.20170331210902-15e594fc09f1 h1:GUipvZTEOj/jmmk0RcCFL0JKMPuTiwivshNNc2cx5dk= github.com/influxdata/influxdb v1.2.3-0.20170331210902-15e594fc09f1/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn 
v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.3.2/go.mod h1:LvCquS3HbBKwgl7KbX9KyqEIumJAbm1UMcTvGaIf3bM= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle 
v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= @@ -323,71 +370,96 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.1.1-0.20150905172533-109e267447e9 
h1:9BJzO5IK9VOH5HV48q1+pV+V5wiUA30N+gQAsA6HGtY= github.com/julienschmidt/httprouter v1.1.1-0.20150905172533-109e267447e9/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0 h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU= github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= 
github.com/matttproud/golang_protobuf_extensions v0.0.0-20150406173934-fc2b8d3a73c4/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.4 h1:Ec3LTJwwzqT1++63P12fhtdEbQhtPE7TBdD6rlhqrMM= github.com/miekg/dns v1.0.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9 h1:Y94YB7jrsihrbGSqRNMwRWJ2/dCxr0hdC2oPRohkx0A= github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.2.3-0.20170918173356-f857583a70c3 h1:2RCgCV8V4tpTv/J6PSPXGYsuYwaKVKlN3vv45d2JFHI= github.com/oklog/oklog v0.2.3-0.20170918173356-f857583a70c3/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/oklog v0.3.2 h1:wVfs8F+in6nTBMkA7CbRw+zZMIB7nNM825cM1wuzoTk= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= 
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v0.3.1-0.20170117200651-66bb6560562f h1:tt7Qj+4Pic1KiUqT7XNMnbAE3TLJAGH+5LMuX4roYbE= github.com/oklog/ulid v0.3.1-0.20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7 h1:8KbikWulLUcMM96hBxjgoo6gTmCkG6HYSDohv/WygYU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-stdlib 
v0.0.0-20170113013457-1de4cc2120e7/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/opentracing-go v1.0.1/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -396,11 +468,11 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/peterbourgon/diskv v2.0.2-0.20180312054125-0646ccaebea1+incompatible h1:FhnA4iH8T/yYW+AolPONZjGE897wxj3MAzfEbrZkSYw= github.com/peterbourgon/diskv v2.0.2-0.20180312054125-0646ccaebea1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.1-0.20161029093637-248dadf4e906/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -420,53 +492,69 @@ github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLk github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.0.0-20181003080831-0ce41118ed20 h1:Jh/eKJuru9z9u3rUGdQ8gYc3aZmCGkjXT3gmy0Ex8W8= github.com/prometheus/tsdb 
v0.0.0-20181003080831-0ce41118ed20/go.mod h1:lFf/o1J2a31WmWQbxYXfY1azJK5Xp5D8hwKMnVMBTGU= -github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= +github.com/shopspring/decimal 
v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04 h1:y0cMJ0qjii33BnD6tMGcF/+gHYsoKQ6tbwQpy233OII= github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= +github.com/snowflakedb/gosnowflake v1.3.5/go.mod h1:13Ky+lxzIm3VqNDZJdyvu9MCGy+WgRdYFdXp96UcLZU= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep 
v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -475,22 +563,32 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -500,10 +598,8 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -519,7 +615,6 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -531,11 +626,14 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -544,6 +642,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -558,8 +657,11 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201029221708-28c70e62bb1d/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -568,9 +670,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211013171255-e13a2654a71e h1:Xj+JO91noE97IN6F/7WZxzC5QE6yENAQPrwIYhW3bsA= +golang.org/x/net v0.0.0-20211013171255-e13a2654a71e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -596,17 +701,24 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -626,13 +738,16 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201029080932-201ba4db2418/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -642,9 +757,11 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -654,15 +771,17 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -671,12 +790,15 @@ golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -703,8 +825,13 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200806022845-90696ccdc692/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200814230902-9882f1d1823d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200818005847-188abfa75333/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -712,6 +839,7 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -719,6 +847,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -751,7 +881,9 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0 h1:SQaA2Cx57B+iPw2MBgyjEkoeMkRK2IenSGoia0U3lCk= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -759,7 +891,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20160622021550-0a83eba2cadb h1:IAkbDsVCpt6+HIt4uWPISM5JflWvw6fMk+A1S8aZLxc= google.golang.org/cloud v0.0.0-20160622021550-0a83eba2cadb/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -791,12 +922,16 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -808,8 +943,10 @@ google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210721163202-f1cecdd8b78a/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20211013025323-ce878158c4d4 h1:NBxB1XxiWpGqkPUiJ9PoBXkHV5A9+GohMOA+EmWoPbU= +google.golang.org/genproto v0.0.0-20211013025323-ce878158c4d4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -823,6 +960,7 @@ 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= @@ -832,9 +970,10 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20210803221256-6ba56c814be7 h1:k3XsiLoPLXhNlZJy1sHKvkgGEfpMk8bsdJVHKKhTdrc= google.golang.org/grpc/examples v0.0.0-20210803221256-6ba56c814be7/go.mod h1:bF8wuZSAZTcbF7ZPKrDI/qY52toTP/yxLpRRY4Eu9Js= @@ -852,18 +991,17 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.5 h1:qskSCq465uEvC3oGocwvZNsO3RF3SpLVLumOAhL0bXo= gopkg.in/alecthomas/kingpin.v2 v2.2.5/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.3.0/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -871,11 +1009,18 @@ 
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -892,9 +1037,18 @@ k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7Hfitm k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a h1:tHgpQvrWaYfrnC8G4N0Oszw5HHCsZxKilDi2R7HuCSM= k8s.io/kube-openapi 
v0.0.0-20180629012420-d83b052f768a/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index 15cc9ad05c..b8e614868e 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -5,6 +5,9 @@ import ( "os/signal" "syscall" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/github" + "go.signoz.io/query-service/app" "go.signoz.io/query-service/constants" diff --git a/pkg/query-service/model/queryParams.go 
b/pkg/query-service/model/queryParams.go index bacb8b328a..9b22a1a0fc 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -120,3 +120,14 @@ type GetTTLParams struct { Type string GetAllTTL bool } + +type GetErrorsParams struct { + Start *time.Time + End *time.Time +} + +type GetErrorParams struct { + ErrorType string + ErrorID string + ServiceName string +} diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index db6b19c6fb..d6f6e1fcc7 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -161,6 +161,7 @@ type SearchSpanReponseItem struct { DurationNano int64 `db:"durationNano"` TagsKeys []string `db:"tagsKeys"` TagsValues []string `db:"tagsValues"` + Events []string `db:"events"` } type OtelSpanRef struct { @@ -186,8 +187,17 @@ func (item *SearchSpanReponseItem) GetValues() []interface{} { for _, item := range references { referencesStringArray = append(referencesStringArray, item.toString()) } + var errorEvent map[string]interface{} + for _, e := range item.Events { + json.Unmarshal([]byte(e), &errorEvent) + if errorEvent["name"] == "exception" { + break + } else { + errorEvent = nil + } + } - returnArray := []interface{}{int64(timeObj.UnixNano() / 1000000), item.SpanID, item.TraceID, item.ServiceName, item.Name, strconv.Itoa(int(item.Kind)), strconv.FormatInt(item.DurationNano, 10), item.TagsKeys, item.TagsValues, referencesStringArray} + returnArray := []interface{}{int64(timeObj.UnixNano() / 1000000), item.SpanID, item.TraceID, item.ServiceName, item.Name, strconv.Itoa(int(item.Kind)), strconv.FormatInt(item.DurationNano, 10), item.TagsKeys, item.TagsValues, referencesStringArray, errorEvent} return returnArray } @@ -261,3 +271,26 @@ type GetTTLResponseItem struct { MetricsTime int `json:"metrics_ttl_duration_hrs"` TracesTime int `json:"traces_ttl_duration_hrs"` } + +type Error struct { + ExceptionType string `json:"exceptionType" 
db:"exceptionType"` + ExceptionMsg string `json:"exceptionMessage" db:"exceptionMessage"` + ExceptionCount int64 `json:"exceptionCount" db:"exceptionCount"` + LastSeen time.Time `json:"lastSeen" db:"lastSeen"` + FirstSeen time.Time `json:"firstSeen" db:"firstSeen"` + ServiceName string `json:"serviceName" db:"serviceName"` +} + +type ErrorWithSpan struct { + ErrorID string `json:"errorId" db:"errorID"` + ExceptionType string `json:"exceptionType" db:"exceptionType"` + ExcepionStacktrace string `json:"excepionStacktrace" db:"excepionStacktrace"` + ExceptionEscaped string `json:"exceptionEscaped" db:"exceptionEscaped"` + ExceptionMsg string `json:"exceptionMessage" db:"exceptionMessage"` + Timestamp time.Time `json:"timestamp" db:"timestamp"` + SpanID string `json:"spanID" db:"spanID"` + TraceID string `json:"traceID" db:"traceID"` + ServiceName string `json:"serviceName" db:"serviceName"` + NewerErrorID string `json:"newerErrorId" db:"newerErrorId"` + OlderErrorID string `json:"olderErrorId" db:"olderErrorId"` +} From cbbd3ce6ad62cdada906d323769d95ad7751635d Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sat, 22 Jan 2022 16:08:19 +0530 Subject: [PATCH 21/81] chore: add codeowners for automatic review request (#633) Signed-off-by: Prashant Shahi --- .github/CODEOWNERS | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..c37e2f3284 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,6 @@ +# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners +# Owners are automatically requested for review for PRs that changes code +# that they own. 
+* @ankitnayan +/frontend/ @palash-signoz +/deploy/ @prashant-shahi \ No newline at end of file From 15680757695a67ac9005b3b6e887b92d666c0e38 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Sat, 22 Jan 2022 23:16:09 +0530 Subject: [PATCH 22/81] chore: update clean signoz command in install script (#631) --- deploy/install.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deploy/install.sh b/deploy/install.sh index 132d250d80..5e52ebb3c0 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -496,7 +496,11 @@ else echo "" if [ $setup_type == 'clickhouse' ]; then - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v" + if is_arm64; then + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v" + else + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml down -v" + fi else echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v" fi From 0e6a1082dcceb65cdbb0d17457f2131247ce351d Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Sun, 23 Jan 2022 14:53:44 +0530 Subject: [PATCH 23/81] fix: init-db.sql restored --- .../clickhouse-setup/docker-compose.yaml | 1 + .../docker-entrypoint-initdb.d/init-db.sql | 31 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 16851112da..2a9d3db67a 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ 
-11,6 +11,7 @@ services: - 8123:8123 volumes: - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml + - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql - ./data/clickhouse/:/var/lib/clickhouse/ healthcheck: diff --git a/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql b/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql new file mode 100644 index 0000000000..f71983c083 --- /dev/null +++ b/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql @@ -0,0 +1,31 @@ +CREATE TABLE IF NOT EXISTS signoz_index ( + timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), + traceID String CODEC(ZSTD(1)), + spanID String CODEC(ZSTD(1)), + parentSpanID String CODEC(ZSTD(1)), + serviceName LowCardinality(String) CODEC(ZSTD(1)), + name LowCardinality(String) CODEC(ZSTD(1)), + kind Int32 CODEC(ZSTD(1)), + durationNano UInt64 CODEC(ZSTD(1)), + tags Array(String) CODEC(ZSTD(1)), + tagsKeys Array(String) CODEC(ZSTD(1)), + tagsValues Array(String) CODEC(ZSTD(1)), + statusCode Int64 CODEC(ZSTD(1)), + references String CODEC(ZSTD(1)), + externalHttpMethod Nullable(String) CODEC(ZSTD(1)), + externalHttpUrl Nullable(String) CODEC(ZSTD(1)), + component Nullable(String) CODEC(ZSTD(1)), + dbSystem Nullable(String) CODEC(ZSTD(1)), + dbName Nullable(String) CODEC(ZSTD(1)), + dbOperation Nullable(String) CODEC(ZSTD(1)), + peerService Nullable(String) CODEC(ZSTD(1)), + INDEX idx_traceID traceID TYPE bloom_filter GRANULARITY 4, + INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4, + INDEX idx_name name TYPE bloom_filter GRANULARITY 4, + INDEX idx_kind kind TYPE minmax GRANULARITY 4, + INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64, + INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64, + INDEX idx_duration durationNano TYPE minmax GRANULARITY 1 +) ENGINE MergeTree() +PARTITION BY toDate(timestamp) +ORDER BY (serviceName, -toUnixTimestamp(timestamp)) \ No newline at 
end of file From 2f665fcc63e4e785165880e85dc938e56a968d1c Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Sun, 23 Jan 2022 14:57:18 +0530 Subject: [PATCH 24/81] chore: added version in clickhouse tag --- deploy/docker/clickhouse-setup/env/x86_64.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/docker/clickhouse-setup/env/x86_64.env b/deploy/docker/clickhouse-setup/env/x86_64.env index fdb9d247c8..d2abdd17e8 100644 --- a/deploy/docker/clickhouse-setup/env/x86_64.env +++ b/deploy/docker/clickhouse-setup/env/x86_64.env @@ -1 +1 @@ -clickhouse_image=yandex/clickhouse-server \ No newline at end of file +clickhouse_image=yandex/clickhouse-server:21.10 \ No newline at end of file From 8059fe14da0a4b876f442e111f698898e32327c7 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 25 Jan 2022 11:33:08 +0530 Subject: [PATCH 25/81] =?UTF-8?q?chore:=20=F0=9F=94=A7=20Add=20behaviorbot?= =?UTF-8?q?=20config=20YAML=20(#640)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- .github/config.yaml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/config.yaml diff --git a/.github/config.yaml b/.github/config.yaml new file mode 100644 index 0000000000..9105934e67 --- /dev/null +++ b/.github/config.yaml @@ -0,0 +1,29 @@ +# Configuration for welcome - https://github.com/behaviorbot/welcome + +# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome +# Comment to be posted to on first time issues +newIssueWelcomeComment: > + Thanks for opening this issue. A team member should give feedback soon. + In the meantime, feel free to check out the [contributing guidelines](https://github.com/signoz/signoz/blob/main/CONTRIBUTING.md). 
+ + +# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome +# Comment to be posted to on PRs from first time contributors in your repository +newPRWelcomeComment: > + Welcome to the SigNoz community! Thank you for your first pull request and making this project better. 🤗 + + +# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge +# Comment to be posted to on pull requests merged by a first time user +firstPRMergeComment: > + Congrats on merging your first pull request! + + ![minion-party](https://i.imgur.com/Xlg59lP.gif) + + We here at SigNoz are proud of you! 🥳 + + +# Configuration for request-info - https://github.com/behaviorbot/request-info +# Comment to be posted in issues or pull requests, when no description is provided. +requestInfoReplyComment: > + We would appreciate it if you could provide us with more info about this issue/pr! From 40f9a4a5aaad06c19f075962137fc9a430f5e19d Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 25 Jan 2022 12:29:55 +0530 Subject: [PATCH 26/81] =?UTF-8?q?chore:=20=E2=99=BB=EF=B8=8F=20single=20ma?= =?UTF-8?q?nifest=20file=20for=20the=20hotrod=20(#639)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- sample-apps/hotrod/deployment.yaml | 29 --- sample-apps/hotrod/hotrod-load/locust-cm.yaml | 7 - .../hotrod/hotrod-load/master-deployment.yaml | 59 ----- .../hotrod/hotrod-load/scripts-cm.yaml | 23 -- sample-apps/hotrod/hotrod-load/service.yaml | 17 -- .../hotrod/hotrod-load/slave-deployment.yaml | 52 ---- sample-apps/hotrod/hotrod.yaml | 223 ++++++++++++++++++ sample-apps/hotrod/service.yaml | 13 - 8 files changed, 223 insertions(+), 200 deletions(-) delete mode 100644 sample-apps/hotrod/deployment.yaml delete mode 100644 sample-apps/hotrod/hotrod-load/locust-cm.yaml delete mode 100644 sample-apps/hotrod/hotrod-load/master-deployment.yaml delete mode 100644 
sample-apps/hotrod/hotrod-load/scripts-cm.yaml delete mode 100644 sample-apps/hotrod/hotrod-load/service.yaml delete mode 100644 sample-apps/hotrod/hotrod-load/slave-deployment.yaml create mode 100644 sample-apps/hotrod/hotrod.yaml delete mode 100644 sample-apps/hotrod/service.yaml diff --git a/sample-apps/hotrod/deployment.yaml b/sample-apps/hotrod/deployment.yaml deleted file mode 100644 index 35641c9ca4..0000000000 --- a/sample-apps/hotrod/deployment.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - service: hotrod - name: hotrod -spec: - replicas: 1 - selector: - matchLabels: - service: hotrod - strategy: {} - template: - metadata: - labels: - service: hotrod - spec: - containers: - - args: - - all - env: - - name: JAEGER_ENDPOINT - value: http://otel-collector.platform.svc.cluster.local:14268/api/traces - image: jaegertracing/example-hotrod:latest - imagePullPolicy: IfNotPresent - name: hotrod - ports: - - containerPort: 8080 - restartPolicy: Always diff --git a/sample-apps/hotrod/hotrod-load/locust-cm.yaml b/sample-apps/hotrod/hotrod-load/locust-cm.yaml deleted file mode 100644 index a1f56523a9..0000000000 --- a/sample-apps/hotrod/hotrod-load/locust-cm.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: locust-cm -data: - # ATTACKED_HOST: http://locust-master:8089 - ATTACKED_HOST: http://hotrod:8080 \ No newline at end of file diff --git a/sample-apps/hotrod/hotrod-load/master-deployment.yaml b/sample-apps/hotrod/hotrod-load/master-deployment.yaml deleted file mode 100644 index 9702ed4d71..0000000000 --- a/sample-apps/hotrod/hotrod-load/master-deployment.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - deployment.kubernetes.io/revision: "1" - labels: - role: locust-master - name: locust-master -spec: - replicas: 1 - selector: - matchLabels: - role: locust-master - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 
- type: RollingUpdate - template: - metadata: - labels: - role: locust-master - spec: - containers: - - image: grubykarol/locust:0.8.1-py3.6 - imagePullPolicy: IfNotPresent - name: locust-master - env: - - name: ATTACKED_HOST - valueFrom: - configMapKeyRef: - name: locust-cm - key: ATTACKED_HOST - - name: LOCUST_MODE - value: MASTER - - name: LOCUST_OPTS - value: --print-stats - volumeMounts: - - mountPath: /locust - name: locust-scripts - ports: - - containerPort: 5557 - name: comm - - containerPort: 5558 - name: comm-plus-1 - - containerPort: 8089 - name: web-ui - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - volumes: - - name: locust-scripts - configMap: - name: scripts-cm diff --git a/sample-apps/hotrod/hotrod-load/scripts-cm.yaml b/sample-apps/hotrod/hotrod-load/scripts-cm.yaml deleted file mode 100644 index b8b114d5f1..0000000000 --- a/sample-apps/hotrod/hotrod-load/scripts-cm.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: scripts-cm -data: - locustfile.py: | - from locust import HttpLocust, TaskSet, task - class UserTasks(TaskSet): - @task - def rachel(self): - self.client.get("/dispatch?customer=123&nonse=0.6308392664170006") - @task - def trom(self): - self.client.get("/dispatch?customer=392&nonse=0.015296363321630757") - @task - def japanese(self): - self.client.get("/dispatch?customer=731&nonse=0.8022286220408668") - @task - def coffee(self): - self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593") - class WebsiteUser(HttpLocust): - task_set = UserTasks - diff --git a/sample-apps/hotrod/hotrod-load/service.yaml b/sample-apps/hotrod/hotrod-load/service.yaml deleted file mode 100644 index 59586d7bda..0000000000 --- a/sample-apps/hotrod/hotrod-load/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 
-kind: Service -metadata: - labels: - role: locust-master - name: locust-master -spec: - ports: - - port: 5557 - name: communication - - port: 5558 - name: communication-plus-1 - - port: 8089 - targetPort: 8089 - name: web-ui - selector: - role: locust-master diff --git a/sample-apps/hotrod/hotrod-load/slave-deployment.yaml b/sample-apps/hotrod/hotrod-load/slave-deployment.yaml deleted file mode 100644 index 1dbb63fc71..0000000000 --- a/sample-apps/hotrod/hotrod-load/slave-deployment.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - deployment.kubernetes.io/revision: "1" - labels: - role: locust-slave - name: locust-slave -spec: - replicas: 1 - selector: - matchLabels: - role: locust-slave - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - role: locust-slave - spec: - containers: - - image: grubykarol/locust:0.8.1-py3.6 - imagePullPolicy: IfNotPresent - name: locust-slave - env: - - name: ATTACKED_HOST - valueFrom: - configMapKeyRef: - name: locust-cm - key: ATTACKED_HOST - - name: LOCUST_MODE - value: SLAVE - - name: LOCUST_MASTER - value: locust-master - volumeMounts: - - mountPath: /locust - name: locust-scripts - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - volumes: - - name: locust-scripts - configMap: - name: scripts-cm diff --git a/sample-apps/hotrod/hotrod.yaml b/sample-apps/hotrod/hotrod.yaml new file mode 100644 index 0000000000..5fada6a2d5 --- /dev/null +++ b/sample-apps/hotrod/hotrod.yaml @@ -0,0 +1,223 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: locust-cm +data: + ATTACKED_HOST: http://hotrod:8080 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: scripts-cm +data: + locustfile.py: | + from locust import HttpLocust, TaskSet, 
task + class UserTasks(TaskSet): + @task + def rachel(self): + self.client.get("/dispatch?customer=123&nonse=0.6308392664170006") + @task + def trom(self): + self.client.get("/dispatch?customer=392&nonse=0.015296363321630757") + @task + def japanese(self): + self.client.get("/dispatch?customer=731&nonse=0.8022286220408668") + @task + def coffee(self): + self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593") + class WebsiteUser(HttpLocust): + task_set = UserTasks +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + service: hotrod + name: hotrod +spec: + replicas: 1 + selector: + matchLabels: + service: hotrod + strategy: {} + template: + metadata: + labels: + service: hotrod + spec: + containers: + - args: + - all + env: + - name: JAEGER_ENDPOINT + value: http://otel-collector.platform.svc.cluster.local:14268/api/traces + image: jaegertracing/example-hotrod:1.30 + imagePullPolicy: IfNotPresent + name: hotrod + ports: + - containerPort: 8080 + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + service: hotrod + name: hotrod +spec: + ports: + - name: "8080" + port: 8080 + targetPort: 8080 + selector: + service: hotrod +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + labels: + role: locust-master + name: locust-master +spec: + replicas: 1 + selector: + matchLabels: + role: locust-master + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + role: locust-master + spec: + containers: + - image: grubykarol/locust:0.8.1-py3.6 + imagePullPolicy: IfNotPresent + name: locust-master + env: + - name: ATTACKED_HOST + valueFrom: + configMapKeyRef: + name: locust-cm + key: ATTACKED_HOST + - name: LOCUST_MODE + value: MASTER + - name: LOCUST_OPTS + value: --print-stats + volumeMounts: + - mountPath: 
/locust + name: locust-scripts + ports: + - containerPort: 5557 + name: comm + - containerPort: 5558 + name: comm-plus-1 + - containerPort: 8089 + name: web-ui + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: locust-scripts + configMap: + name: scripts-cm +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: locust-master + name: locust-master +spec: + ports: + - port: 5557 + name: communication + - port: 5558 + name: communication-plus-1 + - port: 8089 + targetPort: 8089 + name: web-ui + selector: + role: locust-master +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + labels: + role: locust-slave + name: locust-slave +spec: + replicas: 1 + selector: + matchLabels: + role: locust-slave + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + role: locust-slave + spec: + containers: + - image: grubykarol/locust:0.8.1-py3.6 + imagePullPolicy: IfNotPresent + name: locust-slave + env: + - name: ATTACKED_HOST + valueFrom: + configMapKeyRef: + name: locust-cm + key: ATTACKED_HOST + - name: LOCUST_MODE + value: SLAVE + - name: LOCUST_MASTER + value: locust-master + volumeMounts: + - mountPath: /locust + name: locust-scripts + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: locust-scripts + configMap: + name: scripts-cm diff --git a/sample-apps/hotrod/service.yaml 
b/sample-apps/hotrod/service.yaml deleted file mode 100644 index 1f0e989145..0000000000 --- a/sample-apps/hotrod/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - service: hotrod - name: hotrod -spec: - ports: - - name: "8080" - port: 8080 - targetPort: 8080 - selector: - service: hotrod From 9c07ac376db8f0125b1fb8cadb129867af376e8e Mon Sep 17 00:00:00 2001 From: Kartik Verma Date: Tue, 25 Jan 2022 16:31:19 +0530 Subject: [PATCH 27/81] fix: update Contributing and Makefile for dev-setup (#599) --- .gitignore | 1 + CONTRIBUTING.md | 12 ++++++------ Makefile | 14 ++++++++++++++ pkg/query-service/go.mod | 2 +- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 4e720c6728..1556564080 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,4 @@ frontend/cypress.env.json frontend/*.env pkg/query-service/signoz.db +/deploy/docker/clickhouse-setup/data/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c58a713482..99764e34fd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,13 +44,13 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht ### To run ClickHouse setup (recommended for local development) -- `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy` -- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L38` -- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L22` +- git clone https://github.com/SigNoz/signoz.git +- run `sudo make dev-setup` to configure local setup to run query-service +- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38` +- comment out query-service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L22` - Install signoz locally without the frontend and query-service - - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file 
./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d` - - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml up -d` -- `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go` + - If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86` + - If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm` **_Query Service should now be available at `http://localhost:8080`_** diff --git a/Makefile b/Makefile index 83ba66976b..67dbd9a522 100644 --- a/Makefile +++ b/Makefile @@ -81,3 +81,17 @@ build-push-flattener: @echo "------------------" @cd $(FLATTENER_DIRECTORY) && \ docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(FLATTERNER_DOCKER_IMAGE):$(DOCKER_TAG) . + +dev-setup: + mkdir -p /var/lib/signoz + sqlite3 /var/lib/signoz/signoz.db "VACUUM"; + mkdir -p pkg/query-service/config/dashboards + @echo "------------------" + @echo "--> Local Setup completed" + @echo "------------------" + +run-x86: + @sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/x86_64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d + +run-arm: + @sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/arm64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d \ No newline at end of file diff --git a/pkg/query-service/go.mod b/pkg/query-service/go.mod index 023f231dbe..d64a824ab2 100644 --- a/pkg/query-service/go.mod +++ b/pkg/query-service/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go v1.27.0 // indirect github.com/containerd/containerd v1.4.12 // indirect github.com/dhui/dktest v0.3.4 // indirect - github.com/docker/docker v20.10.12+incompatible + github.com/docker/docker v20.10.12+incompatible // indirect 
github.com/frankban/quicktest v1.13.0 // indirect github.com/go-kit/log v0.1.0 github.com/golang-migrate/migrate/v4 v4.14.1 From dcb17fb33a5c9bfef487c71a68dfc965d9600b0e Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 26 Jan 2022 12:48:29 +0530 Subject: [PATCH 28/81] ci(k3s): k3s CI workflow enhancements (#643) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci(k3s): k3s CI workflow enhancements * ci(k3s): 💚 Fix the names of deployment and statefulset Signed-off-by: Prashant Shahi --- .github/workflows/e2e-k3s.yaml | 56 ++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index dcd819a570..41bea93c8e 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -14,48 +14,72 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v2 + - name: Build query-service image run: make build-query-service-amd64 + - name: Build frontend image run: make build-frontend-amd64 + - name: Create a k3s cluster uses: AbsaOSS/k3d-action@v2 with: cluster-name: "signoz" + - name: Inject the images to the cluster run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz + - name: Deploy the app run: | - helm dependency update deploy/kubernetes/platform - helm install signoz deploy/kubernetes/platform/ -n platform --create-namespace \ - --wait --timeout 10m0s \ + # add signoz helm repository + helm repo add signoz https://charts.signoz.io + + # create platform namespace + kubectl create ns platform + + # installing signoz using helm + helm install my-release signoz/signoz -n platform \ + --wait \ + --timeout 10m0s \ --set cloud=null \ --set frontend.service.type=LoadBalancer \ --set query-service.image.tag=$DOCKER_TAG \ --set frontend.image.tag=$DOCKER_TAG - kubectl describe deploy/signoz-frontend -n platform | grep Image - kubectl describe 
statefulset/signoz-query-service -n platform | grep Image + + # get pods, services and the container images + kubectl describe deploy/frontend -n platform | grep Image + kubectl describe statefulset/query-service -n platform | grep Image kubectl get pods -n platform kubectl get svc -n platform + - name: Kick off a sample-app workload run: | + # create sample-application namespace kubectl create ns sample-application - kubectl -n sample-application apply -Rf sample-apps/hotrod/ + + # apply hotrod k8s manifest file + kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/blob/main/sample-apps/hotrod/hotrod.yaml + + # wait for all deployments in sample-application namespace to be READY kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - #TODO: we can remove that sleep once the sample app readiness prob is fixed. - sleep 300 # if we don't wait the curl command return success but doesn't kick-of the requests + + # start the locust swarm kubectl -n sample-application run strzal --image=djbingham/curl \ --restart='OnFailure' -i --rm --command -- curl -X POST -F \ 'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm - sleep 600 # give some time for the data to reach signoz - - name: Get commit short sha - uses: benjlevesque/short-sha@v1.2 - id: short-sha + + - name: Get short commit SHA and display tunnel URL + id: get-subdomain + run: | + subdomain="pr-$(git rev-parse --short HEAD)" + echo "URL for tunnelling: https://$subdomain.loca.lt" + echo "::set-output name=subdomain::$subdomain" + - name: Start tunnel env: - COMMIT_SHA: pr-${{ steps.short-sha.outputs.sha }} + SUBDOMAIN: ${{ steps.get-subdomain.outputs.subdomain }} run: | npm install -g localtunnel - host=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f4) - port=$(kubectl get svc -n platform | grep signoz-frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) - lt -p $port 
-l $host -s $COMMIT_SHA + host=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f4) + port=$(kubectl get svc -n platform | grep frontend | tr -s ' ' | cut -d" " -f5 | cut -d":" -f1) + lt -p $port -l $host -s $SUBDOMAIN From 0ab91707e99cfc17b7b64670ef071af58e6c4f26 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Wed, 26 Jan 2022 20:41:59 +0530 Subject: [PATCH 29/81] New Trace Filter Page API changes (Backend) (#646) * build: integrate sql migrations for clickhouse * feat: support error/exception attributes for trace * chore: fixing dependencies for docker go client libs * feat: get trace filter api checkpoint * chore: fixing dependencies for go-migrate * feat: add new columns * feat: move mirgate run from docker to code * fix: migration file 404 issue * feat: getSpanFilter API * fix: migrate version naming bug * chore: change url param format to array * feat: add getTagFilter API * feat: add getFilteredSpans API * fix: using OFFSET in sqlx driver * feat: aggregates API on getFilteredSpan, use IN and NOT IN for tag filtering * feat: add more function support to span aggregate API * fix: null component edge case * feat: groupBy support for filteredSpanAggregate * feat: add function param to span aggregate API * feat: add support to return totalSpans in getFilteredSpans API * fix: don't return null string as keys in span filters * chore: remove SQL migrations(moved to otel collector) * fix: null string issue in aggregate API * Merge main * fix: trace API db query param * fix: signoz sql db path * fix: case when both error and ok status are selected Co-authored-by: Ankit Nayan --- .../clickhouse-setup/docker-compose.yaml | 1 - .../docker-entrypoint-initdb.d/init-db.sql | 31 - .../app/clickhouseReader/reader.go | 1054 ++++++++++++++++- pkg/query-service/app/druidReader/reader.go | 16 + pkg/query-service/app/http_handler.go | 68 ++ pkg/query-service/app/interface.go | 5 + pkg/query-service/app/parser.go | 367 +++++- 
pkg/query-service/model/queryParams.go | 81 ++ pkg/query-service/model/response.go | 100 ++ 9 files changed, 1689 insertions(+), 34 deletions(-) delete mode 100644 deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 2a9d3db67a..16851112da 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -11,7 +11,6 @@ services: - 8123:8123 volumes: - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql - ./data/clickhouse/:/var/lib/clickhouse/ healthcheck: diff --git a/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql b/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql deleted file mode 100644 index f71983c083..0000000000 --- a/deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql +++ /dev/null @@ -1,31 +0,0 @@ -CREATE TABLE IF NOT EXISTS signoz_index ( - timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), - traceID String CODEC(ZSTD(1)), - spanID String CODEC(ZSTD(1)), - parentSpanID String CODEC(ZSTD(1)), - serviceName LowCardinality(String) CODEC(ZSTD(1)), - name LowCardinality(String) CODEC(ZSTD(1)), - kind Int32 CODEC(ZSTD(1)), - durationNano UInt64 CODEC(ZSTD(1)), - tags Array(String) CODEC(ZSTD(1)), - tagsKeys Array(String) CODEC(ZSTD(1)), - tagsValues Array(String) CODEC(ZSTD(1)), - statusCode Int64 CODEC(ZSTD(1)), - references String CODEC(ZSTD(1)), - externalHttpMethod Nullable(String) CODEC(ZSTD(1)), - externalHttpUrl Nullable(String) CODEC(ZSTD(1)), - component Nullable(String) CODEC(ZSTD(1)), - dbSystem Nullable(String) CODEC(ZSTD(1)), - dbName Nullable(String) CODEC(ZSTD(1)), - dbOperation Nullable(String) CODEC(ZSTD(1)), - peerService Nullable(String) CODEC(ZSTD(1)), - INDEX idx_traceID traceID TYPE bloom_filter 
GRANULARITY 4, - INDEX idx_service serviceName TYPE bloom_filter GRANULARITY 4, - INDEX idx_name name TYPE bloom_filter GRANULARITY 4, - INDEX idx_kind kind TYPE minmax GRANULARITY 4, - INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_duration durationNano TYPE minmax GRANULARITY 1 -) ENGINE MergeTree() -PARTITION BY toDate(timestamp) -ORDER BY (serviceName, -toUnixTimestamp(timestamp)) \ No newline at end of file diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 2ea149a563..4d418ce0e4 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1329,6 +1329,730 @@ func (r *ClickHouseReader) SearchSpans(ctx context.Context, queryParams *model.S return &searchSpansResult, nil } +func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) { + + var query string + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} + if len(queryParams.ServiceName) > 0 { + for i, e := range queryParams.ServiceName { + if i == 0 && i == len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?)" + } else if i == 0 && i != len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?" + } else if i != 0 && i == len(queryParams.ServiceName)-1 { + query += " OR serviceName=?)" + } else { + query += " OR serviceName=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpRoute) > 0 { + for i, e := range queryParams.HttpRoute { + if i == 0 && i == len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?)" + } else if i == 0 && i != len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?" 
+ } else if i != 0 && i == len(queryParams.HttpRoute)-1 { + query += " OR httpRoute=?)" + } else { + query += " OR httpRoute=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpCode) > 0 { + for i, e := range queryParams.HttpCode { + if i == 0 && i == len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?)" + } else if i == 0 && i != len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?" + } else if i != 0 && i == len(queryParams.HttpCode)-1 { + query += " OR httpCode=?)" + } else { + query += " OR httpCode=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpHost) > 0 { + for i, e := range queryParams.HttpHost { + if i == 0 && i == len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?)" + } else if i == 0 && i != len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?" + } else if i != 0 && i == len(queryParams.HttpHost)-1 { + query += " OR httpHost=?)" + } else { + query += " OR httpHost=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpMethod) > 0 { + for i, e := range queryParams.HttpMethod { + if i == 0 && i == len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?)" + } else if i == 0 && i != len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?" + } else if i != 0 && i == len(queryParams.HttpMethod)-1 { + query += " OR httpMethod=?)" + } else { + query += " OR httpMethod=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpUrl) > 0 { + for i, e := range queryParams.HttpUrl { + if i == 0 && i == len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?)" + } else if i == 0 && i != len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?" + } else if i != 0 && i == len(queryParams.HttpUrl)-1 { + query += " OR httpUrl=?)" + } else { + query += " OR httpUrl=?" 
+ } + args = append(args, e) + } + } + if len(queryParams.Component) > 0 { + for i, e := range queryParams.Component { + if i == 0 && i == len(queryParams.Component)-1 { + query += " AND (component=?)" + } else if i == 0 && i != len(queryParams.Component)-1 { + query += " AND (component=?" + } else if i != 0 && i == len(queryParams.Component)-1 { + query += " OR component=?)" + } else { + query += " OR component=?" + } + args = append(args, e) + } + } + if len(queryParams.Operation) > 0 { + for i, e := range queryParams.Operation { + if i == 0 && i == len(queryParams.Operation)-1 { + query += " AND (name=?)" + } else if i == 0 && i != len(queryParams.Operation)-1 { + query += " AND (name=?" + } else if i != 0 && i == len(queryParams.Operation)-1 { + query += " OR name=?)" + } else { + query += " OR name=?" + } + args = append(args, e) + } + } + + if len(queryParams.MinDuration) != 0 { + query = query + " AND durationNano >= ?" + args = append(args, queryParams.MinDuration) + } + if len(queryParams.MaxDuration) != 0 { + query = query + " AND durationNano <= ?" 
+ args = append(args, queryParams.MaxDuration) + } + + // status can only be two and if both are selected than they are equivalent to none selected + if len(queryParams.Status) == 1 { + if queryParams.Status[0] == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if queryParams.Status[0] == "ok" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" + } + } + + traceFilterReponse := model.SpanFiltersResponse{ + Status: map[string]int{}, + Duration: map[string]int{}, + ServiceName: map[string]int{}, + Operation: map[string]int{}, + HttpCode: map[string]int{}, + HttpMethod: map[string]int{}, + HttpUrl: map[string]int{}, + HttpRoute: map[string]int{}, + HttpHost: map[string]int{}, + Component: map[string]int{}, + } + + for _, e := range queryParams.GetFilters { + switch e { + case "serviceName": + finalQuery := fmt.Sprintf("SELECT serviceName, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY serviceName" + var dBResponse []model.DBResponseServiceName + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.ServiceName != "" { + traceFilterReponse.ServiceName[service.ServiceName] = service.Count + } + } + case "httpCode": + finalQuery := fmt.Sprintf("SELECT httpCode, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY httpCode" + var dBResponse []model.DBResponseHttpCode + err := r.db.Select(&dBResponse, finalQuery, args...) 
+ if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.HttpCode != "" { + traceFilterReponse.HttpCode[service.HttpCode] = service.Count + } + } + case "httpRoute": + finalQuery := fmt.Sprintf("SELECT httpRoute, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY httpRoute" + var dBResponse []model.DBResponseHttpRoute + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.HttpRoute != "" { + traceFilterReponse.HttpRoute[service.HttpRoute] = service.Count + } + } + case "httpUrl": + finalQuery := fmt.Sprintf("SELECT httpUrl, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY httpUrl" + var dBResponse []model.DBResponseHttpUrl + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.HttpUrl != "" { + traceFilterReponse.HttpUrl[service.HttpUrl] = service.Count + } + } + case "httpMethod": + finalQuery := fmt.Sprintf("SELECT httpMethod, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY httpMethod" + var dBResponse []model.DBResponseHttpMethod + err := r.db.Select(&dBResponse, finalQuery, args...) 
+ if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.HttpMethod != "" { + traceFilterReponse.HttpMethod[service.HttpMethod] = service.Count + } + } + case "httpHost": + finalQuery := fmt.Sprintf("SELECT httpHost, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY httpHost" + var dBResponse []model.DBResponseHttpHost + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.HttpHost != "" { + traceFilterReponse.HttpHost[service.HttpHost] = service.Count + } + } + case "operation": + finalQuery := fmt.Sprintf("SELECT name, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY name" + var dBResponse []model.DBResponseOperation + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.Operation != "" { + traceFilterReponse.Operation[service.Operation] = service.Count + } + } + case "component": + finalQuery := fmt.Sprintf("SELECT component, count() as count FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + finalQuery += " GROUP BY component" + var dBResponse []model.DBResponseComponent + err := r.db.Select(&dBResponse, finalQuery, args...) 
+ if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + if service.Component.String != "" { + traceFilterReponse.Component[service.Component.String] = service.Count + } + } + case "status": + finalQuery := fmt.Sprintf("SELECT COUNT(*) as numErrors FROM %s WHERE timestamp >= ? AND timestamp <= ? AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))", r.indexTable) + finalQuery += query + var dBResponse []model.DBResponseErrors + err := r.db.Select(&dBResponse, finalQuery, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + + finalQuery2 := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery2 += query + var dBResponse2 []model.DBResponseTotal + err = r.db.Select(&dBResponse2, finalQuery2, args...) + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + traceFilterReponse.Status = map[string]int{"ok": dBResponse2[0].NumTotal - dBResponse[0].NumErrors, "error": dBResponse[0].NumErrors} + case "duration": + finalQuery := fmt.Sprintf("SELECT min(durationNano), max(durationNano) FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + finalQuery += query + var dBResponse []model.DBResponseMinMaxDuration + err := r.db.Select(&dBResponse, finalQuery, args...) 
+ if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query", err)} + } + for _, service := range dBResponse { + traceFilterReponse.Duration["minDuration"] = service.MinDuration + traceFilterReponse.Duration["maxDuration"] = service.MaxDuration + } + default: + return nil, &model.ApiError{model.ErrorBadData, fmt.Errorf("filter type: %s not supported", e)} + } + } + + return &traceFilterReponse, nil +} + +func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) { + + baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpCode, httpMethod FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + + var query string + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} + if len(queryParams.ServiceName) > 0 { + for i, e := range queryParams.ServiceName { + if i == 0 && i == len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?)" + } else if i == 0 && i != len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?" + } else if i != 0 && i == len(queryParams.ServiceName)-1 { + query += " OR serviceName=?)" + } else { + query += " OR serviceName=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpRoute) > 0 { + for i, e := range queryParams.HttpRoute { + if i == 0 && i == len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?)" + } else if i == 0 && i != len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?" + } else if i != 0 && i == len(queryParams.HttpRoute)-1 { + query += " OR httpRoute=?)" + } else { + query += " OR httpRoute=?" 
+ } + args = append(args, e) + } + } + if len(queryParams.HttpCode) > 0 { + for i, e := range queryParams.HttpCode { + if i == 0 && i == len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?)" + } else if i == 0 && i != len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?" + } else if i != 0 && i == len(queryParams.HttpCode)-1 { + query += " OR httpCode=?)" + } else { + query += " OR httpCode=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpHost) > 0 { + for i, e := range queryParams.HttpHost { + if i == 0 && i == len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?)" + } else if i == 0 && i != len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?" + } else if i != 0 && i == len(queryParams.HttpHost)-1 { + query += " OR httpHost=?)" + } else { + query += " OR httpHost=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpMethod) > 0 { + for i, e := range queryParams.HttpMethod { + if i == 0 && i == len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?)" + } else if i == 0 && i != len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?" + } else if i != 0 && i == len(queryParams.HttpMethod)-1 { + query += " OR httpMethod=?)" + } else { + query += " OR httpMethod=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpUrl) > 0 { + for i, e := range queryParams.HttpUrl { + if i == 0 && i == len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?)" + } else if i == 0 && i != len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?" + } else if i != 0 && i == len(queryParams.HttpUrl)-1 { + query += " OR httpUrl=?)" + } else { + query += " OR httpUrl=?" + } + args = append(args, e) + } + } + if len(queryParams.Component) > 0 { + for i, e := range queryParams.Component { + if i == 0 && i == len(queryParams.Component)-1 { + query += " AND (component=?)" + } else if i == 0 && i != len(queryParams.Component)-1 { + query += " AND (component=?" 
+ } else if i != 0 && i == len(queryParams.Component)-1 { + query += " OR component=?)" + } else { + query += " OR component=?" + } + args = append(args, e) + } + } + if len(queryParams.Operation) > 0 { + for i, e := range queryParams.Operation { + if i == 0 && i == len(queryParams.Operation)-1 { + query += " AND (name=?)" + } else if i == 0 && i != len(queryParams.Operation)-1 { + query += " AND (name=?" + } else if i != 0 && i == len(queryParams.Operation)-1 { + query += " OR name=?)" + } else { + query += " OR name=?" + } + args = append(args, e) + } + } + if len(queryParams.MinDuration) != 0 { + query = query + " AND durationNano >= ?" + args = append(args, queryParams.MinDuration) + } + if len(queryParams.MaxDuration) != 0 { + query = query + " AND durationNano <= ?" + args = append(args, queryParams.MaxDuration) + } + if len(queryParams.Status) != 0 { + for _, e := range queryParams.Status { + if e == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if e == "ok" { + query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" + } + } + } + if len(queryParams.Kind) != 0 { + query = query + " AND kind = ?" 
+ args = append(args, queryParams.Kind) + } + + for _, item := range queryParams.Tags { + + if item.Operator == "in" { + for i, value := range item.Values { + if i == 0 && i == len(item.Values)-1 { + query += " AND has(tags, ?)" + } else if i == 0 && i != len(item.Values)-1 { + query += " AND (has(tags, ?)" + } else if i != 0 && i == len(item.Values)-1 { + query += " OR has(tags, ?))" + } else { + query += " OR has(tags, ?)" + } + args = append(args, fmt.Sprintf("%s:%s", item.Key, value)) + } + } else if item.Operator == "not in" { + for i, value := range item.Values { + if i == 0 && i == len(item.Values)-1 { + query += " AND NOT has(tags, ?)" + } else if i == 0 && i != len(item.Values)-1 { + query += " AND NOT (has(tags, ?)" + } else if i != 0 && i == len(item.Values)-1 { + query += " OR has(tags, ?))" + } else { + query += " OR has(tags, ?)" + } + args = append(args, fmt.Sprintf("%s:%s", item.Key, value)) + } + } else if item.Operator == "regex" { + if len(item.Values) != 1 { + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Regex tag operator should only have one value")} + } + query = query + " AND match(tagsValues[indexOf(tagsKeys, ?)], ?)" + args = append(args, item.Key) + args = append(args, item.Values[0]) + } else if item.Operator == "isnotnull" { + for range item.Values { + query = query + " AND has(tagsKeys, ?)" + args = append(args, item.Key) + } + } else { + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Tag Operator %s not supported", item.Operator)} + } + + } + + var totalSpans []model.DBResponseTotal + + totalSpansQuery := fmt.Sprintf(`SELECT count() as numTotal FROM %s WHERE timestamp >= ? AND timestamp <= ?`, r.indexTable) + + totalSpansQuery += query + err := r.db.Select(&totalSpans, totalSpansQuery, args...) 
+ + zap.S().Info(totalSpansQuery) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + if len(queryParams.Order) != 0 { + if queryParams.Order == "descending" { + query = query + " ORDER BY timestamp DESC" + } + if queryParams.Order == "ascending" { + query = query + " ORDER BY timestamp ASC" + } + } + if queryParams.Limit > 0 { + query = query + " LIMIT ?" + args = append(args, queryParams.Limit) + } + + if queryParams.Offset > 0 { + // due to bug in SQLx driver, using %d temporarily + query = query + fmt.Sprintf(" OFFSET %d", queryParams.Offset) + // args = append(args, queryParams.Offset) + } + + var getFilterSpansResponseItems []model.GetFilterSpansResponseItem + + baseQuery += query + err = r.db.Select(&getFilterSpansResponseItems, baseQuery, args...) + + zap.S().Info(baseQuery) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + getFilterSpansResponse := model.GetFilterSpansResponse{ + Spans: getFilterSpansResponseItems, + TotalSpans: totalSpans[0].NumTotal, + } + + return &getFilterSpansResponse, nil +} + +func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model.TagFilterParams) (*[]model.TagFilters, *model.ApiError) { + + var query string + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} + if len(queryParams.ServiceName) > 0 { + for i, e := range queryParams.ServiceName { + if i == 0 && i == len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?)" + } else if i == 0 && i != len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?" + } else if i != 0 && i == len(queryParams.ServiceName)-1 { + query += " OR serviceName=?)" + } else { + query += " OR serviceName=?" 
+ } + args = append(args, e) + } + } + if len(queryParams.HttpRoute) > 0 { + for i, e := range queryParams.HttpRoute { + if i == 0 && i == len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?)" + } else if i == 0 && i != len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?" + } else if i != 0 && i == len(queryParams.HttpRoute)-1 { + query += " OR httpRoute=?)" + } else { + query += " OR httpRoute=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpCode) > 0 { + for i, e := range queryParams.HttpCode { + if i == 0 && i == len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?)" + } else if i == 0 && i != len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?" + } else if i != 0 && i == len(queryParams.HttpCode)-1 { + query += " OR httpCode=?)" + } else { + query += " OR httpCode=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpHost) > 0 { + for i, e := range queryParams.HttpHost { + if i == 0 && i == len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?)" + } else if i == 0 && i != len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?" + } else if i != 0 && i == len(queryParams.HttpHost)-1 { + query += " OR httpHost=?)" + } else { + query += " OR httpHost=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpMethod) > 0 { + for i, e := range queryParams.HttpMethod { + if i == 0 && i == len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?)" + } else if i == 0 && i != len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?" + } else if i != 0 && i == len(queryParams.HttpMethod)-1 { + query += " OR httpMethod=?)" + } else { + query += " OR httpMethod=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpUrl) > 0 { + for i, e := range queryParams.HttpUrl { + if i == 0 && i == len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?)" + } else if i == 0 && i != len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?" 
+ } else if i != 0 && i == len(queryParams.HttpUrl)-1 { + query += " OR httpUrl=?)" + } else { + query += " OR httpUrl=?" + } + args = append(args, e) + } + } + if len(queryParams.Component) > 0 { + for i, e := range queryParams.Component { + if i == 0 && i == len(queryParams.Component)-1 { + query += " AND (component=?)" + } else if i == 0 && i != len(queryParams.Component)-1 { + query += " AND (component=?" + } else if i != 0 && i == len(queryParams.Component)-1 { + query += " OR component=?)" + } else { + query += " OR component=?" + } + args = append(args, e) + } + } + if len(queryParams.Operation) > 0 { + for i, e := range queryParams.Operation { + if i == 0 && i == len(queryParams.Operation)-1 { + query += " AND (name=?)" + } else if i == 0 && i != len(queryParams.Operation)-1 { + query += " AND (name=?" + } else if i != 0 && i == len(queryParams.Operation)-1 { + query += " OR name=?)" + } else { + query += " OR name=?" + } + args = append(args, e) + } + } + + if len(queryParams.MinDuration) != 0 { + query = query + " AND durationNano >= ?" + args = append(args, queryParams.MinDuration) + } + if len(queryParams.MaxDuration) != 0 { + query = query + " AND durationNano <= ?" + args = append(args, queryParams.MaxDuration) + } + if len(queryParams.Status) != 0 { + for _, e := range queryParams.Status { + if e == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if e == "ok" { + query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" + } + } + } + tagFilters := []model.TagFilters{} + + finalQuery := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagsKeys) as tagKeys FROM %s WHERE timestamp >= ? AND timestamp <= ?`, r.indexTable) + finalQuery += query + fmt.Println(finalQuery) + err := r.db.Select(&tagFilters, finalQuery, args...) 
+ + zap.S().Info(query) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + tagFilters = excludeTags(ctx, tagFilters) + + return &tagFilters, nil +} + +func excludeTags(ctx context.Context, tags []model.TagFilters) []model.TagFilters { + excludedTagsMap := map[string]bool{ + "http.code": true, + "http.route": true, + "http.method": true, + "http.url": true, + "http.status_code": true, + "http.host": true, + "messaging.system": true, + "messaging.operation": true, + "component": true, + "error": true, + } + var newTags []model.TagFilters + for _, tag := range tags { + _, ok := excludedTagsMap[tag.TagKeys] + if !ok { + newTags = append(newTags, tag) + } + } + return newTags +} + func (r *ClickHouseReader) GetServiceDBOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) { var serviceDBOverviewItems []model.ServiceDBOverviewItem @@ -1583,7 +2307,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[ var searchScanReponses []model.SearchSpanReponseItem - query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references, events FROM %s WHERE traceID='%s'", r.indexTable, traceId) + query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references, events FROM %s WHERE traceID=?", r.indexTable) err := r.db.Select(&searchScanReponses, query, traceId) @@ -1759,6 +2483,334 @@ func (r *ClickHouseReader) SearchSpansAggregate(ctx context.Context, queryParams } +func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, queryParams *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) { + + SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{} + + aggregation_query 
:= "" + if queryParams.Dimension == "duration" { + switch queryParams.AggregationOption { + case "p50": + aggregation_query = " quantile(0.50)(durationNano) as value " + case "p95": + aggregation_query = " quantile(0.95)(durationNano) as value " + case "p90": + aggregation_query = " quantile(0.90)(durationNano) as value " + case "p99": + aggregation_query = " quantile(0.99)(durationNano) as value " + case "max": + aggregation_query = " max(durationNano) as value " + case "min": + aggregation_query = " min(durationNano) as value " + case "avg": + aggregation_query = " avg(durationNano) as value " + case "sum": + aggregation_query = " sum(durationNano) as value " + default: + return nil, &model.ApiError{model.ErrorBadData, fmt.Errorf("Aggregate type: %s not supported", queryParams.AggregationOption)} + } + } else if queryParams.Dimension == "calls" { + aggregation_query = " count(*) as value " + } + + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} + + var query string + if queryParams.GroupBy != "" { + switch queryParams.GroupBy { + case "serviceName": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, serviceName as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "httpCode": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpCode as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "httpMethod": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpMethod as groupBy, %s FROM %s WHERE timestamp >= ? 
AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "httpUrl": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpUrl as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "httpRoute": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpRoute as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "httpHost": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, httpHost as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "dbName": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbName as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "dbOperation": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbOperation as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "operation": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, name as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "msgSystem": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgSystem as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "msgOperation": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, msgOperation as groupBy, %s FROM %s WHERE timestamp >= ? 
AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "dbSystem": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, dbSystem as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + case "component": + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, component as groupBy, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + default: + return nil, &model.ApiError{model.ErrorBadData, fmt.Errorf("groupBy type: %s not supported", queryParams.GroupBy)} + } + } else { + query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable) + } + + if len(queryParams.ServiceName) > 0 { + for i, e := range queryParams.ServiceName { + if i == 0 && i == len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?)" + } else if i == 0 && i != len(queryParams.ServiceName)-1 { + query += " AND (serviceName=?" + } else if i != 0 && i == len(queryParams.ServiceName)-1 { + query += " OR serviceName=?)" + } else { + query += " OR serviceName=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpRoute) > 0 { + for i, e := range queryParams.HttpRoute { + if i == 0 && i == len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?)" + } else if i == 0 && i != len(queryParams.HttpRoute)-1 { + query += " AND (httpRoute=?" + } else if i != 0 && i == len(queryParams.HttpRoute)-1 { + query += " OR httpRoute=?)" + } else { + query += " OR httpRoute=?" 
+ } + args = append(args, e) + } + } + if len(queryParams.HttpCode) > 0 { + for i, e := range queryParams.HttpCode { + if i == 0 && i == len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?)" + } else if i == 0 && i != len(queryParams.HttpCode)-1 { + query += " AND (httpCode=?" + } else if i != 0 && i == len(queryParams.HttpCode)-1 { + query += " OR httpCode=?)" + } else { + query += " OR httpCode=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpHost) > 0 { + for i, e := range queryParams.HttpHost { + if i == 0 && i == len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?)" + } else if i == 0 && i != len(queryParams.HttpHost)-1 { + query += " AND (httpHost=?" + } else if i != 0 && i == len(queryParams.HttpHost)-1 { + query += " OR httpHost=?)" + } else { + query += " OR httpHost=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpMethod) > 0 { + for i, e := range queryParams.HttpMethod { + if i == 0 && i == len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?)" + } else if i == 0 && i != len(queryParams.HttpMethod)-1 { + query += " AND (httpMethod=?" + } else if i != 0 && i == len(queryParams.HttpMethod)-1 { + query += " OR httpMethod=?)" + } else { + query += " OR httpMethod=?" + } + args = append(args, e) + } + } + if len(queryParams.HttpUrl) > 0 { + for i, e := range queryParams.HttpUrl { + if i == 0 && i == len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?)" + } else if i == 0 && i != len(queryParams.HttpUrl)-1 { + query += " AND (httpUrl=?" + } else if i != 0 && i == len(queryParams.HttpUrl)-1 { + query += " OR httpUrl=?)" + } else { + query += " OR httpUrl=?" + } + args = append(args, e) + } + } + if len(queryParams.Component) > 0 { + for i, e := range queryParams.Component { + if i == 0 && i == len(queryParams.Component)-1 { + query += " AND (component=?)" + } else if i == 0 && i != len(queryParams.Component)-1 { + query += " AND (component=?" 
+ } else if i != 0 && i == len(queryParams.Component)-1 { + query += " OR component=?)" + } else { + query += " OR component=?" + } + args = append(args, e) + } + } + if len(queryParams.Operation) > 0 { + for i, e := range queryParams.Operation { + if i == 0 && i == len(queryParams.Operation)-1 { + query += " AND (name=?)" + } else if i == 0 && i != len(queryParams.Operation)-1 { + query += " AND (name=?" + } else if i != 0 && i == len(queryParams.Operation)-1 { + query += " OR name=?)" + } else { + query += " OR name=?" + } + args = append(args, e) + } + } + if len(queryParams.MinDuration) != 0 { + query = query + " AND durationNano >= ?" + args = append(args, queryParams.MinDuration) + } + if len(queryParams.MaxDuration) != 0 { + query = query + " AND durationNano <= ?" + args = append(args, queryParams.MaxDuration) + } + if len(queryParams.Status) != 0 { + for _, e := range queryParams.Status { + if e == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if e == "ok" { + query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" + } + } + } + if len(queryParams.Kind) != 0 { + query = query + " AND kind = ?" 
+ args = append(args, queryParams.Kind) + } + + for _, item := range queryParams.Tags { + + if item.Operator == "in" { + for i, value := range item.Values { + if i == 0 && i == len(item.Values)-1 { + query += " AND has(tags, ?)" + } else if i == 0 && i != len(item.Values)-1 { + query += " AND (has(tags, ?)" + } else if i != 0 && i == len(item.Values)-1 { + query += " OR has(tags, ?))" + } else { + query += " OR has(tags, ?)" + } + args = append(args, fmt.Sprintf("%s:%s", item.Key, value)) + } + } else if item.Operator == "not in" { + for i, value := range item.Values { + if i == 0 && i == len(item.Values)-1 { + query += " AND NOT has(tags, ?)" + } else if i == 0 && i != len(item.Values)-1 { + query += " AND NOT (has(tags, ?)" + } else if i != 0 && i == len(item.Values)-1 { + query += " OR has(tags, ?))" + } else { + query += " OR has(tags, ?)" + } + args = append(args, fmt.Sprintf("%s:%s", item.Key, value)) + } + } else if item.Operator == "regex" { + if len(item.Values) != 1 { + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Regex tag operator should only have one value")} + } + query = query + " AND match(tagsValues[indexOf(tagsKeys, ?)], ?)" + args = append(args, item.Key) + args = append(args, item.Values[0]) + } else if item.Operator == "isnotnull" { + for range item.Values { + query = query + " AND has(tagsKeys, ?)" + args = append(args, item.Key) + } + } else { + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Tag Operator %s not supported", item.Operator)} + } + + } + + if queryParams.GroupBy != "" { + switch queryParams.GroupBy { + case "serviceName": + query = query + " GROUP BY time, serviceName as groupBy ORDER BY time" + case "httpCode": + query = query + " GROUP BY time, httpCode as groupBy ORDER BY time" + case "httpMethod": + query = query + " GROUP BY time, httpMethod as groupBy ORDER BY time" + case "httpUrl": + query = query + " GROUP BY time, httpUrl as groupBy ORDER BY time" + case "httpRoute": + query = query + " GROUP BY 
time, httpRoute as groupBy ORDER BY time" + case "httpHost": + query = query + " GROUP BY time, httpHost as groupBy ORDER BY time" + case "dbName": + query = query + " GROUP BY time, dbName as groupBy ORDER BY time" + case "dbOperation": + query = query + " GROUP BY time, dbOperation as groupBy ORDER BY time" + case "operation": + query = query + " GROUP BY time, name as groupBy ORDER BY time" + case "msgSystem": + query = query + " GROUP BY time, msgSystem as groupBy ORDER BY time" + case "msgOperation": + query = query + " GROUP BY time, msgOperation as groupBy ORDER BY time" + case "dbSystem": + query = query + " GROUP BY time, dbSystem as groupBy ORDER BY time" + case "component": + query = query + " GROUP BY time, component as groupBy ORDER BY time" + default: + return nil, &model.ApiError{model.ErrorBadData, fmt.Errorf("groupBy type: %s not supported", queryParams.GroupBy)} + } + } else { + query = query + " GROUP BY time ORDER BY time" + } + + err := r.db.Select(&SpanAggregatesDBResponseItems, query, args...) 
+ + zap.S().Info(query) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{model.ErrorExec, fmt.Errorf("Error in processing sql query")} + } + + GetFilteredSpansAggregatesResponse := model.GetFilteredSpansAggregatesResponse{ + Items: map[int64]model.SpanAggregatesResponseItem{}, + } + + for i, _ := range SpanAggregatesDBResponseItems { + + timeObj, _ := time.Parse(time.RFC3339Nano, SpanAggregatesDBResponseItems[i].Time) + SpanAggregatesDBResponseItems[i].Timestamp = int64(timeObj.UnixNano()) + SpanAggregatesDBResponseItems[i].Time = "" + if queryParams.AggregationOption == "rate_per_sec" { + SpanAggregatesDBResponseItems[i].Value = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds) + } + if responseElement, ok := GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp]; !ok { + if queryParams.GroupBy != "" { + GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{ + Timestamp: SpanAggregatesDBResponseItems[i].Timestamp, + GroupBy: map[string]float32{SpanAggregatesDBResponseItems[i].GroupBy.String: SpanAggregatesDBResponseItems[i].Value}, + } + } else { + GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{ + Timestamp: SpanAggregatesDBResponseItems[i].Timestamp, + Value: SpanAggregatesDBResponseItems[i].Value, + } + } + + } else { + if queryParams.GroupBy != "" { + responseElement.GroupBy[SpanAggregatesDBResponseItems[i].GroupBy.String] = SpanAggregatesDBResponseItems[i].Value + } + GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = responseElement + } + } + + return &GetFilteredSpansAggregatesResponse, nil +} + func (r *ClickHouseReader) SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) { switch ttlParams.Type { diff --git 
a/pkg/query-service/app/druidReader/reader.go b/pkg/query-service/app/druidReader/reader.go index db6bac273d..bd44ad2670 100644 --- a/pkg/query-service/app/druidReader/reader.go +++ b/pkg/query-service/app/druidReader/reader.go @@ -165,6 +165,22 @@ func (druid *DruidReader) GetTTL(_ context.Context, _ *model.GetTTLParams) (*mod return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support setting ttl configuration")} } +func (druid *DruidReader) GetSpanFilters(_ context.Context, _ *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support getting spanfilters")} +} + +func (druid *DruidReader) GetTagFilters(_ context.Context, _ *model.TagFilterParams) (*[]model.TagFilters, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support getting tagFilters")} +} + +func (druid *DruidReader) GetFilteredSpans(_ context.Context, _ *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support getting FilteredSpans")} +} + +func (druid *DruidReader) GetFilteredSpansAggregates(_ context.Context, _ *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) { + return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support getting FilteredSpans")} +} + func (druid *DruidReader) GetErrors(_ context.Context, _ *model.GetErrorsParams) (*[]model.Error, *model.ApiError) { return nil, &model.ApiError{model.ErrorNotImplemented, fmt.Errorf("druid does not support get error API")} } diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index f190def948..e64a9ac1d6 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -203,6 +203,10 @@ func (aH 
*APIHandler) RegisterRoutes(router *mux.Router) { router.HandleFunc("/api/v1/serviceMapDependencies", aH.serviceMapDependencies).Methods(http.MethodGet) router.HandleFunc("/api/v1/settings/ttl", aH.setTTL).Methods(http.MethodPost) router.HandleFunc("/api/v1/settings/ttl", aH.getTTL).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getSpanFilters", aH.getSpanFilters).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getTagFilters", aH.getTagFilters).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getFilteredSpans", aH.getFilteredSpans).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getFilteredSpans/aggregates", aH.getFilteredSpanAggregates).Methods(http.MethodGet) router.HandleFunc("/api/v1/errors", aH.getErrors).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorWithId", aH.getErrorForId).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorWithType", aH.getErrorForType).Methods(http.MethodGet) @@ -965,6 +969,70 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) { aH.writeJSON(w, r, result) } +func (aH *APIHandler) getSpanFilters(w http.ResponseWriter, r *http.Request) { + + query, err := parseSpanFilterRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + + result, apiErr := (*aH.reader).GetSpanFilters(context.Background(), query) + + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) +} + +func (aH *APIHandler) getFilteredSpans(w http.ResponseWriter, r *http.Request) { + + query, err := parseFilteredSpansRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + + result, apiErr := (*aH.reader).GetFilteredSpans(context.Background(), query) + + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) +} + +func (aH *APIHandler) getFilteredSpanAggregates(w http.ResponseWriter, r *http.Request) { + + query, err := 
parseFilteredSpanAggregatesRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + + result, apiErr := (*aH.reader).GetFilteredSpansAggregates(context.Background(), query) + + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) +} + +func (aH *APIHandler) getTagFilters(w http.ResponseWriter, r *http.Request) { + + query, err := parseTagFilterRequest(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + + result, apiErr := (*aH.reader).GetTagFilters(context.Background(), query) + + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, result) +} + func (aH *APIHandler) setTTL(w http.ResponseWriter, r *http.Request) { ttlParams, err := parseDuration(r) if aH.handleError(w, err, http.StatusBadRequest) { diff --git a/pkg/query-service/app/interface.go b/pkg/query-service/app/interface.go index 01cebe5173..de5bdec35b 100644 --- a/pkg/query-service/app/interface.go +++ b/pkg/query-service/app/interface.go @@ -37,6 +37,11 @@ type Reader interface { GetServicesList(ctx context.Context) (*[]string, error) GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) GetTTL(ctx context.Context, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) + GetSpanFilters(ctx context.Context, query *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) + GetTagFilters(ctx context.Context, query *model.TagFilterParams) (*[]model.TagFilters, *model.ApiError) + GetFilteredSpans(ctx context.Context, query *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) + GetFilteredSpansAggregates(ctx context.Context, query *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) + GetErrors(ctx context.Context, params *model.GetErrorsParams) 
(*[]model.Error, *model.ApiError) GetErrorForId(ctx context.Context, params *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) GetErrorForType(ctx context.Context, params *model.GetErrorParams) (*model.ErrorWithSpan, *model.ApiError) diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 864b5e9b29..39113823b9 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -19,9 +19,11 @@ import ( var allowedDimesions = []string{"calls", "duration"} +var allowedFunctions = []string{"count", "ratePerSec", "sum", "avg", "min", "max", "p50", "p90", "p95", "p99"} + var allowedAggregations = map[string][]string{ "calls": {"count", "rate_per_sec"}, - "duration": {"avg", "p50", "p95", "p99"}, + "duration": {"avg", "p50", "p95", "p90", "p99", "min", "max", "sum"}, } func parseUser(r *http.Request) (*model.User, error) { @@ -480,6 +482,284 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) { return params, nil } +func parseSpanFilterRequest(r *http.Request) (*model.SpanFilterParams, error) { + + startTime, err := parseTime("start", r) + if err != nil { + return nil, err + } + endTime, err := parseTimeMinusBuffer("end", r) + if err != nil { + return nil, err + } + + params := &model.SpanFilterParams{ + Start: startTime, + End: endTime, + ServiceName: []string{}, + HttpRoute: []string{}, + HttpCode: []string{}, + HttpUrl: []string{}, + HttpHost: []string{}, + HttpMethod: []string{}, + Component: []string{}, + Status: []string{}, + Operation: []string{}, + GetFilters: []string{}, + } + + params.ServiceName = fetchArrayValues("serviceName", r) + + params.Status = fetchArrayValues("status", r) + + params.Operation = fetchArrayValues("operation", r) + + params.HttpCode = fetchArrayValues("httpCode", r) + + params.HttpUrl = fetchArrayValues("httpUrl", r) + + params.HttpHost = fetchArrayValues("httpHost", r) + + params.HttpRoute = fetchArrayValues("httpRoute", r) + + params.HttpMethod = 
fetchArrayValues("httpMethod", r) + + params.Component = fetchArrayValues("component", r) + + params.GetFilters = fetchArrayValues("getFilters", r) + + minDuration, err := parseTimestamp("minDuration", r) + if err == nil { + params.MinDuration = *minDuration + } + maxDuration, err := parseTimestamp("maxDuration", r) + if err == nil { + params.MaxDuration = *maxDuration + } + + return params, nil +} + +func parseFilteredSpansRequest(r *http.Request) (*model.GetFilteredSpansParams, error) { + + startTime, err := parseTime("start", r) + if err != nil { + return nil, err + } + endTime, err := parseTimeMinusBuffer("end", r) + if err != nil { + return nil, err + } + + params := &model.GetFilteredSpansParams{ + Start: startTime, + End: endTime, + ServiceName: []string{}, + HttpRoute: []string{}, + HttpCode: []string{}, + HttpUrl: []string{}, + HttpHost: []string{}, + HttpMethod: []string{}, + Component: []string{}, + Status: []string{}, + Operation: []string{}, + Limit: 100, + Order: "descending", + } + + params.ServiceName = fetchArrayValues("serviceName", r) + + params.Status = fetchArrayValues("status", r) + + params.Operation = fetchArrayValues("operation", r) + + params.HttpCode = fetchArrayValues("httpCode", r) + + params.HttpUrl = fetchArrayValues("httpUrl", r) + + params.HttpHost = fetchArrayValues("httpHost", r) + + params.HttpRoute = fetchArrayValues("httpRoute", r) + + params.HttpMethod = fetchArrayValues("httpMethod", r) + + params.Component = fetchArrayValues("component", r) + + limitStr := r.URL.Query().Get("limit") + if len(limitStr) != 0 { + limit, err := strconv.ParseInt(limitStr, 10, 64) + if err != nil { + return nil, errors.New("Limit param is not in correct format") + } + params.Limit = limit + } else { + params.Limit = 100 + } + + offsetStr := r.URL.Query().Get("offset") + if len(offsetStr) != 0 { + offset, err := strconv.ParseInt(offsetStr, 10, 64) + if err != nil { + return nil, errors.New("Offset param is not in correct format") + } + 
params.Offset = offset + } + + tags, err := parseTagsV2("tags", r) + if err != nil { + return nil, err + } + if len(*tags) != 0 { + params.Tags = *tags + } + + minDuration, err := parseTimestamp("minDuration", r) + if err == nil { + params.MinDuration = *minDuration + } + maxDuration, err := parseTimestamp("maxDuration", r) + if err == nil { + params.MaxDuration = *maxDuration + } + + kind := r.URL.Query().Get("kind") + if len(kind) != 0 { + params.Kind = kind + } + + return params, nil +} + +func parseFilteredSpanAggregatesRequest(r *http.Request) (*model.GetFilteredSpanAggregatesParams, error) { + + startTime, err := parseTime("start", r) + if err != nil { + return nil, err + } + + endTime, err := parseTimeMinusBuffer("end", r) + if err != nil { + return nil, err + } + + stepStr := r.URL.Query().Get("step") + if len(stepStr) == 0 { + return nil, errors.New("step param missing in query") + } + + stepInt, err := strconv.Atoi(stepStr) + if err != nil { + return nil, errors.New("step param is not in correct format") + } + + function := r.URL.Query().Get("function") + if len(function) == 0 { + return nil, errors.New("function param missing in query") + } else { + if !DoesExistInSlice(function, allowedFunctions) { + return nil, errors.New(fmt.Sprintf("given function: %s is not allowed in query", function)) + } + } + + var dimension, aggregationOption string + + switch function { + case "count": + dimension = "calls" + aggregationOption = "count" + case "ratePerSec": + dimension = "calls" + aggregationOption = "rate_per_sec" + case "avg": + dimension = "duration" + aggregationOption = "avg" + case "sum": + dimension = "duration" + aggregationOption = "sum" + case "p50": + dimension = "duration" + aggregationOption = "p50" + case "p90": + dimension = "duration" + aggregationOption = "p90" + case "p95": + dimension = "duration" + aggregationOption = "p95" + case "p99": + dimension = "duration" + aggregationOption = "p99" + case "min": + dimension = "duration" + 
aggregationOption = "min" + case "max": + dimension = "duration" + aggregationOption = "max" + } + + params := &model.GetFilteredSpanAggregatesParams{ + Start: startTime, + End: endTime, + ServiceName: []string{}, + HttpRoute: []string{}, + HttpCode: []string{}, + HttpUrl: []string{}, + HttpHost: []string{}, + HttpMethod: []string{}, + Component: []string{}, + Status: []string{}, + Operation: []string{}, + StepSeconds: stepInt, + Dimension: dimension, + AggregationOption: aggregationOption, + } + + params.ServiceName = fetchArrayValues("serviceName", r) + + params.Status = fetchArrayValues("status", r) + + params.Operation = fetchArrayValues("operation", r) + + params.HttpCode = fetchArrayValues("httpCode", r) + + params.HttpUrl = fetchArrayValues("httpUrl", r) + + params.HttpHost = fetchArrayValues("httpHost", r) + + params.HttpRoute = fetchArrayValues("httpRoute", r) + + params.HttpMethod = fetchArrayValues("httpMethod", r) + + params.Component = fetchArrayValues("component", r) + + tags, err := parseTagsV2("tags", r) + if err != nil { + return nil, err + } + if len(*tags) != 0 { + params.Tags = *tags + } + + minDuration, err := parseTimestamp("minDuration", r) + if err == nil { + params.MinDuration = *minDuration + } + maxDuration, err := parseTimestamp("maxDuration", r) + if err == nil { + params.MaxDuration = *maxDuration + } + + kind := r.URL.Query().Get("kind") + if len(kind) != 0 { + params.Kind = kind + } + groupBy := r.URL.Query().Get("groupBy") + if len(groupBy) != 0 { + params.GroupBy = groupBy + } + + return params, nil +} + func parseErrorRequest(r *http.Request) (*model.GetErrorParams, error) { params := &model.GetErrorParams{} @@ -502,6 +782,60 @@ func parseErrorRequest(r *http.Request) (*model.GetErrorParams, error) { return params, nil } +func parseTagFilterRequest(r *http.Request) (*model.TagFilterParams, error) { + + startTime, err := parseTime("start", r) + if err != nil { + return nil, err + } + endTime, err := parseTimeMinusBuffer("end", r) + 
if err != nil { + return nil, err + } + + params := &model.TagFilterParams{ + Start: startTime, + End: endTime, + ServiceName: []string{}, + HttpRoute: []string{}, + HttpCode: []string{}, + HttpUrl: []string{}, + HttpHost: []string{}, + HttpMethod: []string{}, + Component: []string{}, + Status: []string{}, + Operation: []string{}, + } + + params.ServiceName = fetchArrayValues("serviceName", r) + + params.Status = fetchArrayValues("status", r) + + params.Operation = fetchArrayValues("operation", r) + + params.HttpCode = fetchArrayValues("httpCode", r) + + params.HttpUrl = fetchArrayValues("httpUrl", r) + + params.HttpHost = fetchArrayValues("httpHost", r) + + params.HttpRoute = fetchArrayValues("httpRoute", r) + + params.HttpMethod = fetchArrayValues("httpMethod", r) + + params.Component = fetchArrayValues("component", r) + + minDuration, err := parseTimestamp("minDuration", r) + if err == nil { + params.MinDuration = *minDuration + } + maxDuration, err := parseTimestamp("maxDuration", r) + if err == nil { + params.MaxDuration = *maxDuration + } + + return params, nil +} func parseErrorsRequest(r *http.Request) (*model.GetErrorsParams, error) { startTime, err := parseTime("start", r) @@ -521,6 +855,19 @@ func parseErrorsRequest(r *http.Request) (*model.GetErrorsParams, error) { return params, nil } +func fetchArrayValues(param string, r *http.Request) []string { + valueStr := r.URL.Query().Get(param) + var values []string + if len(valueStr) == 0 { + return values + } + err := json.Unmarshal([]byte(valueStr), &values) + if err != nil { + zap.S().Error("Error in parsing service params", zap.Error(err)) + } + return values +} + func parseTags(param string, r *http.Request) (*[]model.TagQuery, error) { tags := new([]model.TagQuery) @@ -539,6 +886,24 @@ func parseTags(param string, r *http.Request) (*[]model.TagQuery, error) { return tags, nil } +func parseTagsV2(param string, r *http.Request) (*[]model.TagQueryV2, error) { + + tags := new([]model.TagQueryV2) + tagsStr 
:= r.URL.Query().Get(param) + + if len(tagsStr) == 0 { + return tags, nil + } + err := json.Unmarshal([]byte(tagsStr), tags) + if err != nil { + zap.S().Error("Error in parsig tags", zap.Error(err)) + return nil, fmt.Errorf("error in parsing %s ", param) + } + // zap.S().Info("Tags: ", *tags) + + return tags, nil +} + func parseApplicationPercentileRequest(r *http.Request) (*model.ApplicationPercentileParams, error) { startTime, err := parseTime("start", r) diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 9b22a1a0fc..4732bbbb6b 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -78,6 +78,11 @@ type TagQuery struct { Operator string } +type TagQueryV2 struct { + Key string + Values []string + Operator string +} type SpanSearchAggregatesParams struct { ServiceName string OperationName string @@ -111,6 +116,82 @@ type SpanSearchParams struct { Tags []TagQuery } +type GetFilteredSpansParams struct { + ServiceName []string + Operation []string + Kind string + Status []string + HttpRoute []string + HttpCode []string + HttpUrl []string + HttpHost []string + HttpMethod []string + Component []string + Start *time.Time + End *time.Time + MinDuration string + MaxDuration string + Limit int64 + Order string + Offset int64 + Tags []TagQueryV2 +} + +type GetFilteredSpanAggregatesParams struct { + ServiceName []string + Operation []string + Kind string + Status []string + HttpRoute []string + HttpCode []string + HttpUrl []string + HttpHost []string + HttpMethod []string + Component []string + MinDuration string + MaxDuration string + Tags []TagQueryV2 + Start *time.Time + End *time.Time + StepSeconds int + Dimension string + AggregationOption string + GroupBy string + Function string +} + +type SpanFilterParams struct { + Status []string + ServiceName []string + HttpRoute []string + HttpCode []string + HttpUrl []string + HttpHost []string + HttpMethod []string + Component []string + 
Operation []string + GetFilters []string + MinDuration string + MaxDuration string + Start *time.Time + End *time.Time +} + +type TagFilterParams struct { + Status []string + ServiceName []string + HttpRoute []string + HttpCode []string + HttpUrl []string + HttpHost []string + HttpMethod []string + Component []string + Operation []string + MinDuration string + MaxDuration string + Start *time.Time + End *time.Time +} type TTLParams struct { Type string Duration string diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index d6f6e1fcc7..d3e15490f1 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -1,6 +1,7 @@ package model import ( + "database/sql" "encoding/json" "fmt" "strconv" @@ -128,6 +129,22 @@ type SearchSpansResult struct { Events [][]interface{} `json:"events"` } +type GetFilterSpansResponseItem struct { + Timestamp string `db:"timestamp" json:"timestamp"` + SpanID string `db:"spanID" json:"spanID"` + TraceID string `db:"traceID" json:"traceID"` + ServiceName string `db:"serviceName" json:"serviceName"` + Operation string `db:"name" json:"operation"` + DurationNano int64 `db:"durationNano" json:"durationNano"` + HttpCode string `db:"httpCode" json:"httpCode"` + HttpMethod string `db:"httpMethod" json:"httpMethod"` +} + +type GetFilterSpansResponse struct { + Spans []GetFilterSpansResponseItem `json:"spans"` + TotalSpans int `json:"totalSpans"` +} + type TraceResult struct { Data []interface{} `json:"data" db:"data"` Total int `json:"total" db:"total"` @@ -247,6 +264,9 @@ type TagItem struct { TagCount int `json:"tagCount" db:"tagCount"` } +type TagFilters struct { + TagKeys string `json:"tagKeys" db:"tagKeys"` +} type ServiceMapDependencyResponseItem struct { Parent string `json:"parent,omitempty" db:"parent,omitempty"` Child string `json:"child,omitempty" db:"child,omitempty"` @@ -259,6 +279,21 @@ type SpanSearchAggregatesResponseItem struct { Value float32 
`json:"value,omitempty" db:"value"` } +type GetFilteredSpansAggregatesResponse struct { + Items map[int64]SpanAggregatesResponseItem `json:"items"` +} +type SpanAggregatesResponseItem struct { + Timestamp int64 `json:"timestamp,omitempty" ` + Value float32 `json:"value,omitempty"` + GroupBy map[string]float32 `json:"groupBy,omitempty"` +} +type SpanAggregatesDBResponseItem struct { + Timestamp int64 `json:"timestamp,omitempty" db:"timestamp" ` + Time string `json:"time,omitempty" db:"time"` + Value float32 `json:"value,omitempty" db:"value"` + GroupBy sql.NullString `json:"groupBy,omitempty" db:"groupBy"` +} + type SetTTLResponseItem struct { Message string `json:"message"` } @@ -272,6 +307,71 @@ type GetTTLResponseItem struct { TracesTime int `json:"traces_ttl_duration_hrs"` } +type DBResponseMinMaxDuration struct { + MinDuration int `db:"min(durationNano)"` + MaxDuration int `db:"max(durationNano)"` +} + +type DBResponseServiceName struct { + ServiceName string `db:"serviceName"` + Count int `db:"count"` +} + +type DBResponseHttpCode struct { + HttpCode string `db:"httpCode"` + Count int `db:"count"` +} + +type DBResponseHttpRoute struct { + HttpRoute string `db:"httpRoute"` + Count int `db:"count"` +} + +type DBResponseHttpUrl struct { + HttpUrl string `db:"httpUrl"` + Count int `db:"count"` +} + +type DBResponseHttpMethod struct { + HttpMethod string `db:"httpMethod"` + Count int `db:"count"` +} + +type DBResponseHttpHost struct { + HttpHost string `db:"httpHost"` + Count int `db:"count"` +} + +type DBResponseOperation struct { + Operation string `db:"name"` + Count int `db:"count"` +} + +type DBResponseComponent struct { + Component sql.NullString `db:"component"` + Count int `db:"count"` +} + +type DBResponseErrors struct { + NumErrors int `db:"numErrors"` +} + +type DBResponseTotal struct { + NumTotal int `db:"numTotal"` +} + +type SpanFiltersResponse struct { + ServiceName map[string]int `json:"serviceName"` + Status map[string]int `json:"status"` + 
Duration map[string]int `json:"duration"` + Operation map[string]int `json:"operation"` + HttpCode map[string]int `json:"httpCode"` + HttpUrl map[string]int `json:"httpUrl"` + HttpMethod map[string]int `json:"httpMethod"` + HttpRoute map[string]int `json:"httpRoute"` + HttpHost map[string]int `json:"httpHost"` + Component map[string]int `json:"component"` +} type Error struct { ExceptionType string `json:"exceptionType" db:"exceptionType"` ExceptionMsg string `json:"exceptionMessage" db:"exceptionMessage"` From be5d1f0090564a545abf65ebc20916a36a2fb02b Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Wed, 26 Jan 2022 21:40:44 +0530 Subject: [PATCH 30/81] feat: adding disable and anonymous functionality to telemetry collected (#637) * chore: changed lib * chore: changed lib * chore: changed lib * chore: changed lib * chore: changes in params * chore: changes in params * chore: moving telemetry to a separate package * feat: enabling telemetry via env var * chore: removing posthog api_key * feat: send heartbeat every 6hr * feat: enabled version in application * feat: added getter and setter apis and struct for user preferences * feat: added version to properties to event * feat: added apis to set and get user preferences and get version * chore: refactored get and set userPreferences apis to dao pattern * chore: added checks for telemetry enabled and anonymous during initialization * chore: changed anonymous user functionality * chore: sanitization * chore: added uuid for userPreferences to send when user is anonymous --- .../clickhouse-setup/docker-compose.yaml | 2 +- .../query-service/templates/statefulset.yaml | 2 + .../signoz-charts/query-service/values.yaml | 1 - deploy/kubernetes/platform/values.yaml | 1 + pkg/query-service/app/http_handler.go | 104 ++++++++---- pkg/query-service/app/parser.go | 12 ++ pkg/query-service/app/server.go | 59 ++++--- pkg/query-service/config/prometheus.yml | 2 +- pkg/query-service/constants/constants.go | 12 ++ 
pkg/query-service/dao/factory.go | 26 +++ pkg/query-service/dao/interfaces/interface.go | 5 + .../dao/interfaces/userPreference.go | 12 ++ pkg/query-service/dao/sqlite/connection.go | 70 ++++++++ .../dao/sqlite/userPreferenceImpl.go | 91 ++++++++++ pkg/query-service/go.mod | 11 +- pkg/query-service/go.sum | 37 ++--- pkg/query-service/model/userPreferences.go | 27 +++ pkg/query-service/telemetry/ignoredPaths.go | 9 + pkg/query-service/telemetry/telemetry.go | 155 ++++++++++++++++++ pkg/query-service/version/version.go | 24 +++ 20 files changed, 571 insertions(+), 91 deletions(-) create mode 100644 pkg/query-service/dao/factory.go create mode 100644 pkg/query-service/dao/interfaces/interface.go create mode 100644 pkg/query-service/dao/interfaces/userPreference.go create mode 100644 pkg/query-service/dao/sqlite/connection.go create mode 100644 pkg/query-service/dao/sqlite/userPreferenceImpl.go create mode 100644 pkg/query-service/model/userPreferences.go create mode 100644 pkg/query-service/telemetry/ignoredPaths.go create mode 100644 pkg/query-service/telemetry/telemetry.go create mode 100644 pkg/query-service/version/version.go diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 16851112da..6261cd66f3 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -45,8 +45,8 @@ services: environment: - ClickHouseUrl=tcp://clickhouse:9000 - STORAGE=clickhouse - - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w - GODEBUG=netdns=go + - TELEMETRY_ENABLED=true depends_on: clickhouse: diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml index 179bb4d72f..7fbad5057d 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml +++ 
b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.configVars.ClickHouseUrl}} - name: GODEBUG value: netdns=go + - name: TELEMETRY_ENABLED + value: {{ .Values.configVars.TELEMETRY_ENABLED}} # livenessProbe: # httpGet: # path: / diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml index 82d438b51b..038d5802a5 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml @@ -19,7 +19,6 @@ configVars: DruidDatasource: flattened_spans ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password STORAGE: clickhouse - POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w" serviceAccount: diff --git a/deploy/kubernetes/platform/values.yaml b/deploy/kubernetes/platform/values.yaml index 14c369e09a..e37ea7a5d9 100644 --- a/deploy/kubernetes/platform/values.yaml +++ b/deploy/kubernetes/platform/values.yaml @@ -6,6 +6,7 @@ query-service: configVars: ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password STORAGE: clickhouse + TELEMETRY_ENABLED: true cloud: aws diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index e64a9ac1d6..e8a7ebca7f 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -10,10 +10,12 @@ import ( "github.com/gorilla/mux" jsoniter "github.com/json-iterator/go" _ "github.com/mattn/go-sqlite3" - "github.com/posthog/posthog-go" "github.com/prometheus/prometheus/promql" "go.signoz.io/query-service/app/dashboards" + "go.signoz.io/query-service/dao/interfaces" "go.signoz.io/query-service/model" + "go.signoz.io/query-service/telemetry" + "go.signoz.io/query-service/version" "go.uber.org/zap" ) @@ -33,28 +35,26 
@@ func NewRouter() *mux.Router { type APIHandler struct { // queryService *querysvc.QueryService // queryParser queryParser - basePath string - apiPrefix string - reader *Reader - pc *posthog.Client - distinctId string - ready func(http.HandlerFunc) http.HandlerFunc + basePath string + apiPrefix string + reader *Reader + relationalDB *interfaces.ModelDao + ready func(http.HandlerFunc) http.HandlerFunc } // NewAPIHandler returns an APIHandler -func NewAPIHandler(reader *Reader, pc *posthog.Client, distinctId string) (*APIHandler, error) { +func NewAPIHandler(reader *Reader, relationalDB *interfaces.ModelDao) (*APIHandler, error) { aH := &APIHandler{ - reader: reader, - pc: pc, - distinctId: distinctId, + reader: reader, + relationalDB: relationalDB, } aH.ready = aH.testReady - errReadingDashboards := dashboards.LoadDashboardFiles() - if errReadingDashboards != nil { - return nil, errReadingDashboards - } + dashboards.LoadDashboardFiles() + // if errReadingDashboards != nil { + // return nil, errReadingDashboards + // } return aH, nil } @@ -184,6 +184,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) { router.HandleFunc("/api/v1/dashboards/{uuid}", aH.deleteDashboard).Methods(http.MethodDelete) router.HandleFunc("/api/v1/user", aH.user).Methods(http.MethodPost) + router.HandleFunc("/api/v1/feedback", aH.submitFeedback).Methods(http.MethodPost) // router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet) router.HandleFunc("/api/v1/services", aH.getServices).Methods(http.MethodGet) @@ -203,10 +204,16 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) { router.HandleFunc("/api/v1/serviceMapDependencies", aH.serviceMapDependencies).Methods(http.MethodGet) router.HandleFunc("/api/v1/settings/ttl", aH.setTTL).Methods(http.MethodPost) router.HandleFunc("/api/v1/settings/ttl", aH.getTTL).Methods(http.MethodGet) + + router.HandleFunc("/api/v1/userPreferences", aH.setUserPreferences).Methods(http.MethodPost) + 
router.HandleFunc("/api/v1/userPreferences", aH.getUserPreferences).Methods(http.MethodGet) + router.HandleFunc("/api/v1/version", aH.getVersion).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getSpanFilters", aH.getSpanFilters).Methods(http.MethodGet) router.HandleFunc("/api/v1/getTagFilters", aH.getTagFilters).Methods(http.MethodGet) router.HandleFunc("/api/v1/getFilteredSpans", aH.getFilteredSpans).Methods(http.MethodGet) router.HandleFunc("/api/v1/getFilteredSpans/aggregates", aH.getFilteredSpanAggregates).Methods(http.MethodGet) + router.HandleFunc("/api/v1/errors", aH.getErrors).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorWithId", aH.getErrorForId).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorWithType", aH.getErrorForType).Methods(http.MethodGet) @@ -656,11 +663,11 @@ func (aH *APIHandler) submitFeedback(w http.ResponseWriter, r *http.Request) { email := postData["email"] - (*aH.pc).Enqueue(posthog.Capture{ - DistinctId: distinctId, - Event: "InProduct Feeback Submitted", - Properties: posthog.NewProperties().Set("email", email).Set("message", message), - }) + data := map[string]interface{}{ + "email": email, + "message": message, + } + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_INPRODUCT_FEEDBACK, data) } @@ -673,11 +680,12 @@ func (aH *APIHandler) user(w http.ResponseWriter, r *http.Request) { } } - (*aH.pc).Enqueue(posthog.Identify{ - DistinctId: aH.distinctId, - Properties: posthog.NewProperties(). 
- Set("email", user.Email).Set("name", user.Name), - }) + telemetry.GetInstance().IdentifyUser(user) + data := map[string]interface{}{ + "name": user.Name, + "email": user.Email, + } + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_USER, data) } @@ -852,13 +860,12 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) { if aH.handleError(w, err, http.StatusBadRequest) { return } - // if len(*result) != 4 { - (*aH.pc).Enqueue(posthog.Capture{ - DistinctId: distinctId, - Event: "Different Number of Services", - Properties: posthog.NewProperties().Set("number", len(*result)), - }) - // } + + data := map[string]interface{}{ + "number": len(*result), + } + + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data) aH.writeJSON(w, r, result) } @@ -1062,6 +1069,39 @@ func (aH *APIHandler) getTTL(w http.ResponseWriter, r *http.Request) { aH.writeJSON(w, r, result) } +func (aH *APIHandler) getUserPreferences(w http.ResponseWriter, r *http.Request) { + + result, apiError := (*aH.relationalDB).FetchUserPreference(context.Background()) + if apiError != nil { + aH.respondError(w, apiError, "Error from Fetch Dao") + return + } + + aH.writeJSON(w, r, result) +} + +func (aH *APIHandler) setUserPreferences(w http.ResponseWriter, r *http.Request) { + userParams, err := parseUserPreferences(r) + if aH.handleError(w, err, http.StatusBadRequest) { + return + } + + apiErr := (*aH.relationalDB).UpdateUserPreferece(context.Background(), userParams) + if apiErr != nil && aH.handleError(w, apiErr.Err, http.StatusInternalServerError) { + return + } + + aH.writeJSON(w, r, map[string]string{"data": "user preferences set successfully"}) + +} + +func (aH *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) { + + version := version.GetVersion() + + aH.writeJSON(w, r, map[string]string{"version": version}) +} + // func (aH *APIHandler) getApplicationPercentiles(w http.ResponseWriter, r *http.Request) { // // vars := 
mux.Vars(r) diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 39113823b9..1c02d17652 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -1045,3 +1045,15 @@ func parseGetTTL(r *http.Request) (*model.GetTTLParams, error) { return &model.GetTTLParams{Type: typeTTL, GetAllTTL: getAllTTL}, nil } + +func parseUserPreferences(r *http.Request) (*model.UserPreferences, error) { + + var userPreferences model.UserPreferences + err := json.NewDecoder(r.Body).Decode(&userPreferences) + if err != nil { + return nil, err + } + + return &userPreferences, nil + +} diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 8e2b42b28a..62e44fc522 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -7,16 +7,18 @@ import ( "os" "time" - "github.com/google/uuid" "github.com/gorilla/handlers" "github.com/gorilla/mux" - "github.com/posthog/posthog-go" + "github.com/rs/cors" "github.com/soheilhy/cmux" "go.signoz.io/query-service/app/clickhouseReader" "go.signoz.io/query-service/app/dashboards" "go.signoz.io/query-service/app/druidReader" + "go.signoz.io/query-service/constants" + "go.signoz.io/query-service/dao" "go.signoz.io/query-service/healthcheck" + "go.signoz.io/query-service/telemetry" "go.signoz.io/query-service/utils" "go.uber.org/zap" ) @@ -66,38 +68,35 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { // if err != nil { // return nil, err // } - httpServer, err := createHTTPServer() - if err != nil { - return nil, err - } - - return &Server{ + s := &Server{ // logger: logger, // querySvc: querySvc, // queryOptions: options, // tracer: tracer, // grpcServer: grpcServer, serverOptions: serverOptions, - httpServer: httpServer, separatePorts: true, // separatePorts: grpcPort != httpPort, unavailableChannel: make(chan healthcheck.Status), - }, nil -} + } + httpServer, err := s.createHTTPServer() -var posthogClient posthog.Client -var 
distinctId string - -func createHTTPServer() (*http.Server, error) { - - posthogClient = posthog.New("H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w") - distinctId = uuid.New().String() - - localDB, err := dashboards.InitDB("./signoz.db") if err != nil { return nil, err } + s.httpServer = httpServer + + return s, nil +} + +func (s *Server) createHTTPServer() (*http.Server, error) { + + localDB, err := dashboards.InitDB(constants.RELATIONAL_DATASOURCE_PATH) + if err != nil { + return nil, err + } + localDB.SetMaxOpenConns(10) var reader Reader @@ -114,14 +113,19 @@ func createHTTPServer() (*http.Server, error) { return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage) } - apiHandler, err := NewAPIHandler(&reader, &posthogClient, distinctId) + relationalDB, err := dao.FactoryDao("sqlite") + if err != nil { + return nil, err + } + + apiHandler, err := NewAPIHandler(&reader, relationalDB) if err != nil { return nil, err } r := NewRouter() - r.Use(analyticsMiddleware) + r.Use(s.analyticsMiddleware) r.Use(loggingMiddleware) apiHandler.RegisterRoutes(r) @@ -152,15 +156,16 @@ func loggingMiddleware(next http.Handler) http.Handler { }) } -func analyticsMiddleware(next http.Handler) http.Handler { +func (s *Server) analyticsMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { route := mux.CurrentRoute(r) path, _ := route.GetPathTemplate() - posthogClient.Enqueue(posthog.Capture{ - DistinctId: distinctId, - Event: path, - }) + data := map[string]interface{}{"path": path} + + if _, ok := telemetry.IgnoredPaths()[path]; !ok { + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data) + } next.ServeHTTP(w, r) }) diff --git a/pkg/query-service/config/prometheus.yml b/pkg/query-service/config/prometheus.yml index c4834f9849..c515a46662 100644 --- a/pkg/query-service/config/prometheus.yml +++ b/pkg/query-service/config/prometheus.yml @@ -23,4 +23,4 @@ scrape_configs: remote_read: 
- - url: tcp://3.135.248.251:9001/?database=signoz_metrics + - url: tcp://localhost:9001/?database=signoz_metrics diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 1a77b4053b..422396bf90 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -2,14 +2,26 @@ package constants import ( "os" + "strconv" ) const HTTPHostPort = "0.0.0.0:8080" var DruidClientUrl = os.Getenv("DruidClientUrl") var DruidDatasource = os.Getenv("DruidDatasource") +var DEFAULT_TELEMETRY_ANONYMOUS = false + +func IsTelemetryEnabled() bool { + isTelemetryEnabledStr := os.Getenv("TELEMETRY_ENABLED") + isTelemetryEnabledBool, err := strconv.ParseBool(isTelemetryEnabledStr) + if err != nil { + return true + } + return isTelemetryEnabledBool +} const TraceTTL = "traces" const MetricsTTL = "metrics" const ALERTMANAGER_API_PREFIX = "http://alertmanager:9093/api/" +const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db" diff --git a/pkg/query-service/dao/factory.go b/pkg/query-service/dao/factory.go new file mode 100644 index 0000000000..92f2b7e534 --- /dev/null +++ b/pkg/query-service/dao/factory.go @@ -0,0 +1,26 @@ +package dao + +import ( + "fmt" + + "go.signoz.io/query-service/constants" + "go.signoz.io/query-service/dao/interfaces" + "go.signoz.io/query-service/dao/sqlite" +) + +func FactoryDao(engine string) (*interfaces.ModelDao, error) { + var i interfaces.ModelDao + var err error + + switch engine { + case "sqlite": + i, err = sqlite.InitDB(constants.RELATIONAL_DATASOURCE_PATH) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("RelationalDB type: %s is not supported in query service", engine) + } + + return &i, nil +} diff --git a/pkg/query-service/dao/interfaces/interface.go b/pkg/query-service/dao/interfaces/interface.go new file mode 100644 index 0000000000..e7d043fbf7 --- /dev/null +++ b/pkg/query-service/dao/interfaces/interface.go @@ -0,0 +1,5 @@ +package 
interfaces + +type ModelDao interface { + UserPreferenceDao +} diff --git a/pkg/query-service/dao/interfaces/userPreference.go b/pkg/query-service/dao/interfaces/userPreference.go new file mode 100644 index 0000000000..c4770ae2de --- /dev/null +++ b/pkg/query-service/dao/interfaces/userPreference.go @@ -0,0 +1,12 @@ +package interfaces + +import ( + "context" + + "go.signoz.io/query-service/model" +) + +type UserPreferenceDao interface { + UpdateUserPreferece(ctx context.Context, userPreferences *model.UserPreferences) *model.ApiError + FetchUserPreference(ctx context.Context) (*model.UserPreferences, *model.ApiError) +} diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go new file mode 100644 index 0000000000..8237cc21ee --- /dev/null +++ b/pkg/query-service/dao/sqlite/connection.go @@ -0,0 +1,70 @@ +package sqlite + +import ( + "context" + "fmt" + + "github.com/jmoiron/sqlx" + "go.signoz.io/query-service/constants" + "go.signoz.io/query-service/telemetry" +) + +type ModelDaoSqlite struct { + db *sqlx.DB +} + +// InitDB sets up setting up the connection pool global variable. 
+func InitDB(dataSourceName string) (*ModelDaoSqlite, error) { + var err error + + db, err := sqlx.Open("sqlite3", dataSourceName) + if err != nil { + return nil, err + } + db.SetMaxOpenConns(10) + + table_schema := `CREATE TABLE IF NOT EXISTS user_preferences ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + uuid TEXT NOT NULL, + isAnonymous INTEGER NOT NULL DEFAULT 0 CHECK(isAnonymous IN (0,1)), + hasOptedUpdates INTEGER NOT NULL DEFAULT 1 CHECK(hasOptedUpdates IN (0,1)) + );` + + _, err = db.Exec(table_schema) + if err != nil { + return nil, fmt.Errorf("Error in creating user_preferences table: ", err.Error()) + } + + mds := &ModelDaoSqlite{db: db} + + err = mds.initializeUserPreferences() + if err != nil { + return nil, err + } + return mds, nil + +} +func (mds *ModelDaoSqlite) initializeUserPreferences() error { + + // set anonymous setting as default in case of any failures to fetch UserPreference in below section + telemetry.GetInstance().SetTelemetryAnonymous(constants.DEFAULT_TELEMETRY_ANONYMOUS) + + ctx := context.Background() + userPreference, apiError := mds.FetchUserPreference(ctx) + + if apiError != nil { + return apiError.Err + } + if userPreference == nil { + userPreference, apiError = mds.CreateDefaultUserPreference(ctx) + } + if apiError != nil { + return apiError.Err + } + + // set telemetry fields from userPreferences + telemetry.GetInstance().SetTelemetryAnonymous(userPreference.GetIsAnonymous()) + telemetry.GetInstance().SetDistinctId(userPreference.GetUUID()) + + return nil +} diff --git a/pkg/query-service/dao/sqlite/userPreferenceImpl.go b/pkg/query-service/dao/sqlite/userPreferenceImpl.go new file mode 100644 index 0000000000..618a78ef1d --- /dev/null +++ b/pkg/query-service/dao/sqlite/userPreferenceImpl.go @@ -0,0 +1,91 @@ +package sqlite + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "go.signoz.io/query-service/model" + "go.signoz.io/query-service/telemetry" + "go.uber.org/zap" +) + +func (mds *ModelDaoSqlite) 
FetchUserPreference(ctx context.Context) (*model.UserPreferences, *model.ApiError) { + + userPreferences := []model.UserPreferences{} + query := fmt.Sprintf("SELECT id, uuid, isAnonymous, hasOptedUpdates FROM user_preferences;") + + err := mds.db.Select(&userPreferences, query) + + if err != nil { + zap.S().Debug("Error in processing sql query: ", err) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + // zap.S().Info(query) + if len(userPreferences) > 1 { + zap.S().Debug("Error in processing sql query: ", fmt.Errorf("more than 1 row in user_preferences found")) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + if len(userPreferences) == 0 { + return nil, nil + } + + return &userPreferences[0], nil + +} + +func (mds *ModelDaoSqlite) UpdateUserPreferece(ctx context.Context, userPreferences *model.UserPreferences) *model.ApiError { + + tx, err := mds.db.Begin() + if err != nil { + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + userPreferencesFound, apiError := mds.FetchUserPreference(ctx) + if apiError != nil { + return apiError + } + + stmt, err := tx.Prepare(`UPDATE user_preferences SET isAnonymous=$1, hasOptedUpdates=$2 WHERE id=$3;`) + defer stmt.Close() + + if err != nil { + zap.S().Errorf("Error in preparing statement for INSERT to user_preferences\n", err) + tx.Rollback() + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + query_result, err := stmt.Exec(userPreferences.GetIsAnonymous(), userPreferences.GetHasOptedUpdate(), userPreferencesFound.GetId()) + if err != nil { + zap.S().Errorf("Error in Executing prepared statement for INSERT to user_preferences\n", err) + tx.Rollback() // return an error too, we may want to wrap them + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + zap.S().Debug(query_result.RowsAffected()) + zap.S().Debug(userPreferences.GetIsAnonymous(), userPreferences.GetHasOptedUpdate(), userPreferencesFound.GetId()) + + err = tx.Commit() + if 
err != nil { + zap.S().Errorf("Error in commiting transaction for INSERT to user_preferences\n", err) + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + telemetry.GetInstance().SetTelemetryAnonymous(userPreferences.GetIsAnonymous()) + + return nil +} + +func (mds *ModelDaoSqlite) CreateDefaultUserPreference(ctx context.Context) (*model.UserPreferences, *model.ApiError) { + + uuid := uuid.New().String() + _, err := mds.db.ExecContext(ctx, `INSERT INTO user_preferences (uuid, isAnonymous, hasOptedUpdates) VALUES (?, 0, 1);`, uuid) + + if err != nil { + zap.S().Errorf("Error in preparing statement for INSERT to user_preferences\n", err) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return mds.FetchUserPreference(ctx) + +} diff --git a/pkg/query-service/go.mod b/pkg/query-service/go.mod index d64a824ab2..79dfc473bb 100644 --- a/pkg/query-service/go.mod +++ b/pkg/query-service/go.mod @@ -4,14 +4,19 @@ go 1.14 require ( cloud.google.com/go v0.88.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/ClickHouse/clickhouse-go v1.4.5 github.com/Microsoft/go-winio v0.5.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect github.com/aws/aws-sdk-go v1.27.0 // indirect + + github.com/beorn7/perks v1.0.1 // indirect + github.com/containerd/containerd v1.4.12 // indirect github.com/dhui/dktest v0.3.4 // indirect github.com/docker/docker v20.10.12+incompatible // indirect + github.com/frankban/quicktest v1.13.0 // indirect github.com/go-kit/log v0.1.0 github.com/golang-migrate/migrate/v4 v4.14.1 @@ -36,7 +41,6 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect 
github.com/oklog/oklog v0.3.2 github.com/oklog/run v1.1.0 // indirect github.com/onsi/gomega v1.14.0 // indirect @@ -45,7 +49,6 @@ require ( github.com/pascaldekloe/goe v0.1.0 // indirect github.com/pierrec/lz4 v2.4.1+incompatible // indirect github.com/pkg/errors v0.9.1 - github.com/posthog/posthog-go v0.0.0-20200525173953-e46dc8e6b89b github.com/prometheus/client_golang v0.9.0-pre1.0.20181001174001-0a8115f42e03 github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 github.com/prometheus/procfs v0.0.8 // indirect @@ -53,9 +56,11 @@ require ( github.com/rs/cors v1.7.0 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/segmentio/backo-go v1.0.0 // indirect github.com/smartystreets/goconvey v1.6.4 github.com/soheilhy/cmux v0.1.4 github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/pflag v1.0.3 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect @@ -68,7 +73,7 @@ require ( google.golang.org/grpc/examples v0.0.0-20210803221256-6ba56c814be7 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect - gotest.tools/v3 v3.0.3 // indirect + gopkg.in/segmentio/analytics-go.v3 v3.1.0 ) diff --git a/pkg/query-service/go.sum b/pkg/query-service/go.sum index 7daf49653f..034d9b992e 100644 --- a/pkg/query-service/go.sum +++ b/pkg/query-service/go.sum @@ -58,7 +58,6 @@ github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhH github.com/ClickHouse/clickhouse-go v1.4.5 h1:FfhyEnv6/BaWldyjgT2k4gDDmeNwJ9C4NbY/MXxJlXk= github.com/ClickHouse/clickhouse-go v1.4.5/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= 
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -76,11 +75,13 @@ github.com/aws/aws-sdk-go v1.13.44-0.20180507225419-00862f899353/go.mod h1:ZRmQr github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1 h1:OnJHjoVbY69GG4gclp0ngXfywigLhR6rrgUxmxQRWO4= github.com/beorn7/perks v0.0.0-20160229213445-3ac7bf7a47d1/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -104,16 +105,12 @@ github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68m github.com/cockroachdb/cockroach 
v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.12 h1:V+SHzYmhng/iju6M5nFrpTTusrhidoxKTwdwLw+u4c4= -github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -123,15 +120,12 @@ github.com/dgrijalva/jwt-go v3.0.1-0.20161101193935-9ed569b5d1ac+incompatible/go github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bits 
v0.0.0-20160601073636-2ad8d707cc05/go.mod h1:/9UYwwvZuEgp+mQ4960SHWCU1FS+FgdFX+m5ExFByNs= +github.com/dhui/dktest v0.3.3 h1:DBuH/9GFaWbDRa42qsut/hbQu+srAQ0rPWnUoiGX7CA= github.com/dhui/dktest v0.3.3/go.mod h1:EML9sP4sqJELHn4jV7B0TY8oF6077nk83/tz7M56jcQ= -github.com/dhui/dktest v0.3.4 h1:VbUEcaSP+U2/yUr9d2JhSThXYEnDlGabRSHe2rIE46E= -github.com/dhui/dktest v0.3.4/go.mod h1:4m4n6lmXlmVfESth7mzdcv8nBI5mOb5UROPqjM02csU= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8= github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= -github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -177,9 +171,8 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= 
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.14.1 h1:qmRd/rNGjM1r3Ve5gHd5ZplytrD02UcItYNxJ3iUHHE= github.com/golang-migrate/migrate/v4 v4.14.1/go.mod h1:l7Ks0Au6fYHuUIxUhQ0rcVX1uLlJg54C/VvW7tvxSz0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -378,7 +371,6 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -421,8 +413,6 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod 
h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -479,8 +469,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posthog/posthog-go v0.0.0-20200525173953-e46dc8e6b89b h1:a8lLvAV+8OQXnG18ZOV5ctFQY7jLHa3brA9BBhe1SVs= -github.com/posthog/posthog-go v0.0.0-20200525173953-e46dc8e6b89b/go.mod h1:s7IZAf1WuSPTb/R/agnboYa+gDnoKGdqIk7p2aFHDYs= github.com/prometheus/client_golang v0.9.0-pre1.0.20181001174001-0a8115f42e03 h1:hqNopISksxji/N5zEy1xMN7TrnSyVG/LymiwnkXi6/Q= github.com/prometheus/client_golang v0.9.0-pre1.0.20181001174001-0a8115f42e03/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= @@ -507,6 +495,8 @@ github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZj github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/backo-go v1.0.0 h1:kbOAtGJY2DqOR0jfRkYEorx/b18RgtepGtY3+Cpe6qA= +github.com/segmentio/backo-go v1.0.0/go.mod h1:kJ9mm9YmoWSkk+oQ+5Cj8DEoRCX2JT6As4kEtIIOp1M= github.com/shopspring/decimal 
v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -795,7 +785,6 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -825,7 +814,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools 
v0.0.0-20200806022845-90696ccdc692/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -839,7 +827,6 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -931,7 +918,6 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1004,6 +990,8 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1 gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod 
h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/segmentio/analytics-go.v3 v3.1.0 h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U= +gopkg.in/segmentio/analytics-go.v3 v3.1.0/go.mod h1:4QqqlTlSSpVlWA9/9nDcPw+FkM2yv1NQoYjUbL9/JAw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1018,9 +1006,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/query-service/model/userPreferences.go b/pkg/query-service/model/userPreferences.go new file mode 100644 index 0000000000..0a6ca2d1e4 --- /dev/null +++ b/pkg/query-service/model/userPreferences.go @@ -0,0 +1,27 @@ +package model + +type UserPreferences struct { + Id int `json:"id" db:"id"` + Uuid string `json:"uuid" db:"uuid"` + IsAnonymous bool `json:"isAnonymous" 
db:"isAnonymous"` + HasOptedUpdates bool `json:"hasOptedUpdates" db:"hasOptedUpdates"` +} + +func (up *UserPreferences) SetIsAnonymous(isAnonymous bool) { + up.IsAnonymous = isAnonymous +} +func (up *UserPreferences) SetHasOptedUpdate(hasOptedUpdates bool) { + up.HasOptedUpdates = hasOptedUpdates +} +func (up *UserPreferences) GetIsAnonymous() bool { + return up.IsAnonymous +} +func (up *UserPreferences) GetHasOptedUpdate() bool { + return up.HasOptedUpdates +} +func (up *UserPreferences) GetId() int { + return up.Id +} +func (up *UserPreferences) GetUUID() string { + return up.Uuid +} diff --git a/pkg/query-service/telemetry/ignoredPaths.go b/pkg/query-service/telemetry/ignoredPaths.go new file mode 100644 index 0000000000..3fc2f1898a --- /dev/null +++ b/pkg/query-service/telemetry/ignoredPaths.go @@ -0,0 +1,9 @@ +package telemetry + +func IgnoredPaths() map[string]struct{} { + ignoredPaths := map[string]struct{}{ + "/api/v1/tags": struct{}{}, + } + + return ignoredPaths +} diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go new file mode 100644 index 0000000000..90632784e7 --- /dev/null +++ b/pkg/query-service/telemetry/telemetry.go @@ -0,0 +1,155 @@ +package telemetry + +import ( + "io/ioutil" + "net/http" + "sync" + "time" + + "go.signoz.io/query-service/constants" + "go.signoz.io/query-service/model" + "go.signoz.io/query-service/version" + "gopkg.in/segmentio/analytics-go.v3" +) + +const ( + TELEMETRY_EVENT_PATH = "API Call" + TELEMETRY_EVENT_USER = "User" + TELEMETRY_EVENT_INPRODUCT_FEEDBACK = "InProduct Feeback Submitted" + TELEMETRY_EVENT_NUMBER_OF_SERVICES = "Number of Services" + TELEMETRY_EVENT_HEART_BEAT = "Heart Beat" +) + +const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz" + +var telemetry *Telemetry +var once sync.Once + +type Telemetry struct { + operator analytics.Client + ipAddress string + isEnabled bool + isAnonymous bool + distinctId string +} + +func createTelemetry() { + telemetry = &Telemetry{ + 
operator: analytics.New(api_key), + ipAddress: getOutboundIP(), + } + + data := map[string]interface{}{} + + telemetry.SetTelemetryEnabled(constants.IsTelemetryEnabled()) + telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data) + ticker := time.NewTicker(6 * time.Hour) + go func() { + for { + select { + case <-ticker.C: + telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data) + } + } + }() + +} + +// Get preferred outbound ip of this machine +func getOutboundIP() string { + + ip := []byte("NA") + resp, err := http.Get("https://api.ipify.org?format=text") + + defer resp.Body.Close() + if err == nil { + ipBody, err := ioutil.ReadAll(resp.Body) + if err == nil { + ip = ipBody + } + } + + return string(ip) +} + +func (a *Telemetry) IdentifyUser(user *model.User) { + if !a.isTelemetryEnabled() || a.isTelemetryAnonymous() { + return + } + + a.operator.Enqueue(analytics.Identify{ + UserId: a.ipAddress, + Traits: analytics.NewTraits().SetName(user.Name).SetEmail(user.Email).Set("ip", a.ipAddress), + }) + +} +func (a *Telemetry) checkEvents(event string) bool { + sendEvent := true + if event == TELEMETRY_EVENT_USER && a.isTelemetryAnonymous() { + sendEvent = false + } + return sendEvent +} + +func (a *Telemetry) SendEvent(event string, data map[string]interface{}) { + + if !a.isTelemetryEnabled() { + return + } + + ok := a.checkEvents(event) + if !ok { + return + } + + // zap.S().Info(data) + properties := analytics.NewProperties() + properties.Set("version", version.GetVersion()) + + for k, v := range data { + properties.Set(k, v) + } + + userId := a.ipAddress + if a.isTelemetryAnonymous() { + userId = a.GetDistinctId() + } + + a.operator.Enqueue(analytics.Track{ + Event: event, + UserId: userId, + Properties: properties, + }) +} + +func (a *Telemetry) GetDistinctId() string { + return a.distinctId +} +func (a *Telemetry) SetDistinctId(distinctId string) { + a.distinctId = distinctId +} + +func (a *Telemetry) isTelemetryAnonymous() bool { + return a.isAnonymous +} + +func (a 
*Telemetry) SetTelemetryAnonymous(value bool) { + a.isAnonymous = value +} + +func (a *Telemetry) isTelemetryEnabled() bool { + return a.isEnabled +} + +func (a *Telemetry) SetTelemetryEnabled(value bool) { + a.isEnabled = value +} + +func GetInstance() *Telemetry { + + once.Do(func() { + createTelemetry() + }) + + return telemetry +} diff --git a/pkg/query-service/version/version.go b/pkg/query-service/version/version.go new file mode 100644 index 0000000000..cad1bfa6f0 --- /dev/null +++ b/pkg/query-service/version/version.go @@ -0,0 +1,24 @@ +package version + +import ( + "go.uber.org/zap" +) + +// These fields are set during an official build +// Global vars set from command-line arguments +var ( + version = "--" + buildhash = "--" + buildtime = "--" +) + +//PrintVersionInfo displays the kyverno version - git version +func PrintVersionInfo() { + zap.S().Info("Version: ", version) + zap.S().Info("BuildHash: ", buildhash) + zap.S().Info("BuildTime: ", buildtime) +} + +func GetVersion() string { + return version +} From 0f4e5c9ef0c88180d96bbf5ef4eeef5834a12f4f Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Wed, 26 Jan 2022 21:43:15 +0530 Subject: [PATCH 31/81] change migration file path (#630) * chore: Add migration file path in otel collector config * Update otel-collector-config.yaml --- deploy/docker/clickhouse-setup/otel-collector-config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index eeaf7221d7..0519785309 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -45,6 +45,7 @@ extensions: exporters: clickhouse: datasource: tcp://clickhouse:9000 + migrations: /migrations clickhousemetricswrite: endpoint: tcp://clickhouse:9000/?database=signoz_metrics resource_to_telemetry_conversion: @@ -64,4 +65,4 @@ service: exporters: 
[clickhousemetricswrite] metrics/spanmetrics: receivers: [otlp/spanmetrics] - exporters: [prometheus] \ No newline at end of file + exporters: [prometheus] From 50a88a8726c046cb7c036da77579f40264952175 Mon Sep 17 00:00:00 2001 From: palash-signoz <88981777+palash-signoz@users.noreply.github.com> Date: Wed, 26 Jan 2022 21:46:59 +0530 Subject: [PATCH 32/81] BUG: refresh button is now fixed (#590) * chore: issue is fixed * chore: unused import is removed --- .../GridGraphLayout/Graph/FullView/index.tsx | 41 +++++++++++++------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/frontend/src/container/GridGraphLayout/Graph/FullView/index.tsx b/frontend/src/container/GridGraphLayout/Graph/FullView/index.tsx index b2cbc0366d..61aae23128 100644 --- a/frontend/src/container/GridGraphLayout/Graph/FullView/index.tsx +++ b/frontend/src/container/GridGraphLayout/Graph/FullView/index.tsx @@ -9,18 +9,21 @@ import GridGraphComponent from 'container/GridGraphComponent'; import { timeItems, timePreferance, + timePreferenceType, } from 'container/NewWidget/RightContainer/timeItems'; +import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond'; import getChartData from 'lib/getChartData'; import GetMaxMinTime from 'lib/getMaxMinTime'; +import GetMinMax from 'lib/getMinMax'; import getStartAndEndTime from 'lib/getStartAndEndTime'; import React, { useCallback, useEffect, useState } from 'react'; import { useSelector } from 'react-redux'; import { AppState } from 'store/reducers'; -import { GlobalTime } from 'types/actions/globalTime'; import { Widgets } from 'types/api/dashboard/getAll'; +import { GlobalReducer } from 'types/reducer/globalTime'; import EmptyGraph from './EmptyGraph'; -import { GraphContainer, NotFoundContainer, TimeContainer } from './styles'; +import { NotFoundContainer, TimeContainer } from './styles'; const FullView = ({ widget, @@ -29,9 +32,10 @@ const FullView = ({ noDataGraph = false, name, }: FullViewProps): JSX.Element => { - const { 
minTime, maxTime } = useSelector( - (state) => state.globalTime, - ); + const { minTime, maxTime, selectedTime: globalSelectedTime } = useSelector< + AppState, + GlobalReducer + >((state) => state.globalTime); const [state, setState] = useState({ error: false, @@ -59,20 +63,33 @@ const FullView = ({ minTime, }); - const { end, start } = getStartAndEndTime({ - type: selectedTime.enum, - maxTime: maxMinTime.maxTime, - minTime: maxMinTime.minTime, - }); + const getMinMax = (time: timePreferenceType) => { + if (time === 'GLOBAL_TIME') { + const minMax = GetMinMax(globalSelectedTime); + return { + min: convertToNanoSecondsToSecond(minMax.minTime / 1000), + max: convertToNanoSecondsToSecond(minMax.maxTime / 1000), + }; + } + + const minMax = getStartAndEndTime({ + type: selectedTime.enum, + maxTime: maxMinTime.maxTime, + minTime: maxMinTime.minTime, + }); + return { min: parseInt(minMax.start, 10), max: parseInt(minMax.end, 10) }; + }; + + const queryMinMax = getMinMax(selectedTime.enum); const response = await Promise.all( widget.query .filter((e) => e.query.length !== 0) .map(async (query) => { const result = await getQueryResult({ - end, + end: queryMinMax.max.toString(), query: query.query, - start: start, + start: queryMinMax.min.toString(), step: '60', }); return { From 6e6fd9b44ba1c85973ad7588111032883d939604 Mon Sep 17 00:00:00 2001 From: Anik Das Date: Wed, 26 Jan 2022 21:53:03 +0530 Subject: [PATCH 33/81] closes #569: critical css using critters (#570) * feat(ui): critical css inline using critters Signed-off-by: Anik Das * fix: remove duplicate preload key Signed-off-by: Anik Das --- frontend/package.json | 1 + frontend/webpack.config.prod.js | 9 ++++ frontend/yarn.lock | 76 +++++++++++++++++++++++++++++++-- 3 files changed, 83 insertions(+), 3 deletions(-) diff --git a/frontend/package.json b/frontend/package.json index 46ced969cf..fbde581ba4 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -116,6 +116,7 @@ "bundlesize": "^0.18.1", 
"compression-webpack-plugin": "^9.0.0", "copy-webpack-plugin": "^8.1.0", + "critters-webpack-plugin": "^3.0.1", "cypress": "^8.3.0", "eslint": "^7.30.0", "eslint-config-prettier": "^8.3.0", diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index 4e5db8bbf5..ffc6579085 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -11,6 +11,7 @@ const CssMinimizerPlugin = require('css-minimizer-webpack-plugin'); const MiniCssExtractPlugin = require('mini-css-extract-plugin'); const BundleAnalyzerPlugin = require('webpack-bundle-analyzer') .BundleAnalyzerPlugin; +const Critters = require('critters-webpack-plugin'); const plugins = [ new HtmlWebpackPlugin({ template: 'src/index.html.ejs' }), @@ -27,6 +28,14 @@ const plugins = [ 'process.env': JSON.stringify(process.env), }), new MiniCssExtractPlugin(), + new Critters({ + preload: 'swap', + // Base path location of the CSS files + path: resolve(__dirname, './build/css'), + // Public path of the CSS resources. 
This prefix is removed from the href + publicPath: resolve(__dirname, './public/css'), + fonts: true + }) ]; if (process.env.BUNDLE_ANALYSER === 'true') { diff --git a/frontend/yarn.lock b/frontend/yarn.lock index a11abcc5c5..5911c63739 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -3852,7 +3852,7 @@ chalk@2.1.0: escape-string-regexp "^1.0.5" supports-color "^4.0.0" -chalk@^2.0.0: +chalk@^2.0.0, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -4293,6 +4293,28 @@ create-require@^1.1.0: resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== +critters-webpack-plugin@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/critters-webpack-plugin/-/critters-webpack-plugin-3.0.1.tgz#05aabaceb871fa423ee9a7ceb1139e27f5665753" + integrity sha512-nme1bZ9RxMQLLC/sK6Wo3+fkL9FOu/3BTtVPNZbaV6nZiBBHW42YTekF9fMj7CDP5GYpZ+Rw/r6j1RM36jBRDA== + dependencies: + critters "^0.0.15" + minimatch "^3.0.4" + webpack-log "^3.0.1" + webpack-sources "^1.3.0" + +critters@^0.0.15: + version "0.0.15" + resolved "https://registry.yarnpkg.com/critters/-/critters-0.0.15.tgz#b1c8d18fd18e614471733d7d749deac0f386b738" + integrity sha512-AE7hkXb3eZUbEvS1SKZa+OU4o2kUOXtzVeE/2E/mjU/0mV1wpBT1HfUCWVRS4zwvkBNJ0AQYsVjAoFm+kIhfdw== + dependencies: + chalk "^4.1.0" + css-select "^4.1.3" + parse5 "^6.0.1" + parse5-htmlparser2-tree-adapter "^6.0.1" + postcss "^8.3.7" + pretty-bytes "^5.3.0" + cross-env@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-env/-/cross-env-7.0.3.tgz#865264b29677dc015ba8418918965dd232fc54cf" @@ -8163,6 +8185,11 @@ log-update@^4.0.0: slice-ansi "^4.0.0" wrap-ansi "^6.2.0" 
+loglevelnext@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/loglevelnext/-/loglevelnext-3.0.1.tgz#e3e4659c4061c09264f6812c33586dc55a009a04" + integrity sha512-JpjaJhIN1reaSb26SIxDGtE0uc67gPl19OMVHrr+Ggt6b/Vy60jmCtKgQBrygAH0bhRA2nkxgDvM+8QvR8r0YA== + loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -8453,6 +8480,11 @@ nan@^2.14.0: resolved "https://registry.yarnpkg.com/nan/-/nan-2.15.0.tgz#3f34a473ff18e15c1b5626b62903b5ad6e665fee" integrity sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ== +nanoid@^2.0.3: + version "2.1.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-2.1.11.tgz#ec24b8a758d591561531b4176a01e3ab4f0f0280" + integrity sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA== + nanoid@^3.1.30: version "3.1.30" resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.30.tgz#63f93cc548d2a113dc5dfbc63bfa09e2b9b64362" @@ -8998,7 +9030,14 @@ parse-json@^5.0.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse5@6.0.1: +parse5-htmlparser2-tree-adapter@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" + integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== + dependencies: + parse5 "^6.0.1" + +parse5@6.0.1, parse5@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== @@ -9456,6 +9495,15 @@ postcss@^8.3.5: picocolors "^1.0.0" source-map-js "^1.0.1" +postcss@^8.3.7: + 
version "8.4.5" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.5.tgz#bae665764dfd4c6fcc24dc0fdf7e7aa00cc77f95" + integrity sha512-jBDboWM8qpaqwkMwItqTQTiFikhs/67OYVvblFFTM7MrZjt6yMKd6r2kgXizEbTTljacm4NldIlZnhbjr84QYg== + dependencies: + nanoid "^3.1.30" + picocolors "^1.0.0" + source-map-js "^1.0.1" + prebuild-install@^5.3.3: version "5.3.6" resolved "https://registry.yarnpkg.com/prebuild-install/-/prebuild-install-5.3.6.tgz#7c225568d864c71d89d07f8796042733a3f54291" @@ -9504,7 +9552,7 @@ prettier@2.2.1: resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.2.1.tgz#795a1a78dd52f073da0cd42b21f9c91381923ff5" integrity sha512-PqyhM2yCjg/oKkFPtTGUojv7gnZAoG80ttl45O6x2Ug/rMJw4wcc9k6aaf2hibP7BGVCCM33gZoGjyvt9mm16Q== -pretty-bytes@^5.6.0: +pretty-bytes@^5.3.0, pretty-bytes@^5.6.0: version "5.6.0" resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.6.0.tgz#356256f643804773c82f64723fe78c92c62beaeb" integrity sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg== @@ -10919,6 +10967,11 @@ sockjs@^0.3.21: uuid "^3.4.0" websocket-driver "^0.7.4" +source-list-map@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" + integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== + source-map-js@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.1.tgz#a1741c131e3c77d048252adfa24e23b908670caf" @@ -12040,6 +12093,15 @@ webpack-dev-server@^4.3.1: webpack-dev-middleware "^5.2.1" ws "^8.1.0" +webpack-log@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-3.0.2.tgz#edf64fe4cabffeb04a03ca44d89f9908a4a9d238" + integrity sha512-ijm2zgqTY2omtlxRNrtDqxAQOrfAGMxWg9fQB/kuFSeZjx/OkYnfYLqsjf/JkrWOHINMzqxaJDXaog6Mx9KaHg== + dependencies: + chalk "^2.4.2" + loglevelnext "^3.0.1" + nanoid 
"^2.0.3" + webpack-merge@^5.7.3: version "5.8.0" resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-5.8.0.tgz#2b39dbf22af87776ad744c390223731d30a68f61" @@ -12048,6 +12110,14 @@ webpack-merge@^5.7.3: clone-deep "^4.0.1" wildcard "^2.0.0" +webpack-sources@^1.3.0: + version "1.4.3" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933" + integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ== + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + webpack-sources@^3.2.2: version "3.2.2" resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.2.tgz#d88e3741833efec57c4c789b6010db9977545260" From b55c362bbbcfa54d8c50781294d97e4889661812 Mon Sep 17 00:00:00 2001 From: Devesh Kumar <59202075+Devesh21700Kumar@users.noreply.github.com> Date: Wed, 26 Jan 2022 21:55:11 +0530 Subject: [PATCH 34/81] Fixed toggle Button contrast in Light Theme (#505) * fixed toggle Button contrast in Light Theme refactored to styled props and fixed theme set defaultChecked to isDarkMode value * Refactored boolean logic --- frontend/src/container/SideNav/index.tsx | 5 +++-- frontend/src/container/SideNav/styles.ts | 14 +++++++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/frontend/src/container/SideNav/index.tsx b/frontend/src/container/SideNav/index.tsx index 1078ca1123..c628d939f1 100644 --- a/frontend/src/container/SideNav/index.tsx +++ b/frontend/src/container/SideNav/index.tsx @@ -1,4 +1,5 @@ -import { Menu, Switch as ToggleButton, Typography } from 'antd'; +import { Menu, Typography } from 'antd'; +import { ToggleButton } from './styles'; import ROUTES from 'constants/routes'; import history from 'lib/history'; import React, { useCallback, useState } from 'react'; @@ -59,7 +60,7 @@ const SideNav = ({ toggleDarkMode }: Props): JSX.Element => { return ( - + diff --git 
a/frontend/src/container/SideNav/styles.ts b/frontend/src/container/SideNav/styles.ts index 750503e821..11583359e1 100644 --- a/frontend/src/container/SideNav/styles.ts +++ b/frontend/src/container/SideNav/styles.ts @@ -1,4 +1,4 @@ -import { Layout } from 'antd'; +import { Layout, Switch } from 'antd'; import styled from 'styled-components'; const { Sider: SiderComponent } = Layout; @@ -24,3 +24,15 @@ export const Sider = styled(SiderComponent)` color: white; } `; + +interface DarkModeProps { + checked?: boolean; + defaultChecked?: boolean; +} + +export const ToggleButton = styled(Switch)` + &&& { + background: ${({ checked }) => checked === false && 'grey'}; + } +`; + From f5abab676684a547e37f46e981e33aa5eda53e4e Mon Sep 17 00:00:00 2001 From: Devesh Kumar <59202075+Devesh21700Kumar@users.noreply.github.com> Date: Wed, 26 Jan 2022 21:55:48 +0530 Subject: [PATCH 35/81] Fixed svg color mismatch in light mode and dark mode (#504) * fixed svg color mismatch in light mode and dark mode Added props in parent file fixed and added fillColor as props to the highest order of parent * set React.CSSProperties props renamed and code reused --- frontend/src/assets/Dashboard/TimeSeries.tsx | 65 ++++++++++--------- frontend/src/assets/Dashboard/Value.tsx | 35 ++++++---- .../NewDashboard/ComponentsSlider/index.tsx | 8 ++- .../ComponentsSlider/menuItems.ts | 3 +- 4 files changed, 65 insertions(+), 46 deletions(-) diff --git a/frontend/src/assets/Dashboard/TimeSeries.tsx b/frontend/src/assets/Dashboard/TimeSeries.tsx index de50a3e512..439d99235b 100644 --- a/frontend/src/assets/Dashboard/TimeSeries.tsx +++ b/frontend/src/assets/Dashboard/TimeSeries.tsx @@ -1,35 +1,42 @@ import React from 'react'; -const TimeSeries = (): JSX.Element => ( - - - - - ( + + + + + + + + - - - - + + + ); +export interface TimeSeriesProps{ + fillColor: React.CSSProperties['color']; +} + + export default TimeSeries; diff --git a/frontend/src/assets/Dashboard/Value.tsx 
b/frontend/src/assets/Dashboard/Value.tsx index 61bf672fa5..02a0dda6ca 100644 --- a/frontend/src/assets/Dashboard/Value.tsx +++ b/frontend/src/assets/Dashboard/Value.tsx @@ -1,18 +1,25 @@ import React from 'react'; -const Value = (): JSX.Element => ( - - - -); +const Value = (props: ValueProps): JSX.Element => { + return( + + + + + +)}; + +interface ValueProps{ + fillColor: React.CSSProperties['color']; +} export default Value; diff --git a/frontend/src/container/NewDashboard/ComponentsSlider/index.tsx b/frontend/src/container/NewDashboard/ComponentsSlider/index.tsx index 078851260e..2e1107abff 100644 --- a/frontend/src/container/NewDashboard/ComponentsSlider/index.tsx +++ b/frontend/src/container/NewDashboard/ComponentsSlider/index.tsx @@ -1,5 +1,8 @@ import React, { useCallback } from 'react'; +import { useSelector } from 'react-redux'; import { useHistory, useLocation } from 'react-router'; +import { AppState } from 'store/reducers'; +import AppReducer from 'types/reducer/app'; import { v4 } from 'uuid'; import menuItems, { ITEMS } from './menuItems'; @@ -22,7 +25,8 @@ const DashboardGraphSlider = (): JSX.Element => { }, [push, pathname], ); - + const { isDarkMode } = useSelector((state) => state.app); + const fillColor:React.CSSProperties['color'] = isDarkMode?"white" : "black"; return ( {menuItems.map(({ name, Icon, display }) => ( @@ -33,7 +37,7 @@ const DashboardGraphSlider = (): JSX.Element => { key={name} draggable > - + {display} ))} diff --git a/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts b/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts index ced78bb14d..458f8db062 100644 --- a/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts +++ b/frontend/src/container/NewDashboard/ComponentsSlider/menuItems.ts @@ -1,5 +1,6 @@ import TimeSeries from 'assets/Dashboard/TimeSeries'; import ValueIcon from 'assets/Dashboard/Value'; +import { TimeSeriesProps as IconProps } from 'assets/Dashboard/TimeSeries'; const 
Items: ItemsProps[] = [ { @@ -18,7 +19,7 @@ export type ITEMS = 'TIME_SERIES' | 'VALUE'; interface ItemsProps { name: ITEMS; - Icon: () => JSX.Element; + Icon: (props: IconProps) => JSX.Element; display: string; } From e823987eb0aeb3ef60a90ea249218417050f27f5 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Thu, 27 Jan 2022 22:34:26 +0530 Subject: [PATCH 36/81] build(docker): Two compose files for arm and amd (#638) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * build(docker): 🔨 Two compose files for arm and amd * refactor(docker): ⚰️ remove env file from install script * refactor: ⚰️ remove .gitkeep files from data folder * chore(build): ⚰️ remove env files and update contributing docs Signed-off-by: Prashant Shahi * build: ♻️ use two compose files in Makefile Signed-off-by: Prashant Shahi * chore(docker): 🚚 revert back to using same dir and pin image tag * Revert "chore: Add migration file path in otel collector config (#628)" This reverts commit 8467d6a00c1322a2b6af3c474fde9131b8bad14b. 
Signed-off-by: Prashant Shahi --- CONTRIBUTING.md | 4 +- Makefile | 4 +- .../clickhouse-setup/data/clickhouse/.gitkeep | 0 .../clickhouse-setup/data/signoz/.gitkeep | 0 .../clickhouse-setup/docker-compose.arm.yaml | 119 ++++++++++++++++++ .../clickhouse-setup/docker-compose.yaml | 47 ++++--- deploy/docker/clickhouse-setup/env/arm64.env | 1 - deploy/docker/clickhouse-setup/env/x86_64.env | 1 - .../otel-collector-config.yaml | 3 +- deploy/install.sh | 16 +-- 10 files changed, 154 insertions(+), 41 deletions(-) delete mode 100644 deploy/docker/clickhouse-setup/data/clickhouse/.gitkeep delete mode 100644 deploy/docker/clickhouse-setup/data/signoz/.gitkeep create mode 100644 deploy/docker/clickhouse-setup/docker-compose.arm.yaml delete mode 100644 deploy/docker/clickhouse-setup/env/arm64.env delete mode 100644 deploy/docker/clickhouse-setup/env/x86_64.env diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 99764e34fd..b642b44e2d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,8 +21,8 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git - comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38` - run `cd deploy` to move to deploy directory - Install signoz locally without the frontend - - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d` - - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml up -d` + - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` + - If you are on arm64 processors (Apple M1 Macbooks) run `sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d` - `cd ../frontend` and change baseURL to 
`http://localhost:8080` in file `src/constants/env.ts` - `yarn install` - `yarn dev` diff --git a/Makefile b/Makefile index 67dbd9a522..b63d793131 100644 --- a/Makefile +++ b/Makefile @@ -91,7 +91,7 @@ dev-setup: @echo "------------------" run-x86: - @sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/x86_64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d + @sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d run-arm: - @sudo docker-compose --env-file ./deploy/docker/clickhouse-setup/env/arm64.env -f ./deploy/docker/clickhouse-setup/docker-compose.yaml up -d \ No newline at end of file + @sudo docker-compose -f ./deploy/docker/clickhouse-setup/docker-compose.arm.yaml up -d \ No newline at end of file diff --git a/deploy/docker/clickhouse-setup/data/clickhouse/.gitkeep b/deploy/docker/clickhouse-setup/data/clickhouse/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/deploy/docker/clickhouse-setup/data/signoz/.gitkeep b/deploy/docker/clickhouse-setup/data/signoz/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml new file mode 100644 index 0000000000..b1358fb78d --- /dev/null +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -0,0 +1,119 @@ +version: "2.4" + +services: + clickhouse: + image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm + expose: + - 8123 + - 9000 + ports: + - 9001:9000 + - 8123:8123 + volumes: + - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml + - ./data/clickhouse/:/var/lib/clickhouse/ + healthcheck: + # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" + test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"] + interval: 30s + timeout: 5s + retries: 3 + + alertmanager: + image: signoz/alertmanager:0.5.0 + volumes: + - 
./alertmanager.yml:/prometheus/alertmanager.yml + - ./data/alertmanager:/data + command: + - '--config.file=/prometheus/alertmanager.yml' + - '--storage.path=/data' + ports: + - 9093:9093 + + query-service: + image: signoz/query-service:0.5.4 + container_name: query-service + command: ["-config=/root/config/prometheus.yml"] + ports: + - "8080:8080" + volumes: + - ./prometheus.yml:/root/config/prometheus.yml + - ../dashboards:/root/config/dashboards + - ./data/signoz/:/var/lib/signoz/ + environment: + - ClickHouseUrl=tcp://clickhouse:9000 + - STORAGE=clickhouse + - GODEBUG=netdns=go + - TELEMETRY_ENABLED=true + depends_on: + clickhouse: + condition: service_healthy + + frontend: + image: signoz/frontend:0.5.4 + container_name: frontend + depends_on: + - query-service + links: + - "query-service" + ports: + - "3000:3000" + volumes: + - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf + + otel-collector: + image: signoz/otelcontribcol:0.4.3 + command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "1777:1777" # pprof extension + - "8887:8888" # Prometheus metrics exposed by the agent + - "14268:14268" # Jaeger receiver + - "55678" # OpenCensus receiver + - "55680:55680" # OTLP HTTP/2.0 legacy port + - "55681:55681" # OTLP HTTP/1.0 receiver + - "4317:4317" # OTLP GRPC receiver + - "55679:55679" # zpages extension + - "13133" # health_check + - "8889:8889" # prometheus exporter + mem_limit: 2000m + restart: always + depends_on: + clickhouse: + condition: service_healthy + + otel-collector-metrics: + image: signoz/otelcontribcol:0.4.3 + command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"] + volumes: + - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml + depends_on: + clickhouse: + condition: service_healthy + + hotrod: + image: jaegertracing/example-hotrod:1.30 + container_name: 
hotrod + ports: + - "9000:8080" + command: ["all"] + environment: + - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces + + load-hotrod: + image: "grubykarol/locust:1.2.3-python3.9-alpine3.12" + container_name: load-hotrod + hostname: load-hotrod + ports: + - "8089:8089" + environment: + ATTACKED_HOST: http://hotrod:8080 + LOCUST_MODE: standalone + NO_PROXY: standalone + TASK_DELAY_FROM: 5 + TASK_DELAY_TO: 30 + QUIET_MODE: "${QUIET_MODE:-false}" + LOCUST_OPTS: "--headless -u 10 -r 1" + volumes: + - ../common/locust-scripts:/locust diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 6261cd66f3..3b96c05bc0 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -2,23 +2,23 @@ version: "2.4" services: clickhouse: - image: ${clickhouse_image} - expose: - - 8123 - - 9000 - ports: - - 9001:9000 - - 8123:8123 - volumes: - - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - - ./data/clickhouse/:/var/lib/clickhouse/ + image: yandex/clickhouse-server:21.12.3.32 + expose: + - 8123 + - 9000 + ports: + - 9001:9000 + - 8123:8123 + volumes: + - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml + - ./data/clickhouse/:/var/lib/clickhouse/ - healthcheck: - # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" - test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"] - interval: 30s - timeout: 5s - retries: 3 + healthcheck: + # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" + test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"] + interval: 30s + timeout: 5s + retries: 3 alertmanager: image: signoz/alertmanager:0.5.0 @@ -41,7 +41,6 @@ services: - ./prometheus.yml:/root/config/prometheus.yml - ../dashboards:/root/config/dashboards - ./data/signoz/:/var/lib/signoz/ - environment: - 
ClickHouseUrl=tcp://clickhouse:9000 - STORAGE=clickhouse @@ -51,11 +50,10 @@ services: depends_on: clickhouse: condition: service_healthy - + frontend: image: signoz/frontend:0.5.4 container_name: frontend - depends_on: - query-service links: @@ -66,7 +64,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/otelcontribcol:0.4.2 + image: signoz/otelcontribcol:0.4.3 command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml @@ -81,7 +79,6 @@ services: - "55679:55679" # zpages extension - "13133" # health_check - "8889:8889" # prometheus exporter - mem_limit: 2000m restart: always depends_on: @@ -89,18 +86,18 @@ services: condition: service_healthy otel-collector-metrics: - image: signoz/otelcontribcol:0.4.2 + image: signoz/otelcontribcol:0.4.3 command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml depends_on: clickhouse: condition: service_healthy - + hotrod: - image: jaegertracing/example-hotrod:latest + image: jaegertracing/example-hotrod:1.30 container_name: hotrod - ports: + ports: - "9000:8080" command: ["all"] environment: diff --git a/deploy/docker/clickhouse-setup/env/arm64.env b/deploy/docker/clickhouse-setup/env/arm64.env deleted file mode 100644 index 2ef96bdabc..0000000000 --- a/deploy/docker/clickhouse-setup/env/arm64.env +++ /dev/null @@ -1 +0,0 @@ -clickhouse_image=altinity/clickhouse-server:21.8.12.1.testingarm \ No newline at end of file diff --git a/deploy/docker/clickhouse-setup/env/x86_64.env b/deploy/docker/clickhouse-setup/env/x86_64.env deleted file mode 100644 index d2abdd17e8..0000000000 --- a/deploy/docker/clickhouse-setup/env/x86_64.env +++ /dev/null @@ -1 +0,0 @@ -clickhouse_image=yandex/clickhouse-server:21.10 \ No newline at end of file diff --git 
a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index 0519785309..eeaf7221d7 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -45,7 +45,6 @@ extensions: exporters: clickhouse: datasource: tcp://clickhouse:9000 - migrations: /migrations clickhousemetricswrite: endpoint: tcp://clickhouse:9000/?database=signoz_metrics resource_to_telemetry_conversion: @@ -65,4 +64,4 @@ service: exporters: [clickhousemetricswrite] metrics/spanmetrics: receivers: [otlp/spanmetrics] - exporters: [prometheus] + exporters: [prometheus] \ No newline at end of file diff --git a/deploy/install.sh b/deploy/install.sh index 5e52ebb3c0..0aaf20b2fb 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -272,9 +272,9 @@ bye() { # Prints a friendly good bye message and exits the script. echo "" if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a" + echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml ps -a" else - echo -e "sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml ps -a" + echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a" fi else echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a" @@ -417,9 +417,9 @@ echo "" echo -e "\n🟡 Pulling the latest container images for SigNoz. 
To run as sudo it may ask for system password\n" if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull + sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml pull else - sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml pull + sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull fi else sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull @@ -433,9 +433,9 @@ echo # script doesn't exit because this command looks like it failed to do it's thing. if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true + sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml up --detach --remove-orphans || true else - sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true + sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true fi else sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true @@ -497,9 +497,9 @@ else if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/arm64.env -f docker/clickhouse-setup/docker-compose.yaml down -v" + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml down -v" else - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f 
docker/clickhouse-setup/docker-compose.yaml down -v" + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v" fi else echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v" From 16fbbf8a0ee8a8a0db23d3ef7cf67c0221cc6be2 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Fri, 28 Jan 2022 22:56:54 +0530 Subject: [PATCH 37/81] exclude filter support and fix for not sending null string in groupby for aggregates API (#654) * feat: add support to exclude filter params * fix: null string in group by --- .../app/clickhouseReader/reader.go | 468 +++--------------- pkg/query-service/app/parser.go | 12 + pkg/query-service/constants/constants.go | 11 + pkg/query-service/go.mod | 7 +- pkg/query-service/go.sum | 27 +- pkg/query-service/model/queryParams.go | 4 + 6 files changed, 132 insertions(+), 397 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 4d418ce0e4..8e9353811e 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1329,121 +1329,62 @@ func (r *ClickHouseReader) SearchSpans(ctx context.Context, queryParams *model.S return &searchSpansResult, nil } +func buildFilterArrayQuery(ctx context.Context, excludeMap map[string]struct{}, params []string, filter string, query *string, args []interface{}) []interface{} { + for i, e := range params { + if i == 0 && i == len(params)-1 { + if _, ok := excludeMap[filter]; ok { + *query += fmt.Sprintf(" AND NOT (%s=?)", filter) + } else { + *query += fmt.Sprintf(" AND (%s=?)", filter) + } + } else if i == 0 && i != len(params)-1 { + if _, ok := excludeMap[filter]; ok { + *query += fmt.Sprintf(" AND NOT (%s=?", filter) + } else { + *query += fmt.Sprintf(" AND (%s=?", filter) + } + } else if i != 0 && i == len(params)-1 { + *query += fmt.Sprintf(" 
OR %s=?)", filter) + } else { + *query += fmt.Sprintf(" OR %s=?", filter) + } + args = append(args, e) + } + return args +} + func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) { var query string + excludeMap := make(map[string]struct{}) + for _, e := range queryParams.Exclude { + excludeMap[e] = struct{}{} + } + args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} if len(queryParams.ServiceName) > 0 { - for i, e := range queryParams.ServiceName { - if i == 0 && i == len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?)" - } else if i == 0 && i != len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?" - } else if i != 0 && i == len(queryParams.ServiceName)-1 { - query += " OR serviceName=?)" - } else { - query += " OR serviceName=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args) } if len(queryParams.HttpRoute) > 0 { - for i, e := range queryParams.HttpRoute { - if i == 0 && i == len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?)" - } else if i == 0 && i != len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?" - } else if i != 0 && i == len(queryParams.HttpRoute)-1 { - query += " OR httpRoute=?)" - } else { - query += " OR httpRoute=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args) } if len(queryParams.HttpCode) > 0 { - for i, e := range queryParams.HttpCode { - if i == 0 && i == len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?)" - } else if i == 0 && i != len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?" - } else if i != 0 && i == len(queryParams.HttpCode)-1 { - query += " OR httpCode=?)" - } else { - query += " OR httpCode=?" 
- } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args) } if len(queryParams.HttpHost) > 0 { - for i, e := range queryParams.HttpHost { - if i == 0 && i == len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?)" - } else if i == 0 && i != len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?" - } else if i != 0 && i == len(queryParams.HttpHost)-1 { - query += " OR httpHost=?)" - } else { - query += " OR httpHost=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args) } if len(queryParams.HttpMethod) > 0 { - for i, e := range queryParams.HttpMethod { - if i == 0 && i == len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?)" - } else if i == 0 && i != len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?" - } else if i != 0 && i == len(queryParams.HttpMethod)-1 { - query += " OR httpMethod=?)" - } else { - query += " OR httpMethod=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args) } if len(queryParams.HttpUrl) > 0 { - for i, e := range queryParams.HttpUrl { - if i == 0 && i == len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?)" - } else if i == 0 && i != len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?" - } else if i != 0 && i == len(queryParams.HttpUrl)-1 { - query += " OR httpUrl=?)" - } else { - query += " OR httpUrl=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args) } if len(queryParams.Component) > 0 { - for i, e := range queryParams.Component { - if i == 0 && i == len(queryParams.Component)-1 { - query += " AND (component=?)" - } else if i == 0 && i != len(queryParams.Component)-1 { - query += " AND (component=?" 
- } else if i != 0 && i == len(queryParams.Component)-1 { - query += " OR component=?)" - } else { - query += " OR component=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - for i, e := range queryParams.Operation { - if i == 0 && i == len(queryParams.Operation)-1 { - query += " AND (name=?)" - } else if i == 0 && i != len(queryParams.Operation)-1 { - query += " AND (name=?" - } else if i != 0 && i == len(queryParams.Operation)-1 { - query += " OR name=?)" - } else { - query += " OR name=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) } if len(queryParams.MinDuration) != 0 { @@ -1499,6 +1440,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode finalQuery += query finalQuery += " GROUP BY httpCode" var dBResponse []model.DBResponseHttpCode + fmt.Println(finalQuery) err := r.db.Select(&dBResponse, finalQuery, args...) if err != nil { zap.S().Debug("Error in processing sql query: ", err) @@ -1643,119 +1585,36 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpCode, httpMethod FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) + excludeMap := make(map[string]struct{}) + for _, e := range queryParams.Exclude { + excludeMap[e] = struct{}{} + } + var query string args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} if len(queryParams.ServiceName) > 0 { - for i, e := range queryParams.ServiceName { - if i == 0 && i == len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?)" - } else if i == 0 && i != len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?" 
- } else if i != 0 && i == len(queryParams.ServiceName)-1 { - query += " OR serviceName=?)" - } else { - query += " OR serviceName=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args) } if len(queryParams.HttpRoute) > 0 { - for i, e := range queryParams.HttpRoute { - if i == 0 && i == len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?)" - } else if i == 0 && i != len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?" - } else if i != 0 && i == len(queryParams.HttpRoute)-1 { - query += " OR httpRoute=?)" - } else { - query += " OR httpRoute=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args) } if len(queryParams.HttpCode) > 0 { - for i, e := range queryParams.HttpCode { - if i == 0 && i == len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?)" - } else if i == 0 && i != len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?" - } else if i != 0 && i == len(queryParams.HttpCode)-1 { - query += " OR httpCode=?)" - } else { - query += " OR httpCode=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args) } if len(queryParams.HttpHost) > 0 { - for i, e := range queryParams.HttpHost { - if i == 0 && i == len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?)" - } else if i == 0 && i != len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?" - } else if i != 0 && i == len(queryParams.HttpHost)-1 { - query += " OR httpHost=?)" - } else { - query += " OR httpHost=?" 
- } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args) } if len(queryParams.HttpMethod) > 0 { - for i, e := range queryParams.HttpMethod { - if i == 0 && i == len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?)" - } else if i == 0 && i != len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?" - } else if i != 0 && i == len(queryParams.HttpMethod)-1 { - query += " OR httpMethod=?)" - } else { - query += " OR httpMethod=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args) } if len(queryParams.HttpUrl) > 0 { - for i, e := range queryParams.HttpUrl { - if i == 0 && i == len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?)" - } else if i == 0 && i != len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?" - } else if i != 0 && i == len(queryParams.HttpUrl)-1 { - query += " OR httpUrl=?)" - } else { - query += " OR httpUrl=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args) } if len(queryParams.Component) > 0 { - for i, e := range queryParams.Component { - if i == 0 && i == len(queryParams.Component)-1 { - query += " AND (component=?)" - } else if i == 0 && i != len(queryParams.Component)-1 { - query += " AND (component=?" - } else if i != 0 && i == len(queryParams.Component)-1 { - query += " OR component=?)" - } else { - query += " OR component=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - for i, e := range queryParams.Operation { - if i == 0 && i == len(queryParams.Operation)-1 { - query += " AND (name=?)" - } else if i == 0 && i != len(queryParams.Operation)-1 { - query += " AND (name=?" 
- } else if i != 0 && i == len(queryParams.Operation)-1 { - query += " OR name=?)" - } else { - query += " OR name=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) } if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" @@ -1880,121 +1739,37 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model.TagFilterParams) (*[]model.TagFilters, *model.ApiError) { + excludeMap := make(map[string]struct{}) + for _, e := range queryParams.Exclude { + excludeMap[e] = struct{}{} + } + var query string args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)} if len(queryParams.ServiceName) > 0 { - for i, e := range queryParams.ServiceName { - if i == 0 && i == len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?)" - } else if i == 0 && i != len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?" - } else if i != 0 && i == len(queryParams.ServiceName)-1 { - query += " OR serviceName=?)" - } else { - query += " OR serviceName=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args) } if len(queryParams.HttpRoute) > 0 { - for i, e := range queryParams.HttpRoute { - if i == 0 && i == len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?)" - } else if i == 0 && i != len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?" - } else if i != 0 && i == len(queryParams.HttpRoute)-1 { - query += " OR httpRoute=?)" - } else { - query += " OR httpRoute=?" 
- } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args) } if len(queryParams.HttpCode) > 0 { - for i, e := range queryParams.HttpCode { - if i == 0 && i == len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?)" - } else if i == 0 && i != len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?" - } else if i != 0 && i == len(queryParams.HttpCode)-1 { - query += " OR httpCode=?)" - } else { - query += " OR httpCode=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args) } if len(queryParams.HttpHost) > 0 { - for i, e := range queryParams.HttpHost { - if i == 0 && i == len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?)" - } else if i == 0 && i != len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?" - } else if i != 0 && i == len(queryParams.HttpHost)-1 { - query += " OR httpHost=?)" - } else { - query += " OR httpHost=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args) } if len(queryParams.HttpMethod) > 0 { - for i, e := range queryParams.HttpMethod { - if i == 0 && i == len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?)" - } else if i == 0 && i != len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?" - } else if i != 0 && i == len(queryParams.HttpMethod)-1 { - query += " OR httpMethod=?)" - } else { - query += " OR httpMethod=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args) } if len(queryParams.HttpUrl) > 0 { - for i, e := range queryParams.HttpUrl { - if i == 0 && i == len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?)" - } else if i == 0 && i != len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?" 
- } else if i != 0 && i == len(queryParams.HttpUrl)-1 { - query += " OR httpUrl=?)" - } else { - query += " OR httpUrl=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args) } if len(queryParams.Component) > 0 { - for i, e := range queryParams.Component { - if i == 0 && i == len(queryParams.Component)-1 { - query += " AND (component=?)" - } else if i == 0 && i != len(queryParams.Component)-1 { - query += " AND (component=?" - } else if i != 0 && i == len(queryParams.Component)-1 { - query += " OR component=?)" - } else { - query += " OR component=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - for i, e := range queryParams.Operation { - if i == 0 && i == len(queryParams.Operation)-1 { - query += " AND (name=?)" - } else if i == 0 && i != len(queryParams.Operation)-1 { - query += " AND (name=?" - } else if i != 0 && i == len(queryParams.Operation)-1 { - query += " OR name=?)" - } else { - query += " OR name=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) } - if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" 
args = append(args, queryParams.MinDuration) @@ -2485,6 +2260,11 @@ func (r *ClickHouseReader) SearchSpansAggregate(ctx context.Context, queryParams func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, queryParams *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) { + excludeMap := make(map[string]struct{}) + for _, e := range queryParams.Exclude { + excludeMap[e] = struct{}{} + } + SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{} aggregation_query := "" @@ -2552,116 +2332,28 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query } if len(queryParams.ServiceName) > 0 { - for i, e := range queryParams.ServiceName { - if i == 0 && i == len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?)" - } else if i == 0 && i != len(queryParams.ServiceName)-1 { - query += " AND (serviceName=?" - } else if i != 0 && i == len(queryParams.ServiceName)-1 { - query += " OR serviceName=?)" - } else { - query += " OR serviceName=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args) } if len(queryParams.HttpRoute) > 0 { - for i, e := range queryParams.HttpRoute { - if i == 0 && i == len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?)" - } else if i == 0 && i != len(queryParams.HttpRoute)-1 { - query += " AND (httpRoute=?" - } else if i != 0 && i == len(queryParams.HttpRoute)-1 { - query += " OR httpRoute=?)" - } else { - query += " OR httpRoute=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args) } if len(queryParams.HttpCode) > 0 { - for i, e := range queryParams.HttpCode { - if i == 0 && i == len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?)" - } else if i == 0 && i != len(queryParams.HttpCode)-1 { - query += " AND (httpCode=?" 
- } else if i != 0 && i == len(queryParams.HttpCode)-1 { - query += " OR httpCode=?)" - } else { - query += " OR httpCode=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args) } if len(queryParams.HttpHost) > 0 { - for i, e := range queryParams.HttpHost { - if i == 0 && i == len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?)" - } else if i == 0 && i != len(queryParams.HttpHost)-1 { - query += " AND (httpHost=?" - } else if i != 0 && i == len(queryParams.HttpHost)-1 { - query += " OR httpHost=?)" - } else { - query += " OR httpHost=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args) } if len(queryParams.HttpMethod) > 0 { - for i, e := range queryParams.HttpMethod { - if i == 0 && i == len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?)" - } else if i == 0 && i != len(queryParams.HttpMethod)-1 { - query += " AND (httpMethod=?" - } else if i != 0 && i == len(queryParams.HttpMethod)-1 { - query += " OR httpMethod=?)" - } else { - query += " OR httpMethod=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args) } if len(queryParams.HttpUrl) > 0 { - for i, e := range queryParams.HttpUrl { - if i == 0 && i == len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?)" - } else if i == 0 && i != len(queryParams.HttpUrl)-1 { - query += " AND (httpUrl=?" - } else if i != 0 && i == len(queryParams.HttpUrl)-1 { - query += " OR httpUrl=?)" - } else { - query += " OR httpUrl=?" 
- } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args) } if len(queryParams.Component) > 0 { - for i, e := range queryParams.Component { - if i == 0 && i == len(queryParams.Component)-1 { - query += " AND (component=?)" - } else if i == 0 && i != len(queryParams.Component)-1 { - query += " AND (component=?" - } else if i != 0 && i == len(queryParams.Component)-1 { - query += " OR component=?)" - } else { - query += " OR component=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - for i, e := range queryParams.Operation { - if i == 0 && i == len(queryParams.Operation)-1 { - query += " AND (name=?)" - } else if i == 0 && i != len(queryParams.Operation)-1 { - query += " AND (name=?" - } else if i != 0 && i == len(queryParams.Operation)-1 { - query += " OR name=?)" - } else { - query += " OR name=?" - } - args = append(args, e) - } + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) } if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" 
@@ -2788,12 +2480,12 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query SpanAggregatesDBResponseItems[i].Value = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds) } if responseElement, ok := GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp]; !ok { - if queryParams.GroupBy != "" { + if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy.String != "" { GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{ Timestamp: SpanAggregatesDBResponseItems[i].Timestamp, GroupBy: map[string]float32{SpanAggregatesDBResponseItems[i].GroupBy.String: SpanAggregatesDBResponseItems[i].Value}, } - } else { + } else if queryParams.GroupBy == "" { GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{ Timestamp: SpanAggregatesDBResponseItems[i].Timestamp, Value: SpanAggregatesDBResponseItems[i].Value, @@ -2801,7 +2493,7 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query } } else { - if queryParams.GroupBy != "" { + if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy.String != "" { responseElement.GroupBy[SpanAggregatesDBResponseItems[i].GroupBy.String] = SpanAggregatesDBResponseItems[i].Value } GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = responseElement diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 1c02d17652..87c1c00303 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -506,6 +506,7 @@ func parseSpanFilterRequest(r *http.Request) (*model.SpanFilterParams, error) { Status: []string{}, Operation: []string{}, GetFilters: []string{}, + Exclude: []string{}, } params.ServiceName = fetchArrayValues("serviceName", r) @@ -528,6 +529,8 @@ func parseSpanFilterRequest(r 
*http.Request) (*model.SpanFilterParams, error) { params.GetFilters = fetchArrayValues("getFilters", r) + params.Exclude = fetchArrayValues("exclude", r) + minDuration, err := parseTimestamp("minDuration", r) if err == nil { params.MinDuration = *minDuration @@ -565,6 +568,7 @@ func parseFilteredSpansRequest(r *http.Request) (*model.GetFilteredSpansParams, Operation: []string{}, Limit: 100, Order: "descending", + Exclude: []string{}, } params.ServiceName = fetchArrayValues("serviceName", r) @@ -585,6 +589,8 @@ func parseFilteredSpansRequest(r *http.Request) (*model.GetFilteredSpansParams, params.Component = fetchArrayValues("component", r) + params.Exclude = fetchArrayValues("exclude", r) + limitStr := r.URL.Query().Get("limit") if len(limitStr) != 0 { limit, err := strconv.ParseInt(limitStr, 10, 64) @@ -711,6 +717,7 @@ func parseFilteredSpanAggregatesRequest(r *http.Request) (*model.GetFilteredSpan StepSeconds: stepInt, Dimension: dimension, AggregationOption: aggregationOption, + Exclude: []string{}, } params.ServiceName = fetchArrayValues("serviceName", r) @@ -731,6 +738,8 @@ func parseFilteredSpanAggregatesRequest(r *http.Request) (*model.GetFilteredSpan params.Component = fetchArrayValues("component", r) + params.Exclude = fetchArrayValues("exclude", r) + tags, err := parseTagsV2("tags", r) if err != nil { return nil, err @@ -805,6 +814,7 @@ func parseTagFilterRequest(r *http.Request) (*model.TagFilterParams, error) { Component: []string{}, Status: []string{}, Operation: []string{}, + Exclude: []string{}, } params.ServiceName = fetchArrayValues("serviceName", r) @@ -825,6 +835,8 @@ func parseTagFilterRequest(r *http.Request) (*model.TagFilterParams, error) { params.Component = fetchArrayValues("component", r) + params.Exclude = fetchArrayValues("exclude", r) + minDuration, err := parseTimestamp("minDuration", r) if err == nil { params.MinDuration = *minDuration diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go 
index 422396bf90..4f150f358e 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -25,3 +25,14 @@ const MetricsTTL = "metrics" const ALERTMANAGER_API_PREFIX = "http://alertmanager:9093/api/" const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db" + +const ( + ServiceName = "serviceName" + HttpRoute = "httpRoute" + HttpCode = "httpCode" + HttpHost = "httpHost" + HttpUrl = "httpUrl" + HttpMethod = "httpMethod" + Component = "component" + Operation = "name" +) diff --git a/pkg/query-service/go.mod b/pkg/query-service/go.mod index 79dfc473bb..8de66373db 100644 --- a/pkg/query-service/go.mod +++ b/pkg/query-service/go.mod @@ -4,19 +4,15 @@ go 1.14 require ( cloud.google.com/go v0.88.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/ClickHouse/clickhouse-go v1.4.5 github.com/Microsoft/go-winio v0.5.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect github.com/aws/aws-sdk-go v1.27.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/containerd/containerd v1.4.12 // indirect github.com/dhui/dktest v0.3.4 // indirect github.com/docker/docker v20.10.12+incompatible // indirect - github.com/frankban/quicktest v1.13.0 // indirect github.com/go-kit/log v0.1.0 github.com/golang-migrate/migrate/v4 v4.14.1 @@ -41,6 +37,7 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/oklog/oklog v0.3.2 github.com/oklog/run v1.1.0 // indirect github.com/onsi/gomega v1.14.0 // indirect @@ -60,7 +57,6 @@ require ( github.com/smartystreets/goconvey v1.6.4 github.com/soheilhy/cmux v0.1.4 github.com/spaolacci/murmur3 v1.1.0 // indirect - 
github.com/spf13/pflag v1.0.3 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect @@ -74,6 +70,7 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/segmentio/analytics-go.v3 v3.1.0 + gotest.tools/v3 v3.1.0 // indirect ) diff --git a/pkg/query-service/go.sum b/pkg/query-service/go.sum index 034d9b992e..622b35ce3b 100644 --- a/pkg/query-service/go.sum +++ b/pkg/query-service/go.sum @@ -58,6 +58,7 @@ github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhH github.com/ClickHouse/clickhouse-go v1.4.5 h1:FfhyEnv6/BaWldyjgT2k4gDDmeNwJ9C4NbY/MXxJlXk= github.com/ClickHouse/clickhouse-go v1.4.5/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -105,12 +106,16 @@ github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68m github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.12 h1:V+SHzYmhng/iju6M5nFrpTTusrhidoxKTwdwLw+u4c4= +github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -120,12 +125,15 @@ github.com/dgrijalva/jwt-go v3.0.1-0.20161101193935-9ed569b5d1ac+incompatible/go github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bits v0.0.0-20160601073636-2ad8d707cc05/go.mod h1:/9UYwwvZuEgp+mQ4960SHWCU1FS+FgdFX+m5ExFByNs= -github.com/dhui/dktest v0.3.3 h1:DBuH/9GFaWbDRa42qsut/hbQu+srAQ0rPWnUoiGX7CA= github.com/dhui/dktest v0.3.3/go.mod h1:EML9sP4sqJELHn4jV7B0TY8oF6077nk83/tz7M56jcQ= +github.com/dhui/dktest v0.3.4 h1:VbUEcaSP+U2/yUr9d2JhSThXYEnDlGabRSHe2rIE46E= +github.com/dhui/dktest v0.3.4/go.mod h1:4m4n6lmXlmVfESth7mzdcv8nBI5mOb5UROPqjM02csU= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= 
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8= github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -171,8 +179,9 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.14.1 h1:qmRd/rNGjM1r3Ve5gHd5ZplytrD02UcItYNxJ3iUHHE= github.com/golang-migrate/migrate/v4 v4.14.1/go.mod h1:l7Ks0Au6fYHuUIxUhQ0rcVX1uLlJg54C/VvW7tvxSz0= github.com/golang-sql/civil 
v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -371,6 +380,7 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -413,6 +423,8 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -785,6 +797,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -814,6 +827,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200806022845-90696ccdc692/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -827,6 +841,7 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -918,6 +933,7 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1006,6 +1022,9 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= +gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 4732bbbb6b..2ae9411b4e 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -135,6 +135,7 @@ type GetFilteredSpansParams struct { Order string Offset int64 Tags []TagQueryV2 + Exclude []string } type GetFilteredSpanAggregatesParams struct { @@ -158,6 +159,7 @@ type GetFilteredSpanAggregatesParams struct { AggregationOption string GroupBy string Function string + Exclude []string } type SpanFilterParams struct { @@ -171,6 +173,7 @@ type SpanFilterParams struct { Component []string Operation []string GetFilters []string + Exclude []string MinDuration string MaxDuration string Start *time.Time @@ -187,6 +190,7 @@ type TagFilterParams struct { HttpMethod []string Component []string Operation []string + Exclude []string MinDuration string MaxDuration string Start *time.Time From 8f0df5e1e3f69ea1023b0c96e1f8544ceeabeab6 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Fri, 28 Jan 2022 22:59:07 +0530 Subject: [PATCH 38/81] =?UTF-8?q?ci(k3s):=20=F0=9F=A9=B9=20simple=20fix=20?= =?UTF-8?q?as=20per=20the=20helm=20chart=20changes=20(#651)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/e2e-k3s.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 41bea93c8e..97885d9b92 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -47,8 +47,8 @@ jobs: --set frontend.image.tag=$DOCKER_TAG # get pods, services and the container images - kubectl describe 
deploy/frontend -n platform | grep Image - kubectl describe statefulset/query-service -n platform | grep Image + kubectl describe deploy/my-release-frontend -n platform | grep Image + kubectl describe statefulset/my-release-query-service -n platform | grep Image kubectl get pods -n platform kubectl get svc -n platform From b0d68ac00fbd29de1e3070334c38aaad37118516 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sat, 29 Jan 2022 01:20:25 +0530 Subject: [PATCH 39/81] chore: install script improvements (#652) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(install): 🔨 install script improvement - remove ipify - migrate from PostHog to Segment - single function for sending event Signed-off-by: Prashant Shahi * chore: ⚰️ remove commented code * chore(install): 🛂 update the auth token * chore(install): 🔧 set context.default config true * Revert "chore(install): 🔧 set context.default config true" This reverts commit 0704013ac7ce0c823c423987ceb575442deb8108. * chore(install): 🔨 use uname sha for installation id * refactor(slack): 🚚 use signoz.io/slack URL Signed-off-by: Prashant Shahi --- CONTRIBUTING.md | 4 +- README.de-de.md | 8 +- README.md | 8 +- README.pt-br.md | 8 +- README.zh-cn.md | 6 +- deploy/install.sh | 199 ++++++++++++++++++++++------------------------ 6 files changed, 112 insertions(+), 121 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b642b44e2d..c07bc6cc61 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,7 +29,7 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git ### Contribute to Frontend without installing SigNoz backend -If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. 
Please ping us in #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `` +If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. Please ping us in #contributing channel in our [slack community](https://signoz.io/slack) and we will DM you with `` - `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend` - Create a file `.env` with `FRONTEND_API_ENDPOINT=` @@ -59,7 +59,7 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht ## General Instructions -You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA). +You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://signoz.io/slack). - If you find any bugs, please create an issue - If you find anything missing in documentation, you can create an issue with label **documentation** diff --git a/README.de-de.md b/README.de-de.md index f49a56bd03..b728cd094c 100644 --- a/README.de-de.md +++ b/README.de-de.md @@ -17,7 +17,7 @@ DokumentationReadMe auf ChinesischReadMe auf Portugiesisch • - Slack Community • + Slack CommunityTwitter @@ -39,7 +39,7 @@ SigNoz hilft Entwicklern, Anwendungen zu überwachen und Probleme in ihren berei ## Werde Teil unserer Slack Community -Sag Hi zu uns auf [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋 +Sag Hi zu uns auf [Slack](https://signoz.io/slack) 👋

@@ -130,7 +130,7 @@ Außerdem hat SigNoz noch mehr spezielle Funktionen im Vergleich zu Jaeger: Wir ❤️ Beiträge zum Projekt, egal ob große oder kleine. Bitte lies dir zuerst die [CONTRIBUTING.md](CONTRIBUTING.md) durch, bevor du anfängst, Beiträge zu SigNoz zu machen. -Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA). +Du bist dir nicht sicher, wie du anfangen sollst? Schreib uns einfach auf dem `#contributing` Kanal in unserer [Slack Community](https://signoz.io/slack).

@@ -146,7 +146,7 @@ Du findest unsere Dokumentation unter https://signoz.io/docs/. Falls etwas unver ## Community -Werde Teil der [Slack Community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen. +Werde Teil der [Slack Community](https://signoz.io/slack) um mehr über verteilte Einzelschritt-Fehlersuche, Messung von Systemzuständen oder SigNoz zu erfahren und sich mit anderen Nutzern und Mitwirkenden in Verbindung zu setzen. Falls du irgendwelche Ideen, Fragen oder Feedback hast, kannst du sie gerne über unsere [Github Discussions](https://github.com/SigNoz/signoz/discussions) mit uns teilen. diff --git a/README.md b/README.md index 4f3f9a11c2..e7af0d5bef 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ ReadMe in ChineseReadMe in GermanReadMe in Portuguese • - Slack Community • + Slack CommunityTwitter @@ -41,7 +41,7 @@ SigNoz helps developers monitor applications and troubleshoot problems in their ## Join our Slack community -Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋 +Come say Hi to us on [Slack](https://signoz.io/slack) 👋

@@ -132,7 +132,7 @@ Moreover, SigNoz has few more advanced features wrt Jaeger: We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) to get started with making contributions to SigNoz. -Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) +Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

@@ -148,7 +148,7 @@ You can find docs at https://signoz.io/docs/. If you need any clarification or f ## Community -Join the [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors. +Join the [slack community](https://signoz.io/slack) to know more about distributed tracing, observability, or SigNoz and to connect with other users and contributors. If you have any ideas, questions, or any feedback, please share on our [Github Discussions](https://github.com/SigNoz/signoz/discussions) diff --git a/README.pt-br.md b/README.pt-br.md index 72b0af798a..e8113cd7ca 100644 --- a/README.pt-br.md +++ b/README.pt-br.md @@ -15,7 +15,7 @@

Documentação • - Comunidade no Slack • + Comunidade no SlackTwitter

@@ -38,7 +38,7 @@ SigNoz auxilia os desenvolvedores a monitorarem aplicativos e solucionar problem ## Junte-se à nossa comunidade no Slack -Venha dizer oi para nós no [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋 +Venha dizer oi para nós no [Slack](https://signoz.io/slack) 👋

@@ -129,7 +129,7 @@ Além disso, SigNoz tem alguns recursos mais avançados do que Jaeger: Nós ❤️ contribuições grandes ou pequenas. Leia [CONTRIBUTING.md](CONTRIBUTING.md) para começar a fazer contribuições para o SigNoz. -Não sabe como começar? Basta enviar um sinal para nós no canal `#contributing` em nossa [comunidade no Slack.](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) +Não sabe como começar? Basta enviar um sinal para nós no canal `#contributing` em nossa [comunidade no Slack.](https://signoz.io/slack)

@@ -145,7 +145,7 @@ Você pode encontrar a documentação em https://signoz.io/docs/. Se você tiver ## Comunidade -Junte-se a [comunidade no Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) para saber mais sobre rastreamento distribuído, observabilidade ou SigNoz e para se conectar com outros usuários e colaboradores. +Junte-se a [comunidade no Slack](https://signoz.io/slack) para saber mais sobre rastreamento distribuído, observabilidade ou SigNoz e para se conectar com outros usuários e colaboradores. Se você tiver alguma ideia, pergunta ou feedback, compartilhe em nosso [Github Discussões](https://github.com/SigNoz/signoz/discussions) diff --git a/README.zh-cn.md b/README.zh-cn.md index 143139b4a5..fc25a1e0fb 100644 --- a/README.zh-cn.md +++ b/README.zh-cn.md @@ -29,7 +29,7 @@ SigNoz帮助开发人员监控应用并排查已部署应用中的问题。SigNo ## 加入我们的Slack社区 -来[Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 跟我们打声招呼👋 +来[Slack](https://signoz.io/slack) 跟我们打声招呼👋

@@ -120,7 +120,7 @@ Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们 我们 ❤️ 任何贡献无论大小。 请阅读 [CONTRIBUTING.md](CONTRIBUTING.md) 然后开始给Signoz做贡献。 -还不清楚怎么开始? 只需在[slack社区](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA)的`#contributing`频道里ping我们。 +还不清楚怎么开始? 只需在[slack社区](https://signoz.io/slack)的`#contributing`频道里ping我们。

@@ -136,7 +136,7 @@ Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们 ## 社区 -加入[slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA),了解更多关于分布式跟踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。 +加入[slack community](https://signoz.io/slack),以及SigNoz。同时与其他用户和贡献者一起交流。 如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。 diff --git a/deploy/install.sh b/deploy/install.sh index 0aaf20b2fb..54bef5d067 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -116,15 +116,7 @@ check_ports_occupied() { fi if [[ -n $port_check_output ]]; then - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi + send_event "port_not_available" echo "+++++++++++ ERROR ++++++++++++++++++++++" echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports." 
@@ -207,15 +199,7 @@ install_docker_compose() { echo "" fi else - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi + send_event "docker_compose_not_found" echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++" echo "docker-compose not found! Please install docker-compose first and then continue with this installation." @@ -272,15 +256,15 @@ bye() { # Prints a friendly good bye message and exits the script. echo "" if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml ps -a" + echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml ps -a" else - echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a" + echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a" fi else - echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a" + echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a" fi # echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting" - echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA" + echo "or reach us for support in #help channel in our Slack Community https://signoz.io/slack" echo "++++++++++++++++++++++++++++++++++++++++" echo -e "\n📨 Please share your email to receive support with the installation" @@ -291,16 
+275,7 @@ bye() { # Prints a friendly good bye message and exits the script. read -rp 'Email: ' email done - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi + send_event "installation_support" echo "" @@ -317,10 +292,19 @@ echo "" # Checking OS and assigning package manager desired_os=0 os="" +email="" echo -e "Detecting your OS ..." check_os -SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org') +# Obtain unique installation id +sysinfo="$(uname -a)" +if [ $? -ne 0 ]; then + uuid="$(uuidgen)" + uuid="${uuid:-$(cat /proc/sys/kernel/random/uuid)}" + SIGNOZ_INSTALLATION_ID="${uuid:-$(cat /proc/sys/kernel/random/uuid)}" +else + SIGNOZ_INSTALLATION_ID=$(echo "$sysinfo" | shasum | cut -d ' ' -f1) +fi # echo "" @@ -350,29 +334,78 @@ setup_type='clickhouse' # Run bye if failure happens trap bye EXIT +URL="https://api.segment.io/v1/track" +HEADER_1="Content-Type: application/json" +HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6" -DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }' -URL="https://app.posthog.com/capture" -HEADER="Content-Type: application/json" +send_event() { + error="" -if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 -elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 -fi + case "$1" in + 
'install_started') + event="Installation Started" + ;; + 'os_not_supported') + event="Installation Error" + error="OS Not Supported" + ;; + 'docker_not_installed') + event="Installation Error" + error="Docker not installed" + ;; + 'docker_compose_not_found') + event="Installation Error" + event="Docker Compose not found" + ;; + 'port_not_available') + event="Installation Error" + error="port not available" + ;; + 'installation_error_checks') + event="Installation Error - Checks" + error="Containers not started" + if [ $setup_type == 'clickhouse' ]; then + others='"data": "some_checks",' + else + supervisors="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)" + datasources="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)" + others='"supervisors": "'"$supervisors"'", "datasources": "'"$datasources"'",' + fi + ;; + 'installation_support') + event="Installation Support" + others='"email": "'"$email"'",' + ;; + 'installation_success') + event="Installation Success" + ;; + 'identify_successful_installation') + event="Identify Successful Installation" + others='"email": "'"$email"'",' + ;; + *) + print_error "unknown event type: $1" + exit 1 + ;; + esac - -if [[ $desired_os -eq 0 ]];then - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 + if [ "$error" != "" ]; then + error='"error": "'"$error"'", ' fi + DATA='{ "anonymousId": "'"$SIGNOZ_INSTALLATION_ID"'", "event": "'"$event"'", "properties": { "os": "'"$os"'", '"$error $others"' "setup_type": "'"$setup_type"'" } }' + 
+ if has_curl; then + curl -sfL -d "$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1 + elif has_wget; then + wget -q --post-data="$DATA" --header "$HEADER_1" --header "$HEADER_2" "$URL" > /dev/null 2>&1 + fi +} + +send_event "install_started" + +if [[ $desired_os -eq 0 ]]; then + send_event "os_not_supported" fi # check_ports_occupied @@ -387,15 +420,8 @@ if ! is_command_present docker; then echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS" echo "https://docs.docker.com/docker-for-mac/install/" echo "++++++++++++++++++++++++++++++++++++++++++++++++" - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi + send_event "docker_not_installed" exit 1 fi fi @@ -406,7 +432,6 @@ if ! is_command_present docker-compose; then fi - start_docker @@ -449,45 +474,20 @@ if [[ $status_code -ne 200 ]]; then echo "🔴 The containers didn't seem to start correctly. 
Please run the following command to check containers that may have errored out:" echo "" if [ $setup_type == 'clickhouse' ]; then - echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a" + echo -e "sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml ps -a" else - echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a" + echo -e "sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a" fi echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues" - echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA" + echo "or reach us on SigNoz for support https://signoz.io/slack" echo "++++++++++++++++++++++++++++++++++++++++" - if [ $setup_type == 'clickhouse' ]; then - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }' - else - SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)" - - DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)" - - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }' - fi - - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 
- fi - + send_event "installation_error_checks" exit 1 else - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" + send_event "installation_success" - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++" echo "" echo "🟢 Your installation is complete!" @@ -497,19 +497,19 @@ else if [ $setup_type == 'clickhouse' ]; then if is_arm64; then - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml down -v" + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.arm.yaml down -v" else - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v" + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml down -v" fi else - echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v" + echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml down -v" fi echo "" echo "+++++++++++++++++++++++++++++++++++++++++++++++++" echo "" echo "👉 Need help Getting Started?" - echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA" + echo -e "Join us on Slack https://signoz.io/slack" echo "" echo -e "\n📨 Please share your email to receive support & updates about SigNoz!" 
read -rp 'Email: ' email @@ -519,16 +519,7 @@ else read -rp 'Email: ' email done - DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }' - URL="https://app.posthog.com/capture" - HEADER="Content-Type: application/json" - - if has_curl; then - curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1 - elif has_wget; then - wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1 - fi - + send_event "identify_successful_installation" fi echo -e "\n🙏 Thank you!\n" From 0d1526f6af6c8676b0d88d72e4187275ce3feac7 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sat, 29 Jan 2022 01:33:34 +0530 Subject: [PATCH 40/81] =?UTF-8?q?docs:=20=F0=9F=93=9D=20reverting=20minor?= =?UTF-8?q?=20docs=20changes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- README.zh-cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.zh-cn.md b/README.zh-cn.md index fc25a1e0fb..0e1add22d2 100644 --- a/README.zh-cn.md +++ b/README.zh-cn.md @@ -136,7 +136,7 @@ Jaeger只做分布式跟踪,SigNoz则是做了矩阵和跟踪两块,我们 ## 社区 -加入[slack community](https://signoz.io/slack),以及SigNoz。同时与其他用户和贡献者一起交流。 +加入[slack community](https://signoz.io/slack),了解更多关于分布式跟踪、可观察性(observability),以及SigNoz。同时与其他用户和贡献者一起交流。 如果你有任何想法、问题或者反馈,请在[Github Discussions](https://github.com/SigNoz/signoz/discussions)分享给我们。 From c7ffac46f527fc3e60111aedf4f6c8e256869453 Mon Sep 17 00:00:00 2001 From: Pranay Prateek Date: Sun, 30 Jan 2022 22:46:01 +0530 Subject: [PATCH 41/81] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4f3f9a11c2..9cbbfd4431 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ SigNoz helps developers monitor applications and 
troubleshoot problems in their ## Join our Slack community -Come say Hi to us on [Slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) 👋 +Come say Hi to us on [Slack](https://signoz.io/slack) 👋

From 24162f8f9671611e1416de327d43edacf9382b23 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sun, 30 Jan 2022 23:39:06 +0530 Subject: [PATCH 42/81] =?UTF-8?q?chore(log-option):=20=F0=9F=94=A7=20set?= =?UTF-8?q?=20hotrod=20log=20options=20for=20hotrod=20app=20(#659)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- deploy/docker/clickhouse-setup/docker-compose.arm.yaml | 6 +++++- deploy/docker/clickhouse-setup/docker-compose.yaml | 10 +++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml index b1358fb78d..9331701cb7 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -96,7 +96,11 @@ services: image: jaegertracing/example-hotrod:1.30 container_name: hotrod ports: - - "9000:8080" + - "9000:8080" + logging: + options: + max-size: 50m + max-file: 3 command: ["all"] environment: - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 3b96c05bc0..56fbceaae5 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -98,17 +98,21 @@ services: image: jaegertracing/example-hotrod:1.30 container_name: hotrod ports: - - "9000:8080" + - "9000:8080" + logging: + options: + max-size: 50m + max-file: 3 command: ["all"] environment: - - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces + - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces load-hotrod: image: "grubykarol/locust:1.2.3-python3.9-alpine3.12" container_name: load-hotrod hostname: load-hotrod ports: - - "8089:8089" + - "8089:8089" environment: ATTACKED_HOST: http://hotrod:8080 LOCUST_MODE: standalone From 
d92a3e64f58477af46fc56781489a4be05ec42f0 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Mon, 31 Jan 2022 18:24:05 +0530 Subject: [PATCH 43/81] =?UTF-8?q?ci(k3s):=20=F0=9F=92=9A=20fix=20correct?= =?UTF-8?q?=20raw=20github=20URL=20for=20hotrod?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- .github/workflows/e2e-k3s.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 97885d9b92..132139615b 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -58,7 +58,7 @@ jobs: kubectl create ns sample-application # apply hotrod k8s manifest file - kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/blob/main/sample-apps/hotrod/hotrod.yaml + kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml # wait for all deployments in sample-application namespace to be READY kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s From e3c4bfce528eec2e5a6441608165baf9e1b46388 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Mon, 31 Jan 2022 19:07:45 +0530 Subject: [PATCH 44/81] =?UTF-8?q?ci(k3s):=20=F0=9F=92=9A=20fix=20correct?= =?UTF-8?q?=20raw=20github=20URL=20for=20hotrod=20(#661)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi (cherry picked from commit d92a3e64f58477af46fc56781489a4be05ec42f0) --- .github/workflows/e2e-k3s.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 41bea93c8e..8bc96056e1 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -58,7 +58,7 @@ jobs: kubectl create ns sample-application # apply hotrod k8s manifest 
file - kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/blob/main/sample-apps/hotrod/hotrod.yaml + kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml # wait for all deployments in sample-application namespace to be READY kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s From ebb1c2ac796fdec1cbd55b0f3190c4a049d01a4a Mon Sep 17 00:00:00 2001 From: Mustaque Ahmed Date: Tue, 1 Feb 2022 10:26:31 +0530 Subject: [PATCH 45/81] fix: remove table `default.signoz_spans` from the codebase (#656) * fix: remove table `default.signoz_spans` from the codebase * fix: remove spanTable and archiveSpanTable from clickHouseReader --- pkg/query-service/app/clickhouseReader/options.go | 4 ---- pkg/query-service/app/clickhouseReader/reader.go | 2 -- 2 files changed, 6 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index 979d0bf147..66a1eaacdc 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -19,9 +19,7 @@ const ( defaultDatasource string = "tcp://localhost:9000" defaultOperationsTable string = "signoz_operations" defaultIndexTable string = "signoz_index" - defaultSpansTable string = "signoz_spans" defaultErrorTable string = "signoz_error_index" - defaultArchiveSpansTable string = "signoz_archive_spans" defaultWriteBatchDelay time.Duration = 5 * time.Second defaultWriteBatchSize int = 10000 defaultEncoding Encoding = EncodingJSON @@ -90,7 +88,6 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s Datasource: datasource, OperationsTable: defaultOperationsTable, IndexTable: defaultIndexTable, - SpansTable: defaultSpansTable, ErrorTable: defaultErrorTable, WriteBatchDelay: defaultWriteBatchDelay, WriteBatchSize: defaultWriteBatchSize, 
@@ -107,7 +104,6 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s Datasource: datasource, OperationsTable: "", IndexTable: "", - SpansTable: defaultArchiveSpansTable, ErrorTable: "", WriteBatchDelay: defaultWriteBatchDelay, WriteBatchSize: defaultWriteBatchSize, diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 8e9353811e..2158408631 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -74,7 +74,6 @@ type ClickHouseReader struct { operationsTable string indexTable string errorTable string - spansTable string queryEngine *promql.Engine remoteStorage *remote.Storage ruleManager *rules.Manager @@ -98,7 +97,6 @@ func NewReader(localDB *sqlx.DB) *ClickHouseReader { localDB: localDB, operationsTable: options.primary.OperationsTable, indexTable: options.primary.IndexTable, - spansTable: options.primary.SpansTable, errorTable: options.primary.ErrorTable, } } From 48ac20885ffebad89ec3208fee3305dd212da29b Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 1 Feb 2022 23:03:16 +0530 Subject: [PATCH 46/81] =?UTF-8?q?refactor(query-service):=20=E2=99=BB?= =?UTF-8?q?=EF=B8=8F=20=20Update=20ldflags=20and=20Makefile=20for=20dynami?= =?UTF-8?q?c=20versioning=20(#655)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor(query-service): ♻️ Update ldflags and Makefile for dynamic versioning Signed-off-by: Prashant Shahi * chore: 🎨 Use blacnk spaces indentation in build details * chore(query-service): 🎨 small build details format changes * refactor(query-service): ♻️ refactor ldflags for go build --- Makefile | 31 +++++++++++++++++--- pkg/query-service/Dockerfile | 17 +++++++---- pkg/query-service/main.go | 3 +- pkg/query-service/version/version.go | 42 ++++++++++++++++++++++------ 4 files changed, 74 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile 
index b63d793131..21edec847d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,14 @@ +# # Reference Guide - https://www.gnu.org/software/make/manual/make.html # + +# Build variables +BUILD_VERSION ?= $(shell git describe --always --tags) +BUILD_HASH ?= $(shell git rev-parse --short HEAD) +BUILD_TIME ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") +BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) + # Internal variables or constants. -# FRONTEND_DIRECTORY ?= frontend FLATTENER_DIRECTORY ?= pkg/processors/flattener QUERY_SERVICE_DIRECTORY ?= pkg/query-service @@ -13,6 +20,15 @@ FRONTEND_DOCKER_IMAGE ?= frontend QUERY_SERVICE_DOCKER_IMAGE ?= query-service FLATTERNER_DOCKER_IMAGE ?= flattener-processor +# Build-time Go variables +PACKAGE?=go.signoz.io/query-service +buildVersion=${PACKAGE}/version.buildVersion +buildHash=${PACKAGE}/version.buildHash +buildTime=${PACKAGE}/version.buildTime +gitBranch=${PACKAGE}/version.gitBranch + +LD_FLAGS="-X ${buildHash}=${BUILD_HASH} -X ${buildTime}=${BUILD_TIME} -X ${buildVersion}=${BUILD_VERSION} -X ${gitBranch}=${BUILD_BRANCH}" + all: build-push-frontend build-push-query-service build-push-flattener # Steps to build and push docker image of frontend .PHONY: build-frontend-amd64 build-push-frontend @@ -47,7 +63,9 @@ build-query-service-amd64: @echo "--> Building query-service docker image for amd64" @echo "------------------" @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" + docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . 
\ + --build-arg TARGETPLATFORM="linux/amd64" \ + --build-arg LD_FLAGS=$(LD_FLAGS) # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline) build-push-query-service: @@ -56,10 +74,15 @@ build-push-query-service: @echo "------------------" ifndef DOCKER_SECOND_TAG @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . + docker buildx build --file Dockerfile --progress plane --no-cache --push \ + --platform linux/arm64,linux/amd64 . \ + --build-arg LD_FLAGS=$(LD_FLAGS) \ + --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) else @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 . \ + docker buildx build --file Dockerfile --progress plane --no-cache \ + --push --platform linux/arm64,linux/amd64 . \ + --build-arg LD_FLAGS=$(LD_FLAGS) \ --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \ --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) endif diff --git a/pkg/query-service/Dockerfile b/pkg/query-service/Dockerfile index 26352bf734..fe93291f79 100644 --- a/pkg/query-service/Dockerfile +++ b/pkg/query-service/Dockerfile @@ -1,8 +1,7 @@ FROM golang:1.14-buster AS builder -# Add Maintainer Info -LABEL maintainer="signoz" - +# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed +ARG LD_FLAGS ARG TARGETPLATFORM ENV CGO_ENABLED=1 @@ -21,15 +20,22 @@ RUN go mod download -x # Add the sources and proceed with build ADD . . 
-RUN go build -a -ldflags "-linkmode external -extldflags '-static' -s -w" -o ./bin/query-service ./main.go +RUN go build -a -ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" -o ./bin/query-service ./main.go RUN chmod +x ./bin/query-service + # use a minimal alpine image FROM alpine:3.7 + +# Add Maintainer Info +LABEL maintainer="signoz" + # add ca-certificates in case you need them RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* + # set working directory WORKDIR /root + # copy the binary from builder COPY --from=builder /go/src/github.com/signoz/signoz/pkg/query-service/bin/query-service . @@ -37,7 +43,8 @@ COPY config/prometheus.yml /root/config/prometheus.yml # run the binary ENTRYPOINT ["./query-service"] + CMD ["-config", "/root/config/prometheus.yml"] # CMD ["./query-service -config /root/config/prometheus.yml"] -EXPOSE 8080 +EXPOSE 8080 diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index b8e614868e..3418154d79 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -10,6 +10,7 @@ import ( "go.signoz.io/query-service/app" "go.signoz.io/query-service/constants" + "go.signoz.io/query-service/version" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -31,7 +32,7 @@ func main() { defer loggerMgr.Sync() // flushes buffer, if any logger := loggerMgr.Sugar() - logger.Debug("STARTING!") + version.PrintVersion() serverOptions := &app.ServerOptions{ // HTTPHostPort: v.GetString(app.HTTPHostPort), diff --git a/pkg/query-service/version/version.go b/pkg/query-service/version/version.go index cad1bfa6f0..f53f42c544 100644 --- a/pkg/query-service/version/version.go +++ b/pkg/query-service/version/version.go @@ -1,24 +1,48 @@ package version import ( + "fmt" + "runtime" + "go.uber.org/zap" ) // These fields are set during an official build // Global vars set from command-line arguments var ( - version = "--" - buildhash = "--" - buildtime = "--" + buildVersion = "--" + buildHash = "--" + buildTime = 
"--" + gitBranch = "--" ) -//PrintVersionInfo displays the kyverno version - git version -func PrintVersionInfo() { - zap.S().Info("Version: ", version) - zap.S().Info("BuildHash: ", buildhash) - zap.S().Info("BuildTime: ", buildtime) +// BuildDetails returns a string containing details about the SigNoz query-service binary. +func BuildDetails() string { + licenseInfo := `Licensed under the MIT License` + + return fmt.Sprintf(` +SigNoz version : %v +Commit SHA-1 : %v +Commit timestamp : %v +Branch : %v +Go version : %v + +For SigNoz Official Documentation, visit https://signoz.io/docs +For SigNoz Community Slack, visit http://signoz.io/slack +For discussions about SigNoz, visit https://community.signoz.io + +%s. +Copyright 2022 SigNoz +`, + buildVersion, buildHash, buildTime, gitBranch, + runtime.Version(), licenseInfo) +} + +// PrintVersion prints version and other helpful information. +func PrintVersion() { + zap.S().Infof("\n%s\n", BuildDetails()) } func GetVersion() string { - return version + return buildVersion } From 24f1404741f48d953482982b8c2776c15f35629c Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 2 Feb 2022 03:40:11 +0530 Subject: [PATCH 47/81] =?UTF-8?q?fix(compose-yaml):=20=F0=9F=A9=B9=20infer?= =?UTF-8?q?=20max-file=20logging=20option=20as=20string?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- deploy/docker/clickhouse-setup/docker-compose.arm.yaml | 2 +- deploy/docker/clickhouse-setup/docker-compose.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml index 9331701cb7..6f7a8912a7 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -100,7 +100,7 @@ services: logging: options: max-size: 50m - max-file: 3 + max-file: "3" command: ["all"] environment: - 
JAEGER_ENDPOINT=http://otel-collector:14268/api/traces diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 56fbceaae5..8621c77e12 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -102,7 +102,7 @@ services: logging: options: max-size: 50m - max-file: 3 + max-file: "3" command: ["all"] environment: - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces From fdd9287847e69ecddc058cdce934dd4a60cabed2 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Wed, 2 Feb 2022 11:40:30 +0530 Subject: [PATCH 48/81] Fix 414 errors trace filter API (#660) * fix: change trace filter APIs method from GET to POST * fix: error filter param * fix: json of aggregate params --- .../app/clickhouseReader/reader.go | 39 +- pkg/query-service/app/http_handler.go | 10 +- pkg/query-service/app/parser.go | 343 +++++------------- pkg/query-service/model/queryParams.go | 131 +++---- 4 files changed, 185 insertions(+), 338 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 2158408631..e99e1ada0c 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1622,13 +1622,12 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo query = query + " AND durationNano <= ?" 
args = append(args, queryParams.MaxDuration) } - if len(queryParams.Status) != 0 { - for _, e := range queryParams.Status { - if e == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if e == "ok" { - query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" - } + // status can only be two and if both are selected than they are equivalent to none selected + if len(queryParams.Status) == 1 { + if queryParams.Status[0] == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if queryParams.Status[0] == "ok" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" } } if len(queryParams.Kind) != 0 { @@ -1776,13 +1775,12 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model query = query + " AND durationNano <= ?" args = append(args, queryParams.MaxDuration) } - if len(queryParams.Status) != 0 { - for _, e := range queryParams.Status { - if e == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if e == "ok" { - query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" - } + // status can only be two and if both are selected than they are equivalent to none selected + if len(queryParams.Status) == 1 { + if queryParams.Status[0] == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if queryParams.Status[0] == "ok" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" } } tagFilters := []model.TagFilters{} @@ -2361,13 +2359,12 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query query = query + " AND durationNano <= ?" 
args = append(args, queryParams.MaxDuration) } - if len(queryParams.Status) != 0 { - for _, e := range queryParams.Status { - if e == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if e == "ok" { - query += " AND (NOT ( has(tags, 'error:true') AND statusCode<500 AND statusCode!=2))" - } + // status can only be two and if both are selected than they are equivalent to none selected + if len(queryParams.Status) == 1 { + if queryParams.Status[0] == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if queryParams.Status[0] == "ok" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" } } if len(queryParams.Kind) != 0 { diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index e8a7ebca7f..d76e8c6ca1 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -209,10 +209,10 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) { router.HandleFunc("/api/v1/userPreferences", aH.getUserPreferences).Methods(http.MethodGet) router.HandleFunc("/api/v1/version", aH.getVersion).Methods(http.MethodGet) - router.HandleFunc("/api/v1/getSpanFilters", aH.getSpanFilters).Methods(http.MethodGet) - router.HandleFunc("/api/v1/getTagFilters", aH.getTagFilters).Methods(http.MethodGet) - router.HandleFunc("/api/v1/getFilteredSpans", aH.getFilteredSpans).Methods(http.MethodGet) - router.HandleFunc("/api/v1/getFilteredSpans/aggregates", aH.getFilteredSpanAggregates).Methods(http.MethodGet) + router.HandleFunc("/api/v1/getSpanFilters", aH.getSpanFilters).Methods(http.MethodPost) + router.HandleFunc("/api/v1/getTagFilters", aH.getTagFilters).Methods(http.MethodPost) + router.HandleFunc("/api/v1/getFilteredSpans", aH.getFilteredSpans).Methods(http.MethodPost) + router.HandleFunc("/api/v1/getFilteredSpans/aggregates", aH.getFilteredSpanAggregates).Methods(http.MethodPost) 
router.HandleFunc("/api/v1/errors", aH.getErrors).Methods(http.MethodGet) router.HandleFunc("/api/v1/errorWithId", aH.getErrorForId).Methods(http.MethodGet) @@ -978,7 +978,7 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getSpanFilters(w http.ResponseWriter, r *http.Request) { - query, err := parseSpanFilterRequest(r) + query, err := parseSpanFilterRequestBody(r) if aH.handleError(w, err, http.StatusBadRequest) { return } diff --git a/pkg/query-service/app/parser.go b/pkg/query-service/app/parser.go index 87c1c00303..89d1a6c07f 100644 --- a/pkg/query-service/app/parser.go +++ b/pkg/query-service/app/parser.go @@ -482,183 +482,76 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) { return params, nil } -func parseSpanFilterRequest(r *http.Request) (*model.SpanFilterParams, error) { +func parseSpanFilterRequestBody(r *http.Request) (*model.SpanFilterParams, error) { + + var postData *model.SpanFilterParams + err := json.NewDecoder(r.Body).Decode(&postData) - startTime, err := parseTime("start", r) - if err != nil { - return nil, err - } - endTime, err := parseTimeMinusBuffer("end", r) if err != nil { return nil, err } - params := &model.SpanFilterParams{ - Start: startTime, - End: endTime, - ServiceName: []string{}, - HttpRoute: []string{}, - HttpCode: []string{}, - HttpUrl: []string{}, - HttpHost: []string{}, - HttpMethod: []string{}, - Component: []string{}, - Status: []string{}, - Operation: []string{}, - GetFilters: []string{}, - Exclude: []string{}, + postData.Start, err = parseTimeStr(postData.StartStr, "start") + if err != nil { + return nil, err + } + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") + if err != nil { + return nil, err } - params.ServiceName = fetchArrayValues("serviceName", r) - - params.Status = fetchArrayValues("status", r) - - params.Operation = fetchArrayValues("operation", r) - - params.HttpCode = fetchArrayValues("httpCode", r) - - 
params.HttpUrl = fetchArrayValues("httpUrl", r) - - params.HttpHost = fetchArrayValues("httpHost", r) - - params.HttpRoute = fetchArrayValues("httpRoute", r) - - params.HttpMethod = fetchArrayValues("httpMethod", r) - - params.Component = fetchArrayValues("component", r) - - params.GetFilters = fetchArrayValues("getFilters", r) - - params.Exclude = fetchArrayValues("exclude", r) - - minDuration, err := parseTimestamp("minDuration", r) - if err == nil { - params.MinDuration = *minDuration - } - maxDuration, err := parseTimestamp("maxDuration", r) - if err == nil { - params.MaxDuration = *maxDuration - } - - return params, nil + return postData, nil } func parseFilteredSpansRequest(r *http.Request) (*model.GetFilteredSpansParams, error) { - startTime, err := parseTime("start", r) - if err != nil { - return nil, err - } - endTime, err := parseTimeMinusBuffer("end", r) + var postData *model.GetFilteredSpansParams + err := json.NewDecoder(r.Body).Decode(&postData) + if err != nil { return nil, err } - params := &model.GetFilteredSpansParams{ - Start: startTime, - End: endTime, - ServiceName: []string{}, - HttpRoute: []string{}, - HttpCode: []string{}, - HttpUrl: []string{}, - HttpHost: []string{}, - HttpMethod: []string{}, - Component: []string{}, - Status: []string{}, - Operation: []string{}, - Limit: 100, - Order: "descending", - Exclude: []string{}, - } - - params.ServiceName = fetchArrayValues("serviceName", r) - - params.Status = fetchArrayValues("status", r) - - params.Operation = fetchArrayValues("operation", r) - - params.HttpCode = fetchArrayValues("httpCode", r) - - params.HttpUrl = fetchArrayValues("httpUrl", r) - - params.HttpHost = fetchArrayValues("httpHost", r) - - params.HttpRoute = fetchArrayValues("httpRoute", r) - - params.HttpMethod = fetchArrayValues("httpMethod", r) - - params.Component = fetchArrayValues("component", r) - - params.Exclude = fetchArrayValues("exclude", r) - - limitStr := r.URL.Query().Get("limit") - if len(limitStr) != 0 { - limit, 
err := strconv.ParseInt(limitStr, 10, 64) - if err != nil { - return nil, errors.New("Limit param is not in correct format") - } - params.Limit = limit - } else { - params.Limit = 100 - } - - offsetStr := r.URL.Query().Get("offset") - if len(offsetStr) != 0 { - offset, err := strconv.ParseInt(offsetStr, 10, 64) - if err != nil { - return nil, errors.New("Offset param is not in correct format") - } - params.Offset = offset - } - - tags, err := parseTagsV2("tags", r) + postData.Start, err = parseTimeStr(postData.StartStr, "start") if err != nil { return nil, err } - if len(*tags) != 0 { - params.Tags = *tags + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") + if err != nil { + return nil, err } - minDuration, err := parseTimestamp("minDuration", r) - if err == nil { - params.MinDuration = *minDuration - } - maxDuration, err := parseTimestamp("maxDuration", r) - if err == nil { - params.MaxDuration = *maxDuration + if postData.Limit == 0 { + postData.Limit = 100 } - kind := r.URL.Query().Get("kind") - if len(kind) != 0 { - params.Kind = kind - } - - return params, nil + return postData, nil } func parseFilteredSpanAggregatesRequest(r *http.Request) (*model.GetFilteredSpanAggregatesParams, error) { - startTime, err := parseTime("start", r) + var postData *model.GetFilteredSpanAggregatesParams + err := json.NewDecoder(r.Body).Decode(&postData) + if err != nil { return nil, err } - endTime, err := parseTimeMinusBuffer("end", r) + postData.Start, err = parseTimeStr(postData.StartStr, "start") + if err != nil { + return nil, err + } + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") if err != nil { return nil, err } - stepStr := r.URL.Query().Get("step") - if len(stepStr) == 0 { + step := postData.StepSeconds + if step == 0 { return nil, errors.New("step param missing in query") } - stepInt, err := strconv.Atoi(stepStr) - if err != nil { - return nil, errors.New("step param is not in correct format") - } - - function := 
r.URL.Query().Get("function") + function := postData.Function if len(function) == 0 { return nil, errors.New("function param missing in query") } else { @@ -702,71 +595,17 @@ func parseFilteredSpanAggregatesRequest(r *http.Request) (*model.GetFilteredSpan aggregationOption = "max" } - params := &model.GetFilteredSpanAggregatesParams{ - Start: startTime, - End: endTime, - ServiceName: []string{}, - HttpRoute: []string{}, - HttpCode: []string{}, - HttpUrl: []string{}, - HttpHost: []string{}, - HttpMethod: []string{}, - Component: []string{}, - Status: []string{}, - Operation: []string{}, - StepSeconds: stepInt, - Dimension: dimension, - AggregationOption: aggregationOption, - Exclude: []string{}, - } + postData.AggregationOption = aggregationOption + postData.Dimension = dimension + // tags, err := parseTagsV2("tags", r) + // if err != nil { + // return nil, err + // } + // if len(*tags) != 0 { + // params.Tags = *tags + // } - params.ServiceName = fetchArrayValues("serviceName", r) - - params.Status = fetchArrayValues("status", r) - - params.Operation = fetchArrayValues("operation", r) - - params.HttpCode = fetchArrayValues("httpCode", r) - - params.HttpUrl = fetchArrayValues("httpUrl", r) - - params.HttpHost = fetchArrayValues("httpHost", r) - - params.HttpRoute = fetchArrayValues("httpRoute", r) - - params.HttpMethod = fetchArrayValues("httpMethod", r) - - params.Component = fetchArrayValues("component", r) - - params.Exclude = fetchArrayValues("exclude", r) - - tags, err := parseTagsV2("tags", r) - if err != nil { - return nil, err - } - if len(*tags) != 0 { - params.Tags = *tags - } - - minDuration, err := parseTimestamp("minDuration", r) - if err == nil { - params.MinDuration = *minDuration - } - maxDuration, err := parseTimestamp("maxDuration", r) - if err == nil { - params.MaxDuration = *maxDuration - } - - kind := r.URL.Query().Get("kind") - if len(kind) != 0 { - params.Kind = kind - } - groupBy := r.URL.Query().Get("groupBy") - if len(groupBy) != 0 { - 
params.GroupBy = groupBy - } - - return params, nil + return postData, nil } func parseErrorRequest(r *http.Request) (*model.GetErrorParams, error) { @@ -792,61 +631,24 @@ func parseErrorRequest(r *http.Request) (*model.GetErrorParams, error) { } func parseTagFilterRequest(r *http.Request) (*model.TagFilterParams, error) { + var postData *model.TagFilterParams + err := json.NewDecoder(r.Body).Decode(&postData) - startTime, err := parseTime("start", r) - if err != nil { - return nil, err - } - endTime, err := parseTimeMinusBuffer("end", r) if err != nil { return nil, err } - params := &model.TagFilterParams{ - Start: startTime, - End: endTime, - ServiceName: []string{}, - HttpRoute: []string{}, - HttpCode: []string{}, - HttpUrl: []string{}, - HttpHost: []string{}, - HttpMethod: []string{}, - Component: []string{}, - Status: []string{}, - Operation: []string{}, - Exclude: []string{}, + postData.Start, err = parseTimeStr(postData.StartStr, "start") + if err != nil { + return nil, err + } + postData.End, err = parseTimeMinusBufferStr(postData.EndStr, "end") + if err != nil { + return nil, err } - params.ServiceName = fetchArrayValues("serviceName", r) + return postData, nil - params.Status = fetchArrayValues("status", r) - - params.Operation = fetchArrayValues("operation", r) - - params.HttpCode = fetchArrayValues("httpCode", r) - - params.HttpUrl = fetchArrayValues("httpUrl", r) - - params.HttpHost = fetchArrayValues("httpHost", r) - - params.HttpRoute = fetchArrayValues("httpRoute", r) - - params.HttpMethod = fetchArrayValues("httpMethod", r) - - params.Component = fetchArrayValues("component", r) - - params.Exclude = fetchArrayValues("exclude", r) - - minDuration, err := parseTimestamp("minDuration", r) - if err == nil { - params.MinDuration = *minDuration - } - maxDuration, err := parseTimestamp("maxDuration", r) - if err == nil { - params.MaxDuration = *maxDuration - } - - return params, nil } func parseErrorsRequest(r *http.Request) (*model.GetErrorsParams, 
error) { @@ -957,6 +759,45 @@ func parseApplicationPercentileRequest(r *http.Request) (*model.ApplicationPerce } +func parseTimeStr(timeStr string, param string) (*time.Time, error) { + + if len(timeStr) == 0 { + return nil, fmt.Errorf("%s param missing in query", param) + } + + timeUnix, err := strconv.ParseInt(timeStr, 10, 64) + if err != nil || len(timeStr) == 0 { + return nil, fmt.Errorf("%s param is not in correct timestamp format", param) + } + + timeFmt := time.Unix(0, timeUnix) + + return &timeFmt, nil + +} + +func parseTimeMinusBufferStr(timeStr string, param string) (*time.Time, error) { + + if len(timeStr) == 0 { + return nil, fmt.Errorf("%s param missing in query", param) + } + + timeUnix, err := strconv.ParseInt(timeStr, 10, 64) + if err != nil || len(timeStr) == 0 { + return nil, fmt.Errorf("%s param is not in correct timestamp format", param) + } + + timeUnixNow := time.Now().UnixNano() + if timeUnix > timeUnixNow-30000000000 { + timeUnix = timeUnix - 30000000000 + } + + timeFmt := time.Unix(0, timeUnix) + + return &timeFmt, nil + +} + func parseTime(param string, r *http.Request) (*time.Time, error) { timeStr := r.URL.Query().Get(param) diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 2ae9411b4e..8ceaddd9a6 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -117,85 +117,94 @@ type SpanSearchParams struct { } type GetFilteredSpansParams struct { - ServiceName []string - Operation []string - Kind string - Status []string - HttpRoute []string - HttpCode []string - HttpUrl []string - HttpHost []string - HttpMethod []string - Component []string + ServiceName []string `json:"serviceName"` + Operation []string `json:"operation"` + Kind string `json:"kind"` + Status []string `json:"status"` + HttpRoute []string `json:"httpRoute"` + HttpCode []string `json:"httpCode"` + HttpUrl []string `json:"httpUrl"` + HttpHost []string `json:"httpHost"` + HttpMethod 
[]string `json:"httpMethod"` + Component []string `json:"component"` + StartStr string `json:"start"` + EndStr string `json:"end"` + MinDuration string `json:"minDuration"` + MaxDuration string `json:"maxDuration"` + Limit int64 `json:"limit"` + Order string `json:"order"` + Offset int64 `json:"offset"` + Tags []TagQueryV2 `json:"tags"` + Exclude []string `json:"exclude"` Start *time.Time End *time.Time - MinDuration string - MaxDuration string - Limit int64 - Order string - Offset int64 - Tags []TagQueryV2 - Exclude []string } type GetFilteredSpanAggregatesParams struct { - ServiceName []string - Operation []string - Kind string - Status []string - HttpRoute []string - HttpCode []string - HttpUrl []string - HttpHost []string - HttpMethod []string - Component []string - MinDuration string - MaxDuration string - Tags []TagQueryV2 + ServiceName []string `json:"serviceName"` + Operation []string `json:"operation"` + Kind string `json:"kind"` + Status []string `json:"status"` + HttpRoute []string `json:"httpRoute"` + HttpCode []string `json:"httpCode"` + HttpUrl []string `json:"httpUrl"` + HttpHost []string `json:"httpHost"` + HttpMethod []string `json:"httpMethod"` + Component []string `json:"component"` + MinDuration string `json:"minDuration"` + MaxDuration string `json:"maxDuration"` + Tags []TagQueryV2 `json:"tags"` + StartStr string `json:"start"` + EndStr string `json:"end"` + StepSeconds int `json:"step"` + Dimension string `json:"dimension"` + AggregationOption string `json:"aggregationOption"` + GroupBy string `json:"groupBy"` + Function string `json:"function"` + Exclude []string `json:"exclude"` Start *time.Time End *time.Time - StepSeconds int - Dimension string - AggregationOption string - GroupBy string - Function string - Exclude []string } type SpanFilterParams struct { - Status []string - ServiceName []string - HttpRoute []string - HttpCode []string - HttpUrl []string - HttpHost []string - HttpMethod []string - Component []string - Operation []string 
- GetFilters []string - Exclude []string - MinDuration string - MaxDuration string + Status []string `json:"status"` + ServiceName []string `json:"serviceName"` + HttpRoute []string `json:"httpRoute"` + HttpCode []string `json:"httpCode"` + HttpUrl []string `json:"httpUrl"` + HttpHost []string `json:"httpHost"` + HttpMethod []string `json:"httpMethod"` + Component []string `json:"component"` + Operation []string `json:"operation"` + GetFilters []string `json:"getFilters"` + Exclude []string `json:"exclude"` + MinDuration string `json:"minDuration"` + MaxDuration string `json:"maxDuration"` + StartStr string `json:"start"` + EndStr string `json:"end"` Start *time.Time End *time.Time } type TagFilterParams struct { - Status []string - ServiceName []string - HttpRoute []string - HttpCode []string - HttpUrl []string - HttpHost []string - HttpMethod []string - Component []string - Operation []string - Exclude []string - MinDuration string - MaxDuration string + Status []string `json:"status"` + ServiceName []string `json:"serviceName"` + HttpRoute []string `json:"httpRoute"` + HttpCode []string `json:"httpCode"` + HttpUrl []string `json:"httpUrl"` + HttpHost []string `json:"httpHost"` + HttpMethod []string `json:"httpMethod"` + Component []string `json:"component"` + Operation []string `json:"operation"` + Exclude []string `json:"exclude"` + MinDuration string `json:"minDuration"` + MaxDuration string `json:"maxDuration"` + StartStr string `json:"start"` + EndStr string `json:"end"` Start *time.Time End *time.Time } + type TTLParams struct { Type string Duration string From c372eac3e373fb24162a84bd50e8ef93c733f0b6 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 2 Feb 2022 15:22:35 +0530 Subject: [PATCH 49/81] =?UTF-8?q?docs(contributing):=20=F0=9F=93=9D=20Add?= =?UTF-8?q?=20Helm=20Chart=20contribute=20instructions=20(#668)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- 
CONTRIBUTING.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c07bc6cc61..b215b9d478 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,6 +56,40 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht > If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080` +# Contribute to SigNoz Helm Chart + +Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/charts). + +### To run helm chart for local development + +- run `git clone https://github.com/SigNoz/charts.git` followed by `cd charts` +- it is recommended to use lightweight kubernetes (k8s) cluster for local development: + - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) + - [k3d](https://k3d.io/#installation) + - [minikube](https://minikube.sigs.k8s.io/docs/start/) +- create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster +- run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart +- run `kubectl -n platform port-forward svc/my-release-frontend 3000:3000` to make SigNoz UI available at [localhost:3000](http://localhost:3000) + +**To load data with HotROD sample app:** + +```sh +kubectl create ns sample-application + +kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml + +kubectl -n sample-application run strzal --image=djbingham/curl \ +--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \ +'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm +``` + +**To stop the load generation:** + +```sh +kubectl -n sample-application run strzal --image=djbingham/curl \ + --restart='OnFailure' -i --tty --rm --command -- curl \ 
+ http://locust-master:8089/stop +``` ## General Instructions From cd04a39d3dcc1579dc5807ad9cece4eed4437f0d Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 2 Feb 2022 16:41:13 +0530 Subject: [PATCH 50/81] =?UTF-8?q?chore:=20=F0=9F=9A=9A=20rename=20config?= =?UTF-8?q?=20.yaml=20to=20yml=20for=20behaviorbot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- .github/{config.yaml => config.yml} | 2 ++ 1 file changed, 2 insertions(+) rename .github/{config.yaml => config.yml} (96%) diff --git a/.github/config.yaml b/.github/config.yml similarity index 96% rename from .github/config.yaml rename to .github/config.yml index 9105934e67..d31bb878b4 100644 --- a/.github/config.yaml +++ b/.github/config.yml @@ -27,3 +27,5 @@ firstPRMergeComment: > # Comment to be posted in issues or pull requests, when no description is provided. requestInfoReplyComment: > We would appreciate it if you could provide us with more info about this issue/pr! 
+ +requestInfoLabelToAdd: request-more-info From e0a7002a291dd363ac54574eb13c99fa4b482c95 Mon Sep 17 00:00:00 2001 From: Sonia Manoubi Date: Fri, 4 Feb 2022 15:49:09 +0100 Subject: [PATCH 51/81] fix: api call on apply button --- frontend/src/container/NewWidget/index.tsx | 28 +++++++++++++--------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/frontend/src/container/NewWidget/index.tsx b/frontend/src/container/NewWidget/index.tsx index f2067c1bd0..832aec9f4b 100644 --- a/frontend/src/container/NewWidget/index.tsx +++ b/frontend/src/container/NewWidget/index.tsx @@ -17,6 +17,7 @@ import { SaveDashboard, SaveDashboardProps, } from 'store/actions/dashboard/saveDashboard'; +import { UpdateQuery, UpdateQueryProps } from 'store/actions/dashboard/updateQuery'; import { AppState } from 'store/reducers'; import AppActions from 'types/actions'; import { GlobalTime } from 'types/actions/globalTime'; @@ -39,6 +40,7 @@ const NewWidget = ({ applySettingsToPanel, saveSettingOfPanel, getQueryResults, + updateQuery }: Props): JSX.Element => { const { dashboards } = useSelector( (state) => state.dashboards, @@ -120,7 +122,16 @@ const NewWidget = ({ dashboardId, ]); - const onClickApplyHandler = useCallback(() => { + const onClickApplyHandler = () => { + selectedWidget?.query.forEach((element, index) => { + updateQuery({ + widgetId: selectedWidget?.id || '', + query: element.query || '', + legend: element.legend || '', + currentIndex: index + }); + }) + applySettingsToPanel({ description, isStacked: stacked, @@ -130,16 +141,7 @@ const NewWidget = ({ title, widgetId: selectedWidget?.id || '', }); - }, [ - applySettingsToPanel, - description, - opacity, - selectedTime, - selectedWidget?.id, - selectedNullZeroValue, - stacked, - title, - ]); + } const onClickDiscardHandler = useCallback(() => { push(generatePath(ROUTES.DASHBOARD, { dashboardId })); @@ -220,6 +222,9 @@ interface DispatchProps { getQueryResults: ( props: GetQueryResultsProps, ) => (dispatch: Dispatch) => 
void; + updateQuery: ( + props: UpdateQueryProps, + ) => (dispatch: Dispatch) => void; } const mapDispatchToProps = ( @@ -228,6 +233,7 @@ const mapDispatchToProps = ( applySettingsToPanel: bindActionCreators(ApplySettingsToPanel, dispatch), saveSettingOfPanel: bindActionCreators(SaveDashboard, dispatch), getQueryResults: bindActionCreators(GetQueryResults, dispatch), + updateQuery: bindActionCreators(UpdateQuery, dispatch) }); type Props = DispatchProps & NewWidgetProps; From 57992134bc6cd90ebb4a6e04c9c4cec1d6672403 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Mon, 7 Feb 2022 12:52:10 +0530 Subject: [PATCH 52/81] =?UTF-8?q?chore:=20=F0=9F=9A=9A=20rename=20config?= =?UTF-8?q?=20.yaml=20to=20yml=20for=20behaviorbot=20(#673)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi (cherry picked from commit cd04a39d3dcc1579dc5807ad9cece4eed4437f0d) --- .github/{config.yaml => config.yml} | 2 ++ 1 file changed, 2 insertions(+) rename .github/{config.yaml => config.yml} (96%) diff --git a/.github/config.yaml b/.github/config.yml similarity index 96% rename from .github/config.yaml rename to .github/config.yml index 9105934e67..d31bb878b4 100644 --- a/.github/config.yaml +++ b/.github/config.yml @@ -27,3 +27,5 @@ firstPRMergeComment: > # Comment to be posted in issues or pull requests, when no description is provided. requestInfoReplyComment: > We would appreciate it if you could provide us with more info about this issue/pr! 
+ +requestInfoLabelToAdd: request-more-info From 07183d518923016473f956d3ecc8c7b3ee462ad8 Mon Sep 17 00:00:00 2001 From: Siddhant Khare Date: Tue, 8 Feb 2022 10:27:52 +0530 Subject: [PATCH 53/81] Gitpodify the Signoz (#634) * chore(docs): updated lines of frontend & query sec * fix: update baseURL for local & gitpod * chore: allow all for dev to run on https * chore(docs): add maintainer note at docker-compose * chore: update gitignore to ignore .db & logs * chore: upd lines of fe & query-service & notes * feat: gitpodify the signoz with all envs. & ports * fix: relative path of .scripts dir * chore(ci): distribute tasks in gitpod.yml * fix: run docker image while init * fix: add empty url option for `baseURL` --- .gitpod.yml | 36 +++++++++++++++++++ .scripts/commentLinesForSetup.sh | 7 ++++ CONTRIBUTING.md | 20 +++++++++-- .../clickhouse-setup/docker-compose.yaml | 3 ++ frontend/src/constants/env.ts | 2 +- frontend/webpack.config.js | 1 + 6 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 .gitpod.yml create mode 100644 .scripts/commentLinesForSetup.sh diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 0000000000..db4801ba35 --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,36 @@ +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. 
+ + +tasks: + - name: Run Script to Comment ut required lines + init: | + cd ./.scripts + sh commentLinesForSetup.sh + + - name: Run Docker Images + init: | + cd ./deploy + sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d + # command: + + - name: Run Frontend + init: | + cd ./frontend + yarn install + command: + yarn dev + +ports: + - port: 3000 + onOpen: open-browser + - port: 8080 + onOpen: ignore + - port: 9000 + onOpen: ignore + - port: 8123 + onOpen: ignore + - port: 8089 + onOpen: ignore + - port: 9093 + onOpen: ignore diff --git a/.scripts/commentLinesForSetup.sh b/.scripts/commentLinesForSetup.sh new file mode 100644 index 0000000000..7ea6b468ad --- /dev/null +++ b/.scripts/commentLinesForSetup.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +# It Comments out the Line Query-Service & Frontend Section of deploy/docker/clickhouse-setup/docker-compose.yaml +# Update the Line Numbers when deploy/docker/clickhouse-setup/docker-compose.yaml chnages. 
+# Docs Ref.: https://github.com/SigNoz/signoz/blob/main/CONTRIBUTING.md#contribute-to-frontend-with-docker-installation-of-signoz + +sed -i 38,70's/.*/# &/' .././deploy/docker/clickhouse-setup/docker-compose.yaml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 99764e34fd..aa6f728b2f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,7 +18,7 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git ### Contribute to Frontend with Docker installation of SigNoz - `git clone https://github.com/SigNoz/signoz.git && cd signoz` -- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38` +- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L59` - run `cd deploy` to move to deploy directory - Install signoz locally without the frontend - If you are using x86_64 processors (All Intel/AMD processors) run `sudo docker-compose --env-file ./docker/clickhouse-setup/env/x86_64.env -f docker/clickhouse-setup/docker-compose.yaml up -d` @@ -27,6 +27,8 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://git - `yarn install` - `yarn dev` +> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` + ### Contribute to Frontend without installing SigNoz backend If you don't want to install SigNoz backend just for doing frontend development, we can provide you with test environments which you can use as the backend. 
Please ping us in #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you with `` @@ -46,16 +48,28 @@ Need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](ht - git clone https://github.com/SigNoz/signoz.git - run `sudo make dev-setup` to configure local setup to run query-service -- comment out frontend service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L38` -- comment out query-service section at `deploy/docker/clickhouse-setup/docker-compose.yaml#L22` +- comment out frontend service section at `docker/clickhouse-setup/docker-compose.yaml#L59` +- comment out query-service section at `docker/clickhouse-setup/docker-compose.yaml#L38` - Install signoz locally without the frontend and query-service - If you are using x86_64 processors (All Intel/AMD processors) run `sudo make run-x86` - If you are on arm64 processors (Apple M1 Macbooks) run `sudo make run-arm` +> Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` + **_Query Service should now be available at `http://localhost:8080`_** > If you want to see how, frontend plays with query service, you can run frontend also in you local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts` as the query-service is now running at port `8080` +--- +Instead of configuring a local setup, you can also use [Gitpod](https://www.gitpod.io/), a VSCode-based Web IDE. + +Click the button below. A workspace with all required environments will be created. 
+ +[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/SigNoz/signoz) + +> To use it on your forked repo, edit the 'Open in Gitpod' button url to `https://gitpod.io/#https://github.com//signoz` + +--- ## General Instructions diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 2a9d3db67a..8defcf6956 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -32,6 +32,9 @@ services: ports: - 9093:9093 +# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` + + query-service: image: signoz/query-service:0.5.4 container_name: query-service diff --git a/frontend/src/constants/env.ts b/frontend/src/constants/env.ts index 9ec41e25c2..6e12260df1 100644 --- a/frontend/src/constants/env.ts +++ b/frontend/src/constants/env.ts @@ -1,3 +1,3 @@ export const ENVIRONMENT = { - baseURL: process?.env?.FRONTEND_API_ENDPOINT || '', + baseURL: process?.env?.FRONTEND_API_ENDPOINT || process.env.GITPOD_WORKSPACE_URL.replace('://','://8080-') ||'http://localhost:8080'|| '', }; diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js index 4e27524146..824b3e75b8 100644 --- a/frontend/webpack.config.js +++ b/frontend/webpack.config.js @@ -41,6 +41,7 @@ const config = { publicPath: '/', watch: true, }, + allowedHosts: 'all', }, target: 'web', output: { From c1d4dc2ad658f99f2ecf6581487ad94d253c0670 Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Tue, 8 Feb 2022 13:28:56 +0530 Subject: [PATCH 54/81] fix: exclude added for status field (#681) * fix: exclude added for status field * chore: extracted status filtering to a function --- .../app/clickhouseReader/reader.go | 60 +++++++++---------- 1 file changed, 28 insertions(+), 32 deletions(-) diff --git 
a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index e99e1ada0c..184127c2dc 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1394,14 +1394,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode args = append(args, queryParams.MaxDuration) } - // status can only be two and if both are selected than they are equivalent to none selected - if len(queryParams.Status) == 1 { - if queryParams.Status[0] == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if queryParams.Status[0] == "ok" { - query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" - } - } + query = getStatusFilters(query, queryParams.Status, excludeMap) traceFilterReponse := model.SpanFiltersResponse{ Status: map[string]int{}, @@ -1579,6 +1572,27 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode return &traceFilterReponse, nil } +func getStatusFilters(query string, statusParams []string, excludeMap map[string]struct{}) string { + + // status can only be two and if both are selected than they are equivalent to none selected + if _, ok := excludeMap["status"]; ok { + if len(statusParams) == 1 { + if statusParams[0] == "error" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" + } else if statusParams[0] == "ok" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } + } + } else if len(statusParams) == 1 { + if statusParams[0] == "error" { + query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" + } else if statusParams[0] == "ok" { + query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" + } + } + return query +} + func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams 
*model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) { baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpCode, httpMethod FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable) @@ -1622,14 +1636,8 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo query = query + " AND durationNano <= ?" args = append(args, queryParams.MaxDuration) } - // status can only be two and if both are selected than they are equivalent to none selected - if len(queryParams.Status) == 1 { - if queryParams.Status[0] == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if queryParams.Status[0] == "ok" { - query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" - } - } + query = getStatusFilters(query, queryParams.Status, excludeMap) + if len(queryParams.Kind) != 0 { query = query + " AND kind = ?" args = append(args, queryParams.Kind) @@ -1775,14 +1783,9 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model query = query + " AND durationNano <= ?" args = append(args, queryParams.MaxDuration) } - // status can only be two and if both are selected than they are equivalent to none selected - if len(queryParams.Status) == 1 { - if queryParams.Status[0] == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if queryParams.Status[0] == "ok" { - query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" - } - } + + query = getStatusFilters(query, queryParams.Status, excludeMap) + tagFilters := []model.TagFilters{} finalQuery := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagsKeys) as tagKeys FROM %s WHERE timestamp >= ? AND timestamp <= ?`, r.indexTable) @@ -2359,14 +2362,7 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query query = query + " AND durationNano <= ?" 
args = append(args, queryParams.MaxDuration) } - // status can only be two and if both are selected than they are equivalent to none selected - if len(queryParams.Status) == 1 { - if queryParams.Status[0] == "error" { - query += " AND ( ( has(tags, 'error:true') OR statusCode>=500 OR statusCode=2))" - } else if queryParams.Status[0] == "ok" { - query += " AND ((NOT ( has(tags, 'error:true')) AND statusCode<500 AND statusCode!=2))" - } - } + query = getStatusFilters(query, queryParams.Status, excludeMap) if len(queryParams.Kind) != 0 { query = query + " AND kind = ?" args = append(args, queryParams.Kind) From d41502df98373e479b42243987e08a0f37c70974 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 8 Feb 2022 15:35:40 +0530 Subject: [PATCH 55/81] =?UTF-8?q?chore(helm-charts):=20=F0=9F=9A=9A=20migr?= =?UTF-8?q?ate=20helm=20charts=20to=20SigNoz/charts=20repository=20(#667)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- deploy/kubernetes/platform/Chart.lock | 15 - deploy/kubernetes/platform/Chart.yaml | 37 - .../platform/charts/alertmanager-0.5.0.tgz | Bin 5529 -> 0 bytes .../platform/charts/frontend-0.5.4.tgz | Bin 3848 -> 0 bytes .../platform/charts/query-service-0.5.4.tgz | Bin 4122 -> 0 bytes .../platform/charts/zookeeper-6.0.0.tgz | Bin 22052 -> 0 bytes .../crds/clickhouse-operator-install.yaml | 1223 ----------------- .../signoz-charts/alertmanager/.helmignore | 25 - .../signoz-charts/alertmanager/Chart.yaml | 7 - .../alertmanager/ci/config-reload-values.yaml | 2 - .../alertmanager/templates/NOTES.txt | 21 - .../alertmanager/templates/_helpers.tpl | 63 - .../alertmanager/templates/configmap.yaml | 15 - .../alertmanager/templates/ingress.yaml | 61 - .../alertmanager/templates/pdb.yaml | 13 - .../templates/serviceaccount.yaml | 12 - .../alertmanager/templates/services.yaml | 48 - .../alertmanager/templates/statefulset.yaml | 152 -- .../__snapshot__/ingress_test.yaml.snap | 48 - 
.../alertmanager/unittests/ingress_test.yaml | 81 -- .../signoz-charts/alertmanager/values.yaml | 189 --- .../signoz-charts/frontend/.helmignore | 23 - .../signoz-charts/frontend/Chart.yaml | 21 - .../frontend/templates/NOTES.txt | 21 - .../frontend/templates/_helpers.tpl | 63 - .../frontend/templates/config.yaml | 40 - .../frontend/templates/deployment.yaml | 64 - .../frontend/templates/ingress.yaml | 41 - .../frontend/templates/service.yaml | 15 - .../frontend/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 - .../signoz-charts/frontend/values.yaml | 77 -- .../signoz-charts/query-service/.helmignore | 23 - .../signoz-charts/query-service/Chart.yaml | 21 - .../query-service/templates/NOTES.txt | 21 - .../query-service/templates/_helpers.tpl | 63 - .../query-service/templates/ingress.yaml | 41 - .../templates/prometheus-configmap.yaml | 33 - .../query-service/templates/service.yaml | 15 - .../templates/serviceaccount.yaml | 12 - .../query-service/templates/statefulset.yaml | 89 -- .../templates/tests/test-connection.yaml | 15 - .../signoz-charts/query-service/values.yaml | 77 -- .../platform/templates/clickhouse-config.yaml | 33 - .../templates/clickhouse-instance.yaml | 107 -- .../clickhouse-operator/clusterrole.yaml | 157 --- .../clusterrolebinding.yaml | 18 - .../clickhouse-operator/configmap.yaml | 418 ------ .../clickhouse-operator/deployment.yaml | 129 -- .../clickhouse-operator/service.yaml | 26 - .../clickhouse-operator/serviceaccount.yaml | 15 - .../otel-collector-metrics-config.yaml | 53 - .../otel-collector-metrics-deployment.yaml | 72 - .../otel-collector-metrics-service.yaml | 31 - .../otel-collector/otel-collector-config.yaml | 67 - .../otel-collector-deployment.yaml | 73 - .../otel-collector-service.yaml | 33 - deploy/kubernetes/platform/values.yaml | 16 - 58 files changed, 4062 deletions(-) delete mode 100644 deploy/kubernetes/platform/Chart.lock delete mode 100644 deploy/kubernetes/platform/Chart.yaml delete mode 
100644 deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz delete mode 100644 deploy/kubernetes/platform/charts/frontend-0.5.4.tgz delete mode 100644 deploy/kubernetes/platform/charts/query-service-0.5.4.tgz delete mode 100644 deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz delete mode 100644 deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/.helmignore delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/NOTES.txt delete mode 100644 
deploy/kubernetes/platform/signoz-charts/frontend/templates/_helpers.tpl delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/ingress.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/service.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/serviceaccount.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/templates/tests/test-connection.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/frontend/values.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/.helmignore delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/NOTES.txt delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/_helpers.tpl delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/ingress.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/service.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/serviceaccount.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/tests/test-connection.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/values.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-config.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-instance.yaml delete mode 100644 
deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml delete mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml delete mode 100644 deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml delete mode 100644 deploy/kubernetes/platform/values.yaml diff --git a/deploy/kubernetes/platform/Chart.lock b/deploy/kubernetes/platform/Chart.lock deleted file mode 100644 index 982d2930db..0000000000 --- a/deploy/kubernetes/platform/Chart.lock +++ /dev/null @@ -1,15 +0,0 @@ -dependencies: -- name: zookeeper - repository: https://charts.bitnami.com/bitnami - version: 6.0.0 -- name: query-service - repository: file://./signoz-charts/query-service - version: 0.5.4 -- name: frontend - repository: file://./signoz-charts/frontend - version: 0.5.4 -- name: alertmanager - repository: file://./signoz-charts/alertmanager - version: 0.5.0 -digest: sha256:b75aaa30cee8c67d7194fec3543e02389d4df0806982cce55d848b564ae9aad7 -generated: "2021-12-24T13:23:16.211336+05:30" diff --git 
a/deploy/kubernetes/platform/Chart.yaml b/deploy/kubernetes/platform/Chart.yaml deleted file mode 100644 index 71cd2cc459..0000000000 --- a/deploy/kubernetes/platform/Chart.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v2 -name: signoz-platform -description: SigNoz Observability Platform Helm Chart - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.5.3 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 0.5.3 - -dependencies: - - name: zookeeper - repository: "https://charts.bitnami.com/bitnami" - version: 6.0.0 - - name: query-service - repository: "file://./signoz-charts/query-service" - version: 0.5.4 - - name: frontend - repository: "file://./signoz-charts/frontend" - version: 0.5.4 - - name: alertmanager - repository: "file://./signoz-charts/alertmanager" - version: 0.5.0 \ No newline at end of file diff --git a/deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz b/deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz deleted file mode 100644 index c673c463eccac871a101489bb84cb138d40b37b9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5529 zcmV;K6=v!miwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH>OZW}q0^Shs-PWB?U&uU5j%53AjfY+HF7@H3v&SbGzEHYGg zN#eMh?2v5P8e4~ZmV2~&k^{;9{*dg%!|#Tp!;|CTcQAZZDmq#s z6%pSJzk00th8G0{L`fx>kdiO_?xKsss}>qi9| zXHli@TeI&E-zxgQAYp>?(G9?Q{U41+2S+viKN`M#`K

(YD|fPDv6fu!|2F{MMRXs0H!oV-QgAbArS}*A}EA}VH zEmvcK+W0sCTkvjyLeKz>tpS4-6q&*GoJ)itoiDXN`+CoV!tS{a=*}MWjD_hr4&Eq1 z&csZ5aJx#W94BFTiM~K3J^0tZjqS>Y^`O^trb!q!9y;38Q6JBI-%Q$2dl{oorxYbz z&$UGqKp^c0#C8NZ=Sdi7j`{+L!oY#_lLty9Xb_z-##L%;6$cUo`aeC#8afAn6n)LC zG`u$Mnq=V9HYklOk?5Mf7gJzdfkXv_1t~3tr6R$7AQlmoFhhnyQw)iuY-SK=yve1( z0n^`eoR|lf=qG}zXsZjI-9udl1ydhB>V%{xUx&f1}g?D8#ozffgBabcj z3WR3+VgXk&eFj#iJm?+tYTYnka_GqBRJXtvCdB9#{PL?T>MHO~am z)nlB#P5aq(PrrDML)lEAG#o>wZBXFBl!Q{x&<}}}Zw=#ny;kxyva4i*f}u7}ln+vT zu7UM{%_zHR?WjW8C%(b08LIKmxm1P{y;}N9)va+;S@U^h`T)d4%~Jpiq#t-N$e-9Q z9}JT(w17s)>4P-($xho?xqd3#eHG>+nv=Sf4c$elBT({0_*hts zf9OR@NEj|bpwFWSSzzpf;_1RJb90+R8-t-Zgh&<$N^dPhZ!JD&(x*JJr1^8MZI9lK zw8=;$3fJyKB$ON|(GudDvsaD|zJEj0DSl2+HX!N% zhX-${<7h3q#1MVOh27&654F{{3ZiwNOsAAlwX{D=MHEhx@Df#@ za^Z}~%}kB*8xfRrrqinzfStibkfjK-yK-U1uSmkkA7I zrL`hl;3q{Rmcxpz4?0)3>JkCE-m1##TZ>HTXBP%#m9s%Cz_&?ut8Qs(v`B5_|}$VuRAZ8`a~_vVq8>x6L! z_N~#a!RZHzP=*4oX&9PKY@$Uy4*Al!+eW-=o`uD{2A39*4AzQEgW!!g%y}b2k{^+( zhop5JF>9(1Gz%LtG0sHS@u`EFY(L8P6|-V?Q?elWTD4nqEu#HdLIWK5Jc<*A{d|ub zWRt5p7*3&`TK$cr8eR0@uAj>4?QLpkYx2BWr5*ltdkgNRA}Wz^2a5j`$0%U6y1ms7 zDSpm>>`kelY^Fgy4P&BQ(*2ux3U?49vtZiAS*`x9hj!0-6me$vHP@)KIo7OJ#&2o# z_=Qgj%5HD_nuM8x9gVxvb2oivobv2ZKL^#oJ?O1gJEaGG>*iEPd~e;1G9p)-cnKwB z7FuXcgl)TL6b55Lg{;t!@}EvqAXu&TA!ksnTR+8UMu*kvw0C=( zJ^H8*Ng8#pMp6w^_NYw6uG7)IOo+Bynw=%rtNGb`XQTNOfZ_g}n$}8p^&wA-MIV0FK+Ru%F6hlhc~uI$HpeQ3Wvf7n&`^4NC9Y)?Nk=WO=V}H2_dov& zdm4MXGTxr0(XPsB^>Z0hCyOiJzjaLOx!u22e4X6l4&J`|@WZ96Zq#FpvElybaC9=N z#ea_vkB*M*Wrq}Hnb zF_fssNIfnIy)4Ak%SkRrpe#PTiMdel_T8%=K8@eK|8Tm!qum}~g)l<}{k{oF#>7Ws zy5D1nM55h%%~QP_aQH=Xrinc5-MaeN?sY`4r0lK`Pt3DaZ| zRqLJa*66z!*QnFDLhC z!Cy&8n2%x|`vE|kC=V_aL}UqyTpJEU%exr&i{%<$I{*_}!~dZ_nQR zP~%z5gZqko2t5+VZuKam0Z%*mS@3ed2}+q%nrEl=AvLQywJbtK0-}h)UE*i&Bb8KT zY#klEbcgQH9j#$%dV6DU;YyZX``j6e?!_66uRrDDnuq|lGe$%A$lmwEvdR6{8{TuP zv=0C8(;UMXg>+T?^{D`x{J-Ply8m}_a5Q@M|DK}V-VXL)K_kzEg{L$`bITTKw)k`O zU~gb7@?g*T;U+d$H??bGbMfLjX}^ActZuQ5{i2JBAiFl<%$UK^v<`USEqBw&d=w>QxwA4BhQOZc3SkWMKE+V&apUFRQYv77!%kD;d`VS+x< 
zrc&|<;qSQ>x09Yy3`a!1A+O$E>b}}h&@SDs>s(AhSX*k5;xkPTbuy^KN+?sAlh|C2;veOh;gI^V8(b-a~(uq#~d!?wxaE5|N# z4aopZPkqYGu9l29WZ_!zH-nJhbp1Q@KZ~Bt>@-ydq< zx0E&O>O)(W&ix}*>BrsOLJO(QU*4G$`FC74#w}&EL2pf7Xs?8F`;(rfK1F{qw(j|F zxWk}RXk*Qv@5`Fhw=66^Lv_u?6=gH`>U--pk1BE5w&9K*b+`#CGOGGyWe~hk$n-~9 z?Q5B0x2p@w#`evs5b4L*l*%a>YnI>iEayu$cC59#hK$ko3K8C*GGC`HcJLH!DHE|2 z>1~c#Cpagy{Q@sdT6U^QD4l(?bm~&7bP1|-pJ-d(&{{syb!L~8bEP#<@g4HBKy6a_ z7mKPdy%C~o23t`)Y_*cjRuktV_ywlX(i_-d70Hqfjs;$dbRGWFxq45-eCD8x2s4p_)UWZnWvNmQ7xi~D1ZLCgn1qAW~{jw z<0Oqz*WwIq?7M>$=Aa%P{MKv#UAO+HJ4zbS{Yq1?QU6Ev_}|Nu(c!cHKSjHDxV<$V zcvdIZTPK6?3hRUAD^ie*0 z&0BEAeI9!7;e70r&zPl7DN}az@q7nAw^LT+C^v33^UUGWkvxA-zK!(k$LhOD`GK(N zL4BFKWZ2*D_t)+sUw)_5pCbuEG_T)2lHcfO*`^-6v01P&7kl@cgIY_Q|d~~s#{ng^h-(ozGd0+@*mY(n3niS+N}j$ zC2rkV>dL)tHSZ%Y1yFuLEhEC8qkkonC{1~)aCOIISi-jZA!VEUS0Zst)Es)X6e;b8 z!5zpsIXFI%N!030HJNf&7vTb?$(plPd2QoQCbMm~lC|<>#juUSX07DZM{d1UXrAuB zEELc}e_s5n7_F!W_N5dzMk$xXsk+gZ75%=?Ser-7s_T@*JFBD8)VGVOKDta0^bi*Z12 z4Up;L7Nt6}@)B#iE24$FS08CyD%!Dm%%a3c4HpqJS-r08L%+W;#ftaBB`t3Bf zRx50?s9NCqTD9K3x9ekCM$05loUYeKE5h!ShS=2IK|M>hEfeJHqYM>#?8)9G!2*S% z68q_k;iua>cByy0Qiz}l7@7iWIn>Y?mSGd7BYEUDudLHGp|m69-Fy{B<@C-CkgYQ{IPOnp z$K&;x+j$k=Hj0WoZbQ!^t$nk39&&0sRfJ-*<=qx*fgqO@?x(uiy5eTL?4csqJ2ay#@0I^6k7U zHDWtv;%Dz@RN3>;uubbc>j&7PTxRuXZK*vE35`C~Yb@4-4$#WA;smQ(qkv?m43$YL zL6s$HSYEeQ=`2@IEm}%o#p5XtC+V+vFIJUHWHtbH@UwXda}!`?4{56xT3*9if?>0J z*OWRbZHPs^qE3rj+|&zefelmr$F2+SShxhv45TIrVicyFw(GdBEkzuVP^S%^u8W1N~3-RW8_|Kr|u#RT7)p7t+~t z<#p`rxf62bn9{$_p34QDIeY)@Y{5q&C{H9@y!t^pZY=mO=qtyi0g{33F8D8wyO2H) zaNsxzqe|(WWbk{nXchhUX&Ti|>%R6M=MTbc(*NO6UH@N>UOw0Vdy>{tEV)|s{96Dc zVQyr3R8em|NM&qo0PK8wbK5wQ`25XJ(LXs`d2B>c@*`QM@~iSW**VuHjxJ8JH8nNm zg24e!SG=Br(n1n3=e*S;MO_mXqnPP{uF#St@h;p zO&Uq*CzOf`9>QYeku)v82L8Uk=f$WZNmFgR4>MB@h{1fvJ#iN@pkH5BPhoMR1C8 zK_m1yNic*$xjem0F$9vPj7G$u@)k`hKUxg`XIp9i3&Ju~{}Kmqjs5SA_Jg+lkM{TP z?f)*?4!puC$(RPG0M@DtrSQES_%Npmr~(4N9KLql^CVhVa3nIWLl}AV`IT+v<%^RdLr~#qC&271mldcQ;Z~P6~eE-SvT}+l|{=RZA!so&Bzylx* 
zrA`PZGn5v%-Vy#S_}i}MoNlB1=OU1wmkB*$ozvluRh2nxb+N28g9di|0NZzIkJxC6f3ZXUq3O=)Y5jYN<3P zp`C&Q$oQc;5!?W@yFV(FAq;a?oF;exE)E{COlg$IW24YQ>JWA5R0vjII{`IQeG(bf?is#&9>82EZ7CZWm@lCV^L@M| zNy^Y?A|lMb2S|yYI|~EQta#cnLzOkZ6KgjO1bxj^F64^Xj>FgQq~%FeB9jqTO88HN zOl5?zB}j}!l9;5!sG1g_tf?#^&K+7g+UlSdz>q8uv~g&}IJ8J`6;Y8nwxYQ(>TLX< z(ZWoj^qu!3ObMXeXt6?NSmlxXlvmMB_|E#Ud|H|k*IC=Ns^{KBf^=;9( zF8>P-hOPW>Z+|qr&;RbC4So0-ol~i8&*0tPqR`B0N^1w_5dqPQm}= zHVsAG$%7WG*A_w&K0|n5oIQjuH=u10Ft7j>v~NR;>z!+zu2zQ7gZ5a>42)W>zyyHx zIvfmED@V%Wh4p&10>8rUz`XV&Q)uu+c&Y>;loQ4wc+I5 ztJi1acke&EeDKJ4@<=n7p@x3nWGgBq5rSdjJu(wG{FAbGy@tMk&q{DJ+n2qSZ#=$l zu&Q5!-+Z1kvF;UFmNJ+qk60EX^ve9aNR@has*hW|9gka_8NBM7_@f#V%bLmFNM$xTg?~H6&ySnw%pL znBoN_lxI5Mz!-I|qlDvezAGUMLYdK0KTlw`K+pzj~R~ zB7*+KO&0HAwRoqm-+wwj>R3OE?IvVzKo9!;!sSD6wd$=_xdlF=JjP2XR+`nsSePFK zkFA0E*2*p$AXA*O#f?NVm>` z>^9ioA8t( zixm)wZg-6N2^v-KXtK1zcy)4WU`?`MQo6^!cRU5@Jg!4h1~_+#;ts+zTu{bLev&D} zr!qNB##qkncG*@jy%0$)EpH!omM~iZZX|OPsf#ZUEdSxln=fmP&r%c)OEXlKyf%8j zn!!|3uBXuZi|YSH^;*~*-LFx!ZO;t_tgWmunEa~Ctg2nzTvZ$tBNJp-t*`~_ZJkeD z-0nHQzk@FzF=Zyc_x{?0-dXQkEJ`Ga;EqXk1(OcIVVNUKd^I0f#>4JZG@q33vcC=h zl^+eg$r1ZdQ*SrV!}6-qyxYve${R+L^lS|I0>5WMV{1>iiN&4J$NkANY-CUQxj%G@ ztvJ}aUU2Ps_B)wyw-DDPq8;OkL^Rw;{JQzh^|$TeyHEPBoK^iI<7AtEYJ&OsEkQru z=&e?zQ+agdX6ErWguFh{!YQ;+_wt90O-3h@4$J@10sK2#NB-w7xNh+#_?rB0G#qZ^ zf1}~veg1bBtx^9m^v)@dLpZV;@Rp=ru_>+YLTmf@Tu}-Of!y7>OQbyM>2^H@%G~%Y zrVzgL?MP#QC}EW!&@ier^8BD?`zgYU9NdjQvrNI+3oq}U{XtU@{6+2SnbJhl$n=`7 z`Wur2E6{kU2Pq?zyT?eW`X+l|w(*UQ%l>DwAg*4<4Y^EM6JqoGU!}@PRf8LBrczv* z>=g)KHeM`x8{1q1;iD`|!=H>ahaSa(QMp9ts|0lG;B57*t~l z$n^$4B&CD;J$l8Sk=gPrB}x?sC!2cPx{`CQvmgNbK>+)~2=;?r*bnxg8ug+*wU^bq zuUus(rS*??WcTfxT1WhkF=b+zAiu40(Cf~B4n_y9_qrO zYQ(pZ-`r+aY#cbugco^Yv{{GMMFLZ9ZVgPO&>|8ngbzpK+B@WnI`M+^``3p@AI=WPt;hVAmQP!kv$)75cf>w*kF=2h-`#p;1h3hE(6Ls_X-h8N+KhZ!lF< zyc-&}d~ky`d%#nj{Mh>UhG*X_U#w>9qHk4@ShWd>&XLDqE9c1Vb%45j&43nW zSA%TGrdwt|+7I2gAJaPGf8nI>D*gxi`+FPle-zxu|GQ|N_dmt2cz@uzkNc%m$M&~sJ@An)Xd2?$vx0pWvX(~WE>|dkO?r%R5oMU7J^gV9i 
zI{QBeTK7MrV6=N*|8Xa+)2zoL?y%+F4T68ar8d3qwdoBL?G^Qfb0N-qAy|Sciz2f% zFS3XBRX}mfdoe>@zc~qElBwlHT$Wb|_7`p;{QA&D}Lx=(MdE4 zr;Bs^6I`JDfi9%9f8WTJ&bW|x+cd6?|GWDe|NmojAOG*9?ZB95jne&72)F)lF-H!Q zjIx;W8KfjSCo@##e*;ljYIAQ;b7Tx=OibLBE9JANAdwMG7idpCYW;-A-VSh_xq_2N zsl+M0#Mp)G|M%F3cbqLja68hl1Sv}U<96TkU!9(vY9Z0vfxIaB)6pr!R4UJ(Q9ZDK z9s8a?`9luu-=bqa8<;=ESGC}S3eki_=UHlh`=GohzPd=gC;o(-dry3wm}epB%zN_R z-VS^sl8Q{h@vGO$^HV84W28Nw#z+PZT#C=0zfh5gaq#b*8Qiz~cHjQ&?SBFQ0RR7A KM6&<@PyhfqBAT24 diff --git a/deploy/kubernetes/platform/charts/query-service-0.5.4.tgz b/deploy/kubernetes/platform/charts/query-service-0.5.4.tgz deleted file mode 100644 index 7ab84614851b39853e27fe154bc61ff22e109f79..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4122 zcmV+#5asV5iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH;Na@#nP^P5l6M`zcQ+=!C=6)&rNTVC6jaZO_D;v`d3sVNsk zwj_)}fCGTCHHz=quK-F}(gWf|Dv8rWC6Zu{5 zPtVm{+`mX8DgA&_QNcr)?sz0k%b&eqKiKnPRFR~qwy%!iM`Q^^MoeHNB*=_&%Exd; z#~0!c_%9<(aOI>4D2Jc2Axe%KmB&efA=Ko})3X#qAZf~IM9dW4v_AW{X3zimR@eV2 zVHv82HUMk%e|LXpN&k2Ddr$iR7;OtqaYQnv!R@`(>Y7jl-WI%@Pz6*0fnSc_yzY#I zOo-MP!-z5qaEcKl5@AXtB}0Y^w16Q(N|eGFDAxjJB9ovoNg2_o0?(6Z_5MU;T!+y2 zJerU(h8}>#R4A&2oQ0t1n2SHUb;ALWW{eF4qtPsc^U;OS1BnW`&an(K#;!1us8t9b zf3?=Dct9O@gk6)dIuz>Rb2A5awe!P5jeLOh7{^{(O z5d0rI6McF@@a4(d-M1h1Uyk4ZkqCBhe*BY8_Rrs(Oovf&J-X$uJGX{c`DPqPk=;QL zU<g4+q|2}&T4!vS%Cpe7#`~iqVL=}vY z8}?!hnKFLQATA`Slx+_zeCT7=46lw9?f#cV&?XyG@Uvf5kD}??}%23Ue#w4^$Z~z%URu_UBfadG>3S|iW zoD|C_GlTQbCoEGM<@vy7W zM5z$?nDV<;K+RN#L?(Rj4cWd2FcC`InGFlfkI+RW9lRq+$}nIeBFy#!q(o1g8353% zc-k^Um9-HUHa4jt=xC;L9xo=_a`-x}b9oY-(l2E2XM{{;gs~Y&j6{+kkFBI>0m^F1 z4C37WmZPl*Y5@$%6hRx)ON{A_1XmFinIkKj2qVqLTpJ-Yb|P?gnlL4RawEhFl_8Z! 
z?yuC!Y&QrF$fr~ao*>r>ZmFICqgu0EBnnb~0F^}(GyNONso9jk=*moF5DT~^ytb0c zu#CIaYNsKBF>y=z*kWgt8xRuzmMPnxm^BA<871!B7&*n~_Ed z+Y4522RYJ&F7_x&Gb_zW{yV|M`jY<6;TxJS@%IeXrXxHqV!T3zkrvYKuBQbvmcR-4 z$6q~hWLcox2y8@6N6QeWxE_j&<>0zqj9r|MvHLdr$G-W3+w% zuh3~}B@Ki3Z(qZ3R;G7)f(pT?G~uxdz4^QYbOgZ%moF5CJof@{e{Ufq;bVkn#(F|{ zv;=Lfs(}SqKnK=VySFAXn$OKd&zfU3GcamC2V+t1@563)K6g`Dyl{U%pF^;~?!dg} zBU5PbM}liYxdQ(}#5fRA`z5(W7;Jo@!-sdq3&O?a>Dl$*^6k5$XWtvs7-i?e=SJ-55kPQlAq&002uQW!nuZo%s`VH1Og+xg@p1<=UI%A=_*P%ALLmHnG(v3 zs0Mj}VB0SugNSgrB|4h8kkPKw1dtV7*#6?ZR?`SNw;S)^_F@OG&fb1FKWW)Si*l2> zH=PtZox=EoKcD;ad9IM(Qy$|T6noAp3@x+~de5!C1y<>tdinP)lCW4#Xb5g>uwC&= z#Vmu+4K#Cbd3t?u{N}8|v{b~K3O)m$q-jtOLv0?mnxPA3{;)ija-|K^j+%2CUaeHL z1T~3?CKh*vq5cP|s4n>0-+3AIf?m)MO)xRY&hGzv& zZR{T&_Nu1zTRX9*_SJ1w{C7P;mZDUFPXBZ%aBcnXV7C$f9qc{T{~xE#=iL`DrAcUu zA0x`pSkt4#uqB#c2rs(UBzIqUXLqU1OdE9-n^y*24%adHEafYVV`l+lbKbxmI9WSb zSoLaA4nBdF8IRy#*FMtZDjSXH9r&FEGGm0yv!n9F)ow^Rhw*bW_&p5Q#45#Jns;6?{LOS&p1uTp3ssESR|N zdElLoKspoakd$uCous(xl7?H#n2Ap^W#}|Hzac)Sb`66U?!4ejB|vW$J4=|A02?vh zIO*cYGrNfJX!E_T&{^`sVW~UFlGfVWFD5XRl3oXd-YVq}PH?KWFjf90a8Ja^CeaUOgEi77L#-T!wV{A>SfbW0?O;EqzY zj#3BZP)(2}ftqwJ_2FlN(tLS=ivw3dsk{>NI-eqdipaZVAD0K2rf)eOE6*8C__H+R z6a1bDjg5?P8B0Pq8DrcT&ca$8Rh)pfa(^I3MYvdH!s=i_UZU^h3E_NR+MVy$=R40=Vdd2&7sjK3-_IYG@{m>} zEf(TiyZl;P75^25mWMbBUK9WA^mcZZ#Y9`<#7n-jzM0LnTN$;pD_^&E?KkT?0c9S zkj^T;vC@lu0#131T*Ka?a%m4|n)_2ZUUQJu^R{wAEq);hTZK4ZmkCiRtu1#+!DY zeQS&K)E={~^8cw63F--Es^dP1Nl5y&!N3~--+y_~IRD$}_n+!NkJD=3fA(`HcVc(^ z4M|(v+>&5;MgO8Fm?bQPPx-eT$qdF!3<-mLfO+lS6xL2flB9S|xkfo9EVM886>Py( zemLnMpnx#B1x``UpucZ?gW$0W%1_y-DmwE6q2iner-WtJOIGIU+lkIx7dns8g8bbC z%N$_k1Y5;YG)+)s7E+-uzFf-rP^K#Vo_Zd5IiHRhO6@XAnX@W$Ul>>bV!A0G7d#^s z@X`Zy?3opfuHE>GXX=0^a*SGqHHTEi68^3Cx7~u8*Mh`3F9eTV{USw4MNA|6K?@cP zb6;>ok&v-HE3PedyM^W3j#~!#^@!&F+0EJke?+Cy*LG$jM}D=>ZN_qE2e$1(?91(v zp@r1cm{}B?46S%aBF$zXI2!kt3Yve>V#kF!o*z*nP`#~SP>MKEt&ok}O7cB#@Z6Ch zF%cTCC6aitNtt9J=xFtKf8D@7&UJprRbNc45m!%Ekh}_14?AM*s6t-k&oAYxTe1{Qm3Z%l)VN 
z&!e=BtE`_h6gl`8*^9Qa?*mzmmbyt}T@P43`CnVv-K?$De@!%wGN$m)-v?Zy|NFh2 zepCN8RkaG_3X^%bvi?}K=G~W-SW9DGgRjih-)SK@1ivB^_EoUBUvE?OuTVE%Lw@4WU4Gxp zd|a1A64QV3EK|X<1m75|tlC6xp%gXdBxbUB0IOY7^4jKWH_Q#Qyfj;Jw`nn=yzR8{ zEbHc|T&SvK>6KenUprGFaFrtIN;!*)%6E0gRu#V23JfNxo}JSC##lke2_rOlS47%o zqRvvN&?d)KcTb`Sm3kv$G`PPvj2*H>wxsB zJ!)H(|7jy5_Fp6I93!KHUzrK6tN$GAES>-D>^$XvkJ4JpIs*~6qf8e6Ny0Ewv!S>v^C^4RF@%rX26Vfy zsChKMy(up~bJ5A-lxXBn7g@yxPEkH)9jUJWfnlj}IuajnjPWer2mi6 zwqQWCM(O^O4Y&1hJ3$V^j52$mo08~;j8T>U5sb=Go1B80AY=BV*OgKzAO8Rn8PRl# zHYcdOCp`AHfaBPm!hWAh9ML&Q^6moBkVH3GYVW=%??s?)Q}0DE zBsboRKqux|NILdj{I|CSABdzPQ*eHIro14P;x~-67tk0<*MUp%n-@$~Bx3CTE7pUb Y+EaUK|9{&51ONd4{|k@2BLGqW04T%`4gdfE diff --git a/deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz b/deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz deleted file mode 100644 index 453520ca311c92047cd1de95bb0cbd3098233cee..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22052 zcmV)-K!?8{iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ(dfc{>FnE6VRn$tEvE-xfmgJ9k`kl0-p3( zKNHwIu%yRDjDK%ILSmNmw;BMc;6oPb;vthtux_R_$Wamu1W(xSwgO2)uV`QJEOqTl zY}l*a03c}^vB1#XY7BTxJ1|fxm7R9G&(t96wgMivyG&_J+JT-MvaJNA?U+bKMgA$D zKn)gX(j6509l;POsgiA}NN_8Q8f_N$i(gIr+}zpO{13fF(hc8Szb1nuIh77?oSO?580p4w*Fd`vw(cMcwdzCIn3skRf3a=|*{p3CMQL zn+w|4(td=i0=_xVqUe$af+|f=W8K2(G}Zyibvh7_+~21prbP)4hN%b8^a}q-N9Jo@ zODZl%pYBIQN)7hX``yswS)mO7aFfsyWTmDqB51EzQ?yt%A{eD}%_7uRq%Yf=M1-W5 zbdeKgt1^ynQ!bgprG9)V15l)ISN3K|yNo2wo$aQ0^`_APSYLfR8{>nP?G5t_;!G;&Qiw^CA&fv!qSY0aAbab@SEeMOtqNVT zo-jq^t!(F0rJ3@unXmJ0O^G3{nF=-}iZ)y`o%P^wbbB1D3MV;W+R@# z9nYc=B6dsl7fC75k4g|=HR>=5QU|_@V^X< z>19!*M+p>-Q_Y{Sfl|)tTsVw^qCCXm+-$7l|7R9fr_!1RIA2SRb#U*>z<$gb1NNg# zE1z({H9~rt2j-1=(E7g94Xzmq7|6gSKQTn(tO{DT_L;YSju5ry}X!akAI zFsLmZP@z~)n~xNygW(5Ik!+MP&}u>xDkfL+mX5S$V08qf$LSNHSvOC3NIS6m=8b(V zbD}4}8E-_1Oe%Q~tr$SIow44WgF@`HbD{g^G}kh_B!IA^&0nD zTBX(?cWH1NRWQR|IA$@^N|~PJS=^;U3yhS9(g{;RP1`i|JtRiu*F6#Py96YQvq+JI 
z@=Qh}BS-brNfZ$y2$>WaiYS~&R|5*?2j5#;A&Ev>^yCt!=UUvyiNz%J@srR^Ca^P4 z7X~~DXsTrUj*HuflTfxbN$oNsjg*H6`J3;vu&?I>KukWqPjd6313TDJ=UPWV(#s=0 z(jOPYrQo(=jBy-O*M_x7gg;z40-d?oSI~%j1U8Bs7NCIT0j^ zX!JnyK(8_dBF)s8A7-Ra^7n2H&5_aAoaW5`56*uymRG_GeZ;dR)MO5}~@3nCYolaC^nHWZT`|-bS!R@l2YfB+@e!r(#5t zz8c`f)XnNwMR7flT7hB-{FZT%#V{a4)Xf8)Bs4(Hhd@M0NCjGh2~#-xJFsKge#h^i zM+ERp=|+t7vVb8ViJqr8wS6#hlmr&ctH}~Y&jM?>TXc-#h*AV3OZpLoAY!^eOhynB zV^`$gV(LJZ$)<^k! zEf4)pYcFws6C#E=x*PB_-Vit)$lqX#ZYWPoG7scBICEFF|A4z3zZo_t&8ID zr&JFSPf%SxI!7ijpU;xLC^|ZqzJX$3>0Su52r2a(jzgA^ObzrbH3C{&y$BN(n!SW4 zqnKy%nD@0IU{*f8tfiSS%IW$#Tu`l)nembdNzHE~d}A&Z2pZ^7(ck2ND3|DC zWy%xu>IZ7wPA zxCl237lEsEX|+Uzc4&I4a82o!TH3HjS#`O0dE7o=eJWM+Lx;3$6YwKCQ>eom>W$)x0I}(SZOBo zx=)kE@zo$-8l)lrd)NM5M}X^GX>}F$yPkk>@;w69y!iwdGC2!D?yDk7fWc*qF+?Ze62@F6w4nF=0efnHG~jsvBoBu?ds_m4G39n&zPc|epd*oTk_8YnJC`Ea4M z)xBeS&OkTC$_vk(V9E%#if~3>-hk>Ao*eW30TUny+PkP@yH5v9w2Z>eavPaINJG2a zAbJ${am_@~I*-|R9ndJ1K!%)!dQDBK)t$Rk-BF52pwCa2)r)~%SX!Qyjka}xUI%LY z+V->VLBHhMn(06@bU8ct!)_qHnLX8?L{-YCXA*TO}OQrdC;!=rN}ZX&Pz(Rq1}XaX+n#{CAjKBp}R zBA$iT@x+47Mg8+`OTN9X5o&4#x(?xdJu0K6vDa2k2X=Sf>@)y+TyrNC=jP>`H*fU9 z6B%X6ynJC^rdc;)a&XEOS|xj7D5#X$SV8p&Kt3QH4DEuKlXnbpd zdw-9CFV*%M@cUz7)W_>pP`3q^M7^$dsR2@PM}xM`!iY-C0M+lfl3DgAL4X>vo!y1Z z%_OR4D>_Jv_jYzPZG&xZ4)2*)9e?|cp>0o5mJ^4qEeLXedp3o}l50fxv!tP2S+}pV zxGJC=rDkWR2MJdwje@1BglJ(}W6ZshCx4`u2yO`RMs`rm^kka~{H_ zb^y^#@wI&Lg zK&u!hJfv2tG*LPHNvmdONuVoAR8Zgiz-ncZr`lkBzw~mwc%S{aNKbm(I@R7pm5GPc zZA+rBgooCAiD{x5=&G15Q$3|3;h~nDRs%q(sb5{<8!mEre#Ijy>@MbVjpqK{lCsv; zf?PF85;j%d1fo&=lpXl_7h{tk)eE)8hgu-hJ9I0vq~FxqX_I$*l68O@2R(%q=Io2-hrMgp7o%OuEK^%c__E4g%e6cS+=|_lMYA{t}tYJ z33hEYQ$cB*no)BvOCs9zX0OK*joQ8Pjsb!yfo9tFI$|UBpBQl&q?!J?9aoSvp)1Z4 zp$9W!!(t~yICEaFgk37{Q^9i$HSN#OL`Ij! 
z4cUPg+uHV_g7Gkax1--#$7TNVl~$7oi!wp427<}~kHQYT*2`3eOPTj}$HZcHsdbL$ zj>z3cLwFvL1@L_J$Y^T0M3t6l`L@+U6W1B9&BQ=qNxyCXY&w8u^)WB)-qU22{nRfv zS!1s_RSS*AR*ZQ_BXj_!X*6oVPXn4bbr7p;nVnbM=|#sz?$Hly+?(}H<5FVQecVv8 zgjmJKd6Kj_3l}tGQtJR_OT)!p0Z%Lf#vUl-`=oshl8nI2Y6D=SiXntl>knotTDQH~ zDpWP}R2_a)R3s$wHvB#NLGW02cqZZSqpnWHn6IS~QzckX7u{o!nhDNKbwn>WwA@?c zllYIFmpd+c(i)a!G(4xOb=u6)v^!g&Mrldy8u#}s0L)U614|16oF3{0gpIpa>tNd+ zc5V3JD`!x!#TQVCun^zf$X-ymTjItAgzD&PSZmu# zYj&VKLe5gH$?@DNcES^;e@+`iTgDJIwl#t+lzoSu@$KMWxFvNX^tOtO-`RvA<{&ox zPR$*Tokng3xP3OFtjU=VN-la#SBLenUtU;dzy;;9UG!p;+YV}XBi?nVWDam&Q^W{>1ie^_5=-{XoDmn4Idv}gFwE#_&8TaP+B|0 z+B>sQo#V_=4H@g+yDCfiSUa02kFP80bs*NNu=_b>3KZ`yvS@-BOG{-In*tU1_c#SA zkpIL(V>pe+)NS2?$RcXPJ@HSpI4!IukcestnG5O5Q4-Mbt@WkLywP!KY@!1prc#nV z?HF%i9_%3zPVSU1mID(tf^jv#65rK^QNtKL6pxxNm|ANRsACLB0=srgFq78TL5T|> zk-V@h3z!(a14Pi|nZme^EWy~Jq_2B1r9$gPM?GK)LQbVFrEQ65NP#IyBeTgD5UsY1 z;Fe)1mQ{8_2^&12+FIoa-E0+|LO-UW12OryVS3>@?}blx1^8~QAhGEdmoJ;}pRiki z>2cA4S8?&eaAT?WB^^?Mc3&WvVgZRJ5A9i|&{M~f{)M%XmL6VSz$%QovLjrZ!k<3D zHwNpL{`uw0cr4$x9aG)OEHT@PK(qmXX9$u&^MK4~p`o7h5cCRbgWd_;4R}Py`rkwG z+MzhT|KY!kp|M*3f9<1N_wslrhph8XwolKl4li5kqq?UyW}N?i_4@Uz%K5*S-|lWd zo&S4`=hLSqu%5G3WUf6sty$Xg?3P9&Eo!)=VND#S zHkhb2K%)^#FH~5Qkuh$HpgnEB#xxj^gvq#N2zZMMqTSsRlt`Y)Ex5yoW1jS*5y-6H zr&5{K4?|Jzn9^D>hqc_kTLW&+jjXxYfVCFvNgxp-rr3~f$y%c&_S)#izUy!U;`RP+}SZr;4 zgL4g9@a0QuLaAIuwZ-HxXn8hl>2w$N#8oN87tyJP1%t?{w75uDi!w!{9L~=Uu21() z4!`?m<2LKkKt<4}%4j^zW{%n<3M;rnGlDgermeCIO_%y^3ItPmYrG;$^r{|U<(5_S zTez=KTRh-#$`hKX?{fHS(B$x|FL?Q z;q}pZWk`*Byv&$R=s>B6%4CbUh#LLQ{&3ytBXoXtaaBQSb!4;zL$40Dp4e{svR35% zm}_bpY~Ap}H?{{m-{hE^LBT`HrI9JGFjGcHD^wjgGSTje+-kkINADnJNv3FFGWh`E z=-lpm<)>3x)^!E&6H(3&f&+o#sRPga4wl1UsZPF|uiKuL*zWT1;=|E?4d*S1#at*r zvso1S^l9zWClGjCbb}=!{Roar{dlq#OZu8iVS5w4eEIaLrT?+(%-R>ny9GBCgmp0d zYyvBI3-Q!h>bgdETC>N+9TDL&{Ue7#d%ix>SR?B2?`6_LC`+^_?9IhtTn|y{~7-Eo6mAU zc3-{jtZi;00#?b(53ym+fRHL0C>p|sw?tM^fG=M$&~bSdJ73dXdW_$5(bf(LJi4e~{`jwVAUIPL|v8h>R;kLUiQkO#8mX9Fq@K(`0{ 
zuBmuCOF}BphHctbn!1&ySF$ud_o)BZ*?-pq8l_aomP+p>70h(|Z|BwPS5^D()$5(7 z{4bC4eEQUW4nr1qaKo_2BC6%pcd_o4U_d+Yyp6he`+4K=W9m)=`N4vye7735T+>Dx zjjMUh;ycEfwM@b})acx7^^+fHks-1qfY&eZPZnQhy&n4rYs~_fo)AqVvD(iY`z9EX z(6RJ ziM-6j0&{h`UjE#VZ;*h_ck_>{hiBdw21;4Y&v7liT^PYsutfD>?cZec-{e{anrU|t zPMh?1i37c%)l426NwFj=s2gB$8kxiCD4AKWiGv*RJ_}ln zy-0F&umwbe(Xu{2cg%XZ{Ex`*BNyI^*(oKu4@qJJ$BH5IIM*g8%ekrMW2>B?%ocUU zi>clIH`&42J*{?iaB<+8Uh^gHp4>>8sb)j<*}Xh}R)D_fM6xO1j&K&`U1H9#nhoKZ z)5=Sev-VXP+PJ3gfuj-71M&0}e)c+2mmE)*r|TNqe2(gn=YY|MT*Vjc+uvMNWR#ad zDE~5+!l|b0oH*B3oQZ82#+>NP?t|(ieCm%$IvM68b0Hp6WP&MSBtg9+8(Xi@ItqVF zV?d+0B?l%(WKwyn(P4$jo^L~I#r0iN-kt<$c1PtJF3fO(Kt-oRA;<08`IFc@M))U%aX8T4^f|xwBaJ?I{6JlbA_Wc z{8Kq|2D>!!fP6DI$1PgtxM{&>NLUhT1>xlcCDYGwUn;VMccTQ$b8NuoBt-0-o`98q zsuD|;GlZ;!r4M@ssve?lh;0J24t#Dl{PjukMdOHkUKPOPen{z2&`*)*=4{T#rg`%!Ia2x|6aU5{L10& z^;A!~`?s|$u)x&OJPmXaG_gz%OoLq7Bbe16^RA_<4%Pw-Xw8V(A2Ovuk&bgpwgMM5 zY?3eJ>BP(&Q$gy@AjmT`%+7vbiz_1fRGGXK1t3`@%&PCH(#FFov+L7?#Iim;i$8Vi zzs>q$ZdAAj4Pe&#|LyCl{rBzm>nHo~QJ$)|!_P?MYXN52FC{E0X$D0~q}J^locP0? 
zdhK9((B6}c_@}cG-I|v2@UoP{!dosDUs7J1cIgNcrqB^09pFzBy7aqQcsmJVj z2RyIs05=zSrp6}&9zXrPoE@e91X11kpYpJ{JgbXWWv!$I%<})fdQmz5v%9_f^2z>x zl*dI$6qm4dz5|vN(ysKa*kA3dO0}J;ufV8}?cq+BD&qVG=i|4q{fS)3|0=$NKZ`~o z?_Hf(XcRFb5Muy~z07g`^%~i&#HD`i$vwwYEC2IEVHD;O4s@M{*A z#p;%|6RgyVm6OGjJqjfv&*VjU;YnP*(?h)$(esc3!=BlK+qKe3~cs#*8Q+TY7#;UzZ5vE6IGpBl_gKyT^6lNvi&-o+5DiAC#f|@-y$7@#@ab7e^*EtoHE4Mt& zMrZ<@5>~m)s0o)s!QmoHn)B+w%PU>A>%St3_OeK>8290x>FfWC*DuHJ|9JiC>Hd$$ zdHf}tq^a~)Xv+nce<~l|0t@7m>Gxpk7K+5v1jtWBu1>3J9;mh|7_ z*ACv#Up^J?jclpxIT4CsYL!|@`QmF!mHTTdpb~R^Wi7)ohr7kO5HtXb3LAi>gvT2{ zLUaKEQX??(JJ_Le)_?k3tk0d6!P@wgX?jojqIPef?n7!+fX~Laf~}JrHF%e9 z9rqQp(q-g+Ja2u4`u-WGbv!ZX1P&i}VlLzFtfSao2eofH(KoPN%cTNE9CA%v0C$<*$XJC5NYtK^^}Tl#XRsWKX`AR8%ffnJGsc?EEU+@x`)+-SlFA%=-sSj6?ETpImH=w_uut1dMyK z6n0>{F%hxxVG{NlVp(o0e_b0&H4Th+{$sZcvz90W>O?OgVlzCl8%k=3y|KxSJ&o@68UBzJmKAm&E&f=g6BBV>?peiwF zP&wH74~G}mM+dq|idRjZ zpB=z}ONG}UR9kp=c6o&t6yCTZ(3Cf0#aIu1{u#b;rQy5(4I8cJo6U`%x10a*%U?IY zfnR?4U!Vpw8LJ9#c5(3CH=n-WyL@+jad@(Kwg2wt-Cw@c6tp*;mFW2Hn~jaLi-W)J zY;KO#pzezE%IjdjVeKtpkx6OTrwYtSCltw9tHpyU*ctoKW8)Znqye;x!D-0> zv_+P*O2b{@+oh!3(lU)E!+KGsyZK=6YVZ2^?1$@vql*f0r~*wAFtyXO4Ep@+;_CYJ z{mJ)-vmuubiOvE0>S+JR>#L)a!Flm8T9gS%FQ^v&#%DJt$6Ov>ULKvD;`BLt zKfA%0e7vNnSh+2ic~~CBca#rjQhnCZWDiW~DDng_^~aSf#NcLhu@{Y? 
zYV*OD9q?n2^1^Se%WVRvT8~^kq<7Zids#S(HPFiCRy8fJIbU(u0au-o*sE@8??7Yy+-|a;`)qGUxzFAm?wwy>9{ppsdO4sZy=1@7rCw?^ z`}pv~;cTrU;{9Vfq%+lsz2oDvpRV^#&rbjQ4&Ps1pB!FY z9PLj)6?<*6u`>o`A^XRnox5Sqrx-%>^+cZKE|OMUUz<*uYF8a!I%#dXXKu@FyqS5- zxT3iq5C8k}>g?k1`t8wjt=`g+R9w*Ol&Hb7jZa&m7C^w>j}-ucs9lv~LgNLnve_MQ zu^z24-*07-(T_@y!({k@h_R)-#u-^EKG`2RVXdkpjk6tCNde~%NXVp~iPv2s>9rLz zX^m0tI@}bgbWLe(og+nEj##aTT4|}Z#(|3D!Z>r(C;($AYcwykv z+$_tfcjegW*_hO;U-PhX1;2<5X+otu7ra}ugRY-ak@6&O zvZUQ5a`0>mo;3sguh}vY8NWrBk9G9)s#62 zpP?^k3M}Tg&wi=tqxy@99)oC}(nQJ#r0sub{+2PSpc#m$2ThwZu%NOt*Q}mR@c1-m z2~&(j2Q(t1OB(PbEJt{j$*Cz_1(;I7csLtWsazF^?bq2*$_1-nWEKQe%Bz8(a=@c- z3ZQDyDySY|Q6}g-psGdZ*l!!#>?0wYArs1F{yajVLfgDjVZLgAT(K}8{(m01P@!if z!BFj+B_%@z%nG8R0%;}LP=U3CaQL4RU{s4var0FjD(34H(s`=J$;X@2+V<|GN@baK zP1U;3^P!A}&IfPM<~5E=br!Oa42yW&F|!vZ_F&-d5-YRX;t~I~-BlA|S{FZB`^|0u z>_>#fb)-~gL0(sxe#Lraq5}OYkC-j?IM-kUZz-RNMQd}6UVC)JUuUOA$WFwc#;F<| zFySTWzV9wACg*&;TbcC_)58;yn%vo%_KlfczBxlEO`&wbv8b8VV<%503jjISS&})2 z9TSCZOe|II-~~ow6E+czVKMSo?xbp`QLApwoXpL!b4E}%HO4MSg1L^>`_0$fm*l|I z9jc!u)BH0s00cFK6CPHDvB9ktm!e7;7ZYh+=RU0Aydn^=WF!2RQCZa*>}9XzQv&{F zuoLzsC!B*Q-%?$BfU7bvP$VQDJUSHp35h1DZfLV^PSGKBOeLC4DMejdi#h zFbOhCarpo_piu<yf@_K}8g$J7A+ zkIfdGCD90Yf;BZtkWv8=OK7XnI=H;PR9w);I_%?e4j=X}A!I@}T79P4_^&~~(dzy# z+W4<~G3dATKkla-ChY=Hmjt(23OyE4*?8WPcWL8!t4nSh&s!=^8_)lDV;w#a!FVR& z=-^N`TB+c_(m*v@ETp7u$_f7KpHWU0^Z(*-@8IOH6^HlU#?<-0v-5IyyPE%f_sReJ zNY6U_gYzHFGT-<)|NEDXfl{gLw2NSt7D-7kpe-)??M+}32m~}yf<)|h8oG3N(BlGP z!V;JF#e{ieCP5jnq{l^!e{X3G2qi-*_>iHf37J%abu*;_j*?&?c*1_S#kD3~6vd10 zT&uDLdr<_u2Wmhi1#QMk{nLF1A}C~%hQJb#JVrcYZEkTjpaQz1QrjfO)7@w+v>J`| zb+|hIUk4YB#?8%5z!S+M+V}?af(e4A97q^~js5}GalwEHC0q72X1}+8yQ(aaibN5_ zqozIe4dW41H#awkz)_+E4>P0uHBb&?3()0UNhL_CN2G29`%De8u8)BGzZvr2mWpQH z+9rfFjrb_mS|U$0z#nt<&_L|8+a-;0(|I=`lX_g#fWpu3XcQL+XkuG(u<2G3h%qs% zXf+z&+fFl`1p!H*YX=IK1#F(AsVtz~+BZnhCi0tyv}Tex&-fazG?>DNR68sZ4ev#ZwL9XNP9d8E~GD23%x`VlgeFmF7Xe^6px8+?CSUu0xA^iX}+U!3nJd{>kdVV^2-#E-Vx744Ue7+D(G(+lT13p^6b%jF12Hr}`PK97ZP8d+X 
zI~~v4UUF&vO2STa7=@qDi6Al6EceUCdYb>(1ZgZeh?rCo!4zLX4Ty5n!9k@Vgqbiy zfuxyc8jb&g=g+TL`uuqZj+s=TskS6bJ1uXpOLS94oV-uiq_NA~r*=pqswjT;2g;jo zwPR+h6*cl+c6|(j#(b#7vgHxKYn=rufk?>%hVvt5AHz)=hG|CnOl9Xm-i@YFV|XK> zq#IG`^>{vD-Kf<4W+m}hplDVNC=>aJqpL}xrfSF2Goc%e&jm6t@ADZBs0;+dJI3d( zb-$0s!4SA`&Bo{E;?FX5?)zCDhtG}AaHGv25}9Oy7qqXniP6pUKK=o#zyYqpCIuk~ zTXmJuTVS-{CSlQ3G$+-p((L6-o(!)_b55y2$RUpuHjN&rV%TZ7)z*#Iwpc zH`dp4TRHVa>yvDIXt{EK&x*25Ps_6*6@rCyE++a6mkKo%#mx-`B#K7BEeIIjIb3ES zXX3DuY;%~+O}9_8uKd{a!s;}a5{%6QrI<@K%5iA&ls)uQ;N8OWYV%WXX!bU3=j3g> z!>f#CqW_DcVy)oaUumG^DkRN;!_B8J%M7zmv$rEU3)E-uw@<$g;VaTyx))+T-u8Gp z#N#PxZa!~hlKPfrGsk6fQcbgN|HDZhYr#A%;E%a@-ruw00&Y^6i+Riv*T;qJ z`!x&`gTsw+P_`IfY~Gh>PPt@?&q1|M^AXbgfGhPx9B#&(RP)j9(X2>+-u@b=S&@e^ zL8jqw^XY2u2TVh>n1J_aF4FTaG|rygmzw1MkMn$no1@+-SLcFCny3Y+uFyQw!a(6*9>jBnt?m9W(SrkH-f~1SDA`8;oBaxiXYAV;% zG`kG(b65Wi-xJ9KOkV(5!qAZ)vnXQH<|n|-m9FpJ?r{MFN`&fCav$p3XSmtj-rin9 zRWsx4($~)~!!U_{*9rTC*p5ZoENE8KP$#y#jc%{Vs5jlp#&8wwIHZ+$L{?Db8oMv&C3QcanlOPj< zCd!Scu}lU;>0V$-KcY5-4`MQcnA}n`;`Uoi5!4bizg}US$MV(BmfI)fBa5>bq^XzZ ze5uZBBwfz|%K~>SiU4!xnw<{Q7m7nd`&==iXls6&Uv4iop^@f^mjX>Y7V>CDBaPwjX-kxyv!ZgSD)qonQo!@EA5*YAI7?SS>EHFD@hHypXEpAyW zu4lRutly`ChO5!M9BW&LS^d2l(`eqqtvEBE<5){CZJ#O{!dspRIHpS5ray5J%3^y~ zalMAEKxY|;8!3ij3*to0sc5^Jn&$F79&^dCJ)Dw-U9LD7C^*c%Fhl9hl!TPxo@9dV z7BVC(!u`slbFD#{vtaYiWmsJ&smaGZYXij4ti^8-&{U06nI$As1DYs|ehBpdi+gn> zeTJJ6ozK5lt!d|TH8cDyn%_$bu7*>xH%tVggjCQW1{AvFt&`k1t2`?@gZ&E{=5&l6 zn&A9G3)3)`Xn*-^=I|LD+m$MlMrd~_m>a-XS#lS8?vG|Se$(kLXm(P{45*Z>pV(zsNE0po zP+hSi&F%_}1?*?MF@q&=@;xrdTI2c~84`F$NeVnQi`K^94u~QU z@jir1;CZ=6+`(`-+^nrFO>%xvdR8F9XDBWW)4W^^ah`9f1rCTDi^au?i@OPxY5EliWX2UZB0w_0zp=lZQ7KFHb2NRm@og> zn4=9kBvFP24v|3egeFr|e@F8IK~@!-^@#gvH6H6(k!I5Cv4p94q?c=N&sIU zn8@gl^Q=g-USuw9I)-`IDjrhD(qD({6=_yHqGE2wHacNZZjy(!syh-C5^C=JSWjlT)`$N(oLz+#>oY@}X+zsq%g3Y!A8wF`G z!4Tn;KZxgBkFyfl5?0S#{9#sqL-dcvEzd{lOv{ku`- z(!(xXm9IwhiZuI){^#Ak(^cFZANNATqR)zU)!6MObA;F64gS=leB=H-`ZI7?)s{S9 zQe^1f`aTQ$RJCIA@qLm93fxrNJ0Tw-$$cfLWrX}L0m9-H74(>N|OU`sxq;U-TDy~Kfr`s9vNJ!S7D 
zH5)votdci=R>>Q0Pgg3USO7=o`kgKWu9QhRk<8=5AES9WYsLR-rkW={GXFr-|n|a-&Nu#U}WsrTIaRsH7%0&5i3% zChah70WpA>gg#BEm^Vu2GyISjFEV@GjGtt`Dbjn7(>$KJcd|FZj%J&Gw{9D!AeSk5(pV9?b8L=1HldZ_4dmVD2Y3z9_OW2H5dxEA2Uj(@@J5j`*W;xL&L7K_~HOzNI6nxS-c5QG=Sb>PI@ll&!{Pp$oO( zg7!@GWa%105{EE;F)3z})PG{yI~Gw1EK%H?ftYLY`V2R16-kZhHKN+i|GL%yr;ONa znrk9uJ(}J37W_BQ@Hkc`DbQrd1UI`KLn0WaBfDQFU(uUTL$eZ@R1-(~GhFHKUv;G2 zI3uQZq`kkVjP+bJkBxMR=C3r;_c+dWt@|QPEUE9XVr{8Q*N58g<4PZJ7+>a3h^t_oKSVg#jS}Mo9A>^ZeyJKHJ8VKnP)|P zyG%14KRo=Xs7NLqsQnBFJb9*YUzUm7)WB$)u~U{0^sM41!5xnC;+ET|O^!$1V7R|$ z#Z8Wyd)8+O_2LAk^@fE+5%2(6XU_SD{T0S?Iu0v22q#|NKG}(Kj+X*ZAW0Enb(2pY z^`W>qez`5jIWTu=+7^y08>4xyyt1Eh{4Q-i+5J49Y~dHiVK@8P4@~nCHLEliw~9{> z`~I*Gdsq~7*+-|?gt}7_{v?>9XGySVsC)JD`#DaX`r~|I!i*l7?$2=Z<_C5^<2;XY zTCb!%oV=>XVYRUD$rsd5Qdnk=^E3ItC)w_CoIRQ+i}1zdw(X1C`|XQWuM}v$_t$}H-8LB(zk~+`JH}ElF;6HWVZzK z38%ccHT1fqoS7}cn>>cMQy;;+qG2{p^Nc*BpW&!Xtbx!>z4`Od*+y5)Dmt0w33*26 zr}^Qsje1!{W0tUxcIOUwo<{S;Jfri|{BYSuHO&}nwJ>u}rFmkW(fMh9xNM`J;bt$o zBcsI=9ZjHlMu7Yn%@ac6?}N69n6G-Z#R*k{1SoKnGKss=QZF1eYMUL=wQ2s8=Jn*0FI@=Lg>?h~HR<4MtXpFw|ty8#sx z2>gvIH7ezsq#si*sn+aWzqmfQ_vbU*{KwAAC8NkIgwKp7E=3PZKYDn>3+dg0n^PWA z(|cT0;%v=bg{3~j?m~Cm0v)z|6!B!jC&sUQu0u`w(JYVmJj94kbV(F3JEY={I4@87 zOz~#OWWa~ixGL^{)_;bZPlc#yG2WJeASvy@ni3gZ+k)~}bA$Yv?yR*hH?@SHm>VT) zWZh?Ay*zl(#1m?h*rWO^rGh9fTn|}Tut6fp-KLAIk*DUItARb&8JqhQZWY4wls7j; zW99rg-hQZ~@WQD#5B)5iKgU*|LO2`CMXR+qn{t*Yn`kzl>P4n>WV!HxLh=e+tVgDf z-%6J*)-upE!VLj_-er?GoM#G1PiuNbj$Kn07|!BH$?*4(gpXw=C`tAFWQQyFxS2MV z*WXQbpYTEqUSv_-p4kGz?YsnQG2v!{u_s_9vWUu?&3hGYl_usBZWRjul)|k-`2Sc| zH%wJ-%p)8J*bA(N4M{0@2aAy@=tmN#5&a`&v^3Ql4R@0VCBdMu)cunQ&8DOZZt3Vd zBw^i!kg(u_28{TR`By_fC#YlpeZ`CH4}tj$?b- ziJR`;^~Uyk2y>f04fVo@j_A^j(0F0kQr*++l~`*+FL<|B`;m73ecC^JV1C)~er)1J z&-5(>Z)AbL35mGWM=p}nY{ zCNk3t58nOmU3i~gren4Y+?Dbyz#BK2Jw8NY<7#Enrc4W6-p`QrdKB-`F#gTrPJT4# z-H;MU=$*Yo4k6$*37^h_VoucAGt+S#++SNWpkAMwb10Oix7~ zm#+dBdUBhQOD8lANMM{2!6e7j$R@jHKZ}ScCva$0n4L+3iRN5NLf2<*2Jl1=Npo^) zLvsVW9>qoz{G5p$*lGN=^5%t^dCeOCm0ed#;3_&!2T)&W{B`_;)_M$VJw|yt;YPy| 
zo8Mtyk#bR|2R9lK0f>5`uOm};qgsv>}?CaAGuE20%qnLHS*mi67JG~=sxB> zvucP+Mh`QXxcz){bo%z}24X5D=~FPnYa;|vt`NBSY475+R0NHFSIXWzUu=RKDgU#Z zOgY7cwCOW7$hs(JoP@OhH|-r=xxn1CI|mrSTwN$I07N5MVo3@1I+&Yw&4qF)Q3J+bdvqEDp z{56vb{H++?8{a|owoe<4J=4#2J2FdvXgQk!tQAx0GzI4hx?{|j&_J_>7@6BqvAulv zRu0(YeOr*cxV)#>O3}0CPB3M=83X8u*tY=ekbRt^OmpYBk2LkO)F56-$Vnx=u<_dmIzlx72EPq{WK~ zQJJ#Uuf-2qfR^zvOht>s0~e-JG?wAA8q(0-c@k1|H)?e);(ZAl-jYxVM`VUP=w(y4 zIG|AqWXM^l3+Z);+VrShs_rOFG`zm)%n)x<8gQJY96}C~ZyCv{Wrd$#yU?;xRWK@~ z{-elNhZhs+Ra+w`NFs@uq#FMgRJ2vo2$NO7y5HkejkLbxMqGiQQm=n@(DX9VrTVLG zPSJdgEq4dz9m`{c+#1@Y12SY>Y_=MWp9VB>oHe=cw{n%h43d^Hdigg~*I_i)h4lfu z1y4XB=DdL1vdAkwH0#8lIa(vMzR=?$CdJxwC3x12s2p&v$D`4BtF@VGS#5V@3I^sn zo{*Br41&248mJIlNUik|tMb?d?Em|J{$C02w7!qC6gAeLaWqxAUFc^lq*HWia|%6j z3bkLosSS!IHqc~JW>jt(+TN#qp%?GInN?RDUbnVe+l|K0X9=A0p}EU#=fxK6Ztrey z8AOM>PL{wOK{F^uIj4)e7XVG*FvXSjFU5E^ZXHl3(^N=)+nCVt?@Fa?1ABCQoQ%3l3t-f-RazT_ehjJa=N?Zvf)W!r$ zEU7PWYTH{$c;XhAeBxp$OMzC~Y1Gr|DG{NQE3V59&yHJ-#`EVt`C&+R6uC$ljVUy< zvf2s%=kw=UwLEuM8`;;=bO)0@y?R>Rkd%EeWjZk2xiJDyTW`7>x+CavH87QSk-QhK z?>#_zWSvE$Ct3;S?l?@W`T}?ojW%2G^E)a$Ue$x&k^|(~w&E!Zq~(S7x@G~AGD}pm z$7mF8mMw<~a}5P+ftR*CXcV>@O>~2KW2;}|)ld@6F(dh&Bb{hFJU5oYfJo?Ssm?`+ zH@=Z59gr@~Z9Z*F4tUJ0OhJ#Jf<6Kx4(4ythQb z90g5COhavngiK13QX-6??2#cy!#$yQn)9`Z-Sqspxbx0r4na$?`hbj&;#JYFbSXc{tG^n)XuWE^YQZU2)-P#vCoLFJ^LJ*4 zn*|Nq46cs>BDZC1_JoJjIHD42`UY?DB?|P+K=(*~HK>t}?jkBHKFj){OPGU(Q=^7Z z4+FTvGlbre z((Vd=CuzG}YqPFj7Cc%~P7V`Iw!Ri7bTK5)O#c_Hcv6YyJmpD zC&BF8{QaJQJ%y#yn)bk&f*H?oxXRq?3IpS#m8Q7oN&u+rstH%iZZrG0*kIsNo-dGYP5-PbR6U+(VyWqaq<>ld&8 z0^9e|_^c<>QvEO6E4Ni8?oaZpufq=!?~(`((C6$z)Qec?^>w?-ZE1n#7IeM%;JVE1 zg<_tww5Ysgbp|bMLYmSfq)EW2+^X?a>iT_y3bG<@@YyTgi#w0|Q<3eMHy7^k2yO}L zH|JRtnQMDBL5+0_r_)#mDA(ygJaT`Zl9(1HJQ$`PK+`MyBORHqc`g4auLgVR{YJNj zw>e-Xq#?+Pa9u>uUZ>D)vu?|tUD1lQ829YA73s^iCJ`ZZ8jzo)=|Fz$1>d^kDxI4B zSpY@)9eB}t-P&!2w981++}Uo5S8o~(fc157+wr968gnSZe6~U8xb!?TCla0k>v?BB zn1p9qr0X{J0ZHk#X-5<^@w6>O;O95>c&N9{V&n{%yxF^y|2H8>DT zXwA}E+ZY5Gv0I}DQ)&~Y`_2yQ_BAk$6sSxj629NNe23HUbnoN{LfXxYCz>T-Pa=6; 
z0aLcdZ0RO8EZgfEe3K{5E|G&~4%g9*FWi1*11}U5){Bu6p%{)%F1p5(S{6AiH&DvO zgkiUjsYjcQb^PDmtJtZurUA~^Qez$5yD~6uBgMvj!vWU_>1iIAH|9kL?9W!j0}^5F zy)eHyOYz(VWK>HU7FR>%=}Qj44}qQ&ig$=R2UR{S*Yk$@s(gEB1vBi0V{<ivVF(JZNy0^+nS_y8IeZKJtW#znRLUb z{p3GU{s-F-ZfSgtF%Dz8=Tq{6cjpG1F7A;P#4X(YCw2y^UZ? z(Z0BrnB&su7^F$x25ISL%Q0W({2=ba+nnL(7dOFC2@Onm4@8uNRG>MSFomG z!y^9{QwOR{HccdryOEZxjhKAgM-cnTN3ANpHe{WUk1Wn&kQ5a*>^x0kb;NRKr+<7E zbpzvrHP|T*3GH*mh@!2MS-ZQ9pbhD6Xj>MsOVhYLUDSb>+it}7AYxLPPKsmQ-&zUQ zvhXd>1RPVPh2KxyI+SCAxT1$d!|w4+M0fl4!%qXIsD{@8DTZPT;zWrp5E;KUjUhRO z{}uU5>&oW#O^V^ICR*$09C_4yK1=qZ=;&Pf29$wiPdTUMfkigS5|XKb79xSYG{AU| z8rZ3O2~S2b&*U-hYeU7X%zD*LGhvkH^>w(QT4^+WDie~L-)3!yxl|x%pohh6`54h< z;^u^)GUZ8F7mI|&jFUPK)KJb=tTd#jJR)<20~=%DgE<|?5*YAIn2`G7uozMRS_{U# zc@u@91nc*;JZw3GngE&rY-k|%sep|g6J6TnL%Qj!k^>XSji6c5Zx!AP(WmaTUI(_Z z3kSCMw6dBfQZm{cY_NWUF{AlzfXi>YS@;NCrAw2XbER#M|Gtt~p+JS>p9I3GAoLjV)1LSI3U^s!cK+>26JmLwM z@l^I8QZXsrWOauz3f-lZ`@qzI#wKl)31Hju&(d^YP1oJo(*N#mkuYY-TF!YWdNh%8 zS;X9EO79jYBQAsu;aC`74f3Ty8uGt)?ca3-xXzVUS7E>7mgLFz`O%ZVks*P1l%&8@ zd;aL}@kiwSzn{p>{D{L4hD2yB&~Pm(h^8R?-3B3hj^iOC@V5^qZ;$tWxYUAXKuM|_ z*Gt^)OS3%X;pGmMf?*8uEx>SbFQT^v?+DIOh*=Uk7nr097|!FP)3;}OTz=ZSI7N^b z-tCe@yIq|JmqWyHpfFw#MJ(?hYm7RkVMKF0n^`_wCO(Tv+j9oGDOQ&3CRP|%qq6k# zLnTdYCkTQDFY4It(*YALqp-8wNcKY{5j_(oSww)EIPL+BQfVSk=MO*i%xpBS3-mfr z9hqw#Iw*HGz&Htb-d{*3=MmVf}B)uL@UxZ*yTT0hwx`fJ*(a|RsrQGHTyc{`^F43jD8jwt-oum zIJz)x^FvR2^!qux*!J@|566zKG%6nZl26{wQ0wg1s=&6E8O1h$c~fo$qr}F~K9v$I zHw^iBh*~dv{_d`En*OK%>c4DxyREs_T&t_Hwzg#r2`f+}gd@H31?F(Bwjx>5x3IFr zwhkNK8Np4hjM!$9EQ+=u;bTpq#0G{`XuVBsK``LFI96g8(7aWx8No(B-PIN%p4h@u zuSA4kRwsZTel&-<;NRgarOBlt!R;nGV2VQwbz_5OaK8OuTsUD(_iGNr<7YYV<(+^2 zVL#uQd-WZQIOyOHv+x^G+X*FCo9aV<7lx6OO% zEhed89A$TaN$S0`W8G2t&Q7^iEu2sq%CgmMd2~RMaD^e$E3j*;nF>nd)Qp;YS(4DE zH+z^nV|2f~V}PJWpqaM4PHZ#CPb5*S>E56x&Hi2JD8Yb+Swsb{oip|(j2?mpyp|ZE zX@dnk6(r6g#nOm2r2}k&iPj;1VR}W&zuSuKG?x1lMG-o*Wn&>DLEDfS?h{-4scro7 zZnqb=eoY%jQpPmKo%P*UuTPA$4AM;h+>R?qnz$9`g~0QEY*_4qRL+RY`_%7TL(ROA zrfHj=+ZzS3F} 
zVNoXN)j&`=;8ECt*Ls!8a3%BJY7Gi8v!3VNvAx@92+#Ym0G_WN8BHyh%soQ+meoQN z*BP!&dm9l;`fdAX(*ZQAkAGXy6pg)nAJasIz1*3F{$lIJJfso&{?arWwE)kzT4fM* z{4KNTikrOoaX|Ez2R7!dbUPBR`nZ>5>2O(1^XuD$f(sg&^ZjPKz{OnwPb>n)8Ysm3 zq&)+YjKEB417M?yp?^*r+uqDw;iY?^T=$f1ha$Vh;YU2_CB~dAjXdwgnD5%4G!vXX zAg*8JMwWkUrvv}7^K!?_E+t?sD1~ z+A@Zyu{{xNVg6*38=<%FsOnOuZTg*>dm1~9+yq!4zhiCGmlu#3a6!3j7rofzwu9Q; zh#HW669pS80sbDZZ)&G`7}mhwoJajuMDCCl6_RGrh? z9ywNmQiq=VR71wP_pZv4KGx14$|LKFdL4+h9_)TjnF5uRfwfbhoSAO1DKIG|;p$VM z0{KroG=|f7O!EU=nEcO7T>KNwO$)0DB%)eE=0f^%lms+Dx=BmLFbTz$z@dvJ+g}!Jj_CHwNpL{`uw0co5&V9n%1F`I!Ag zAnrsk=HoQySaa>~oa0GqYoG#wy8(~rSpRz{UON=0_dooXu`*Wc|F3;$>*;xVo}Q=Y T9-jX{00960FY4+G0MG#dA~2NU diff --git a/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml b/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml deleted file mode 100644 index 732d6fc9fe..0000000000 --- a/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml +++ /dev/null @@ -1,1223 +0,0 @@ -# source: https://github.com/Altinity/clickhouse-operator/blob/master/deploy/operator/clickhouse-operator-install-crd.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: clickhouseinstallations.clickhouse.altinity.com -spec: - group: clickhouse.altinity.com - scope: Namespaced - names: - kind: ClickHouseInstallation - singular: clickhouseinstallation - plural: clickhouseinstallations - shortNames: - - chi - versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: version - type: string - description: Operator version - priority: 1 # show in wide view - jsonPath: .status.version - - name: clusters - type: integer - description: Clusters count - priority: 0 # show in standard view - jsonPath: .status.clusters - - name: shards - type: integer - description: Shards count - priority: 1 # show in wide view - jsonPath: .status.shards - - name: hosts - type: integer - description: Hosts count - priority: 0 # show in standard view - jsonPath: 
.status.hosts - - name: taskID - type: string - description: TaskID - priority: 1 # show in wide view - jsonPath: .status.taskID - - name: status - type: string - description: CHI status - priority: 0 # show in standard view - jsonPath: .status.status - - name: updated - type: integer - description: Updated hosts count - priority: 1 # show in wide view - jsonPath: .status.updated - - name: added - type: integer - description: Added hosts count - priority: 1 # show in wide view - jsonPath: .status.added - - name: deleted - type: integer - description: Hosts deleted count - priority: 1 # show in wide view - jsonPath: .status.deleted - - name: delete - type: integer - description: Hosts to be deleted count - priority: 1 # show in wide view - jsonPath: .status.delete - - name: endpoint - type: string - description: Client access endpoint - priority: 1 # show in wide view - jsonPath: .status.endpoint - schema: - openAPIV3Schema: - description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more ClickHouse clusters" - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - type: object - description: "Current ClickHouseInstallation manifest status, contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other" - x-kubernetes-preserve-unknown-fields: true - spec: - type: object - # x-kubernetes-preserve-unknown-fields: true - description: | - Specification of the desired behavior of one or more ClickHouse clusters - More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md" - properties: - taskID: - type: string - description: "Allow define custom taskID for named update and watch status of this update execution in .status.taskIDs field, by default every update of chi manifest will generate random taskID" - # Need to be StringBool - stop: - type: string - description: | - Allow stop all ClickHouse clusters described in current chi. - Stop mechanism works as follows: - - When `stop` is `1` then setup `Replicas: 0` in each related to current `chi` StatefulSet resource, all `Pods` and `Service` resources will desctroy, but PVCs still live - - When `stop` is `0` then `Pods` will created again and will attach retained PVCs and `Service` also will created again - enum: - # List StringBoolXXX constants from model - - "" - - "0" - - "1" - - "False" - - "false" - - "True" - - "true" - - "No" - - "no" - - "Yes" - - "yes" - - "Off" - - "off" - - "On" - - "on" - - "Disable" - - "disable" - - "Enable" - - "enable" - - "Disabled" - - "disabled" - - "Enabled" - - "enabled" - restart: - type: string - description: "restart policy for StatefulSets. 
When value `RollingUpdate` it allow graceful restart one by one instead of restart all StatefulSet simultaneously" - enum: - - "" - - "RollingUpdate" - # Need to be StringBool - troubleshoot: - type: string - description: "allows troubleshoot Pods during CrashLoopBack state, when you apply wrong configuration, `clickhouse-server` wouldn't startup" - enum: - # List StringBoolXXX constants from model - - "" - - "0" - - "1" - - "False" - - "false" - - "True" - - "true" - - "No" - - "no" - - "Yes" - - "yes" - - "Off" - - "off" - - "On" - - "on" - - "Disable" - - "disable" - - "Enable" - - "enable" - - "Disabled" - - "disabled" - - "Enabled" - - "enabled" - namespaceDomainPattern: - type: string - description: "custom domain suffix which will add to end of `Service` or `Pod` name, use it when you use custom cluster domain in your Kubernetes cluster" - templating: - type: object - # nullable: true - description: "optional, define policy for auto applying ClickHouseInstallationTemplate inside ClickHouseInstallation" - properties: - policy: - type: string - description: "when defined as `auto` inside ClickhouseInstallationTemplate, it will auto add into all ClickHouseInstallation, manual value is default" - enum: - - "auto" - - "manual" - reconciling: - type: object - description: "optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side" - # nullable: true - properties: - policy: - type: string - configMapPropagationTimeout: - type: integer - description: | - timeout in seconds when `clickhouse-operator` will wait when applied `ConfigMap` during reconcile `ClickhouseInstallation` pods will updated from cache - see details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically - minimum: 0 - maximum: 3600 - cleanup: - type: object - description: "optional, define behavior for cleanup Kubernetes resources during reconcile cycle" - # nullable: true - properties: - unknownObjects: - 
type: object - description: "what clickhouse-operator shall do when found Kubernetes resources which should be managed with clickhouse-operator, but not have `ownerReference` to any currently managed `ClickHouseInstallation` resource, default behavior is `Delete`" - # nullable: true - properties: - statefulSet: - type: string - description: "behavior policy for unknown StatefulSet, Delete by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - pvc: - type: string - description: "behavior policy for unknown PVC, Delete by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - configMap: - type: string - description: "behavior policy for unknown ConfigMap, Delete by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - service: - type: string - description: "behavior policy for unknown Service, Delete by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - reconcileFailedObjects: - type: object - description: "what clickhouse-operator shall do when reconciling Kubernetes resources are failed, default behavior is `Retain`" - # nullable: true - properties: - statefulSet: - type: string - description: "behavior policy for failed StatefulSet reconciling, Retain by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - pvc: - type: string - description: "behavior policy for failed PVC reconciling, Retain by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - configMap: - type: string - description: "behavior policy for failed ConfigMap reconciling, Retain by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - service: - type: string - description: "behavior policy for failed Service reconciling, Retain by default" - enum: - # List ObjectsCleanupXXX constants from model - - "Retain" - - "Delete" - 
defaults: - type: object - description: | - define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level - More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults - # nullable: true - properties: - # Need to be StringBool - replicasUseFQDN: - type: string - description: | - define should replicas be specified by FQDN in ``, then "no" then will use short hostname and clickhouse-server will use kubernetes default suffixes for properly DNS lookup - "yes" by default - enum: - # List StringBoolXXX constants from model - - "" - - "0" - - "1" - - "False" - - "false" - - "True" - - "true" - - "No" - - "no" - - "Yes" - - "yes" - - "Off" - - "off" - - "On" - - "on" - - "Disable" - - "disable" - - "Enable" - - "enable" - - "Disabled" - - "disabled" - - "Enabled" - - "enabled" - distributedDDL: - type: object - description: | - allows change `` settings - More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl - # nullable: true - properties: - profile: - type: string - description: "Settings from this profile will be used to execute DDL queries" - templates: - type: object - description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource" - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in 
`chi.spec.configuration.clusters`" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`" - serviceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for one `Service` resource which will created by `clickhouse-operator` which cover all clusters in whole `chi` resource" - clusterServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" - shardServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" - volumeClaimTemplate: - type: string - description: "DEPRECATED! 
VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - configuration: - type: object - description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" - # nullable: true - properties: - zookeeper: - type: object - description: | - allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` - `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ - currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` - More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper - # nullable: true - properties: - nodes: - type: array - description: "describe every available zookeeper cluster node for interaction" - # nullable: true - items: - type: object - #required: - # - host - properties: - host: - type: string - description: "dns name or ip address for Zookeeper node" - port: - type: integer - description: "TCP port which used to connect to Zookeeper node" - minimum: 0 - maximum: 65535 - session_timeout_ms: - type: integer - description: "session timeout during connect to Zookeeper" - operation_timeout_ms: - type: integer - description: "one operation timeout during Zookeeper transactions" - root: - type: string - description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" - identity: - type: string - description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" - users: - type: object - description: | - allows configure .. 
section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` - you can configure password hashed, authorization restrictions, database level security row filters etc. - More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ - Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers - # nullable: true - x-kubernetes-preserve-unknown-fields: true - profiles: - type: object - description: | - allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` - you can configure any aspect of settings profile - More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ - Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles - # nullable: true - x-kubernetes-preserve-unknown-fields: true - quotas: - type: object - description: | - allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` - you can configure any aspect of resource quotas - More details: https://clickhouse.tech/docs/en/operations/quotas/ - Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas - # nullable: true - x-kubernetes-preserve-unknown-fields: true - settings: - type: object - description: | - allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings - # nullable: true - x-kubernetes-preserve-unknown-fields: true - files: - type: object - description: | - allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - every key in this object is the file name - every value in this object is the file content - you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html - each key could contains prefix like USERS, COMMON, HOST or config.d, users.d, cond.d, wrong prefixes will ignored, subfolders also will ignored - More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml - # nullable: true - x-kubernetes-preserve-unknown-fields: true - clusters: - type: array - description: | - describes ClickHouse clusters layout and allows change settings on cluster-level, shard-level and replica-level - every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` - all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` - Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ - If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables - # nullable: true - items: - type: object - #required: - # - name - properties: - name: - type: string - 
description: "cluster name, used to identify set of ClickHouse servers and wide used during generate names of related Kubernetes resources" - minLength: 1 - # See namePartClusterMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - zookeeper: - type: object - description: | - optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` - override top-level `chi.spec.configuration.zookeeper` settings - # nullable: true - properties: - nodes: - type: array - description: "describe every available zookeeper cluster node for interaction" - # nullable: true - items: - type: object - #required: - # - host - properties: - host: - type: string - description: "dns name or ip address for Zookeeper node" - port: - type: integer - description: "TCP port which used to connect to Zookeeper node" - minimum: 0 - maximum: 65535 - session_timeout_ms: - type: integer - description: "session timeout during connect to Zookeeper" - operation_timeout_ms: - type: integer - description: "one operation timeout during Zookeeper transactions" - root: - type: string - description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" - identity: - type: string - description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" - settings: - type: object - description: | - optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` - override top-level `chi.spec.configuration.settings` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - files: - type: object - description: | - optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - override top-level `chi.spec.configuration.files` - # nullable: true - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster - override top-level `chi.spec.configuration.templates` - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one cluster" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, 
allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" - serviceTemplate: - type: string - description: "optional, fully ignores for cluster-level" - clusterServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" - shardServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" - volumeClaimTemplate: - type: string - description: "DEPRECATED! 
VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - layout: - type: object - description: | - describe current cluster layout, how much shards in cluster, how much replica in shard - allows override settings on each shard and replica separatelly - # nullable: true - properties: - type: - type: string - description: "DEPRECATED - to be removed soon" - shardsCount: - type: integer - description: "how much shards for current ClickHouse cluster will run in Kubernetes, each shard contains shared-nothing part of data and contains set of replicas, cluster contains 1 shard by default" - replicasCount: - type: integer - description: "how much replicas in each shards for current ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, every shard contains 1 replica by default" - shards: - type: array - description: "optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do" - # nullable: true - items: - type: object - properties: - name: - type: string - description: "optional, by default shard name is generated, but you can override it and setup custom name" - minLength: 1 - # See namePartShardMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - - definitionType: - type: string - description: "DEPRECATED - to be removed soon" - weight: - type: integer - description: | - optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, - will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml - More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ - # Need to be StringBool - internalReplication: - type: string - description: | - optional, `true` 
by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise - allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, - will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml - More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ - enum: - # List StringBoolXXX constants from model - - "" - - "0" - - "1" - - "False" - - "false" - - "True" - - "true" - - "No" - - "no" - - "Yes" - - "yes" - - "Off" - - "off" - - "On" - - "on" - - "Disable" - - "disable" - - "Enable" - - "enable" - - "Disabled" - - "disabled" - - "Enabled" - - "enabled" - settings: - type: object - # nullable: true - description: | - optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` - override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - x-kubernetes-preserve-unknown-fields: true - files: - type: object - # nullable: true - description: | - optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard - override top-level `chi.spec.configuration.templates` and cluster-level 
`chi.spec.configuration.clusters.templates` - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - serviceTemplate: - type: string - description: "optional, fully ignores for shard-level" - clusterServiceTemplate: - type: string - description: "optional, fully ignores for shard-level" - shardServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will 
created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" - volumeClaimTemplate: - type: string - description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - replicasCount: - type: integer - description: | - optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, - shard contains 1 replica by default - override cluster-level `chi.spec.configuration.clusters.layout.replicasCount` - minimum: 1 - replicas: - type: array - description: | - optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` - # nullable: true - items: - # Host - type: object - properties: - name: - type: string - description: "optional, by default replica name is generated, but you can override it and setup custom name" - minLength: 1 - # See namePartReplicaMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - tcpPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` - allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` - minimum: 1 - maximum: 65535 - httpPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` - allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` - minimum: 1 - maximum: 65535 - interserverHTTPPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, 
override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` - allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol - minimum: 1 - maximum: 65535 - settings: - type: object - # nullable: true - description: | - optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` - override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - x-kubernetes-preserve-unknown-fields: true - files: - type: object - # nullable: true - description: | - optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica - override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica" - podTemplate: - type: string - description: "optional, template name 
from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - serviceTemplate: - type: string - description: "optional, fully ignores for replica-level" - clusterServiceTemplate: - type: string - description: "optional, fully ignores for replica-level" - shardServiceTemplate: - type: string - description: "optional, fully ignores for replica-level" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica" - volumeClaimTemplate: - type: string - description: "DEPRECATED! 
VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - replicas: - type: array - description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" - # nullable: true - items: - type: object - properties: - name: - type: string - description: "optional, by default replica name is generated, but you can override it and setup custom name" - minLength: 1 - # See namePartShardMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - settings: - type: object - description: | - optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` - override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - files: - type: object - # nullable: true - description: | - optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica - override top-level 
`chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica" - dataVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - serviceTemplate: - type: string - description: "optional, fully ignores for replica-level" - clusterServiceTemplate: - type: string - description: "optional, fully ignores for replica-level" - shardServiceTemplate: - type: string - description: "optional, fully ignores for replica-level" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica" - 
volumeClaimTemplate: - type: string - description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - shardsCount: - type: integer - description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" - minimum: 1 - shards: - type: array - description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" - # nullable: true - items: - # Host - type: object - properties: - name: - type: string - description: "optional, by default shard name is generated, but you can override it and setup custom name" - minLength: 1 - # See namePartReplicaMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - tcpPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` - allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` - minimum: 1 - maximum: 65535 - httpPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` - allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` - minimum: 1 - maximum: 65535 - interserverHTTPPort: - type: integer - description: | - optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` - allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol - minimum: 1 - maximum: 65535 - settings: - type: object - description: | - optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` - override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - files: - type: object - description: | - optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents - # nullable: true - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: | - optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica - override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` - # nullable: true - properties: - hostTemplate: - type: string - description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard" - podTemplate: - type: string - description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - dataVolumeClaimTemplate: - type: 
string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - logVolumeClaimTemplate: - type: string - description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" - serviceTemplate: - type: string - description: "optional, fully ignores for shard-level" - clusterServiceTemplate: - type: string - description: "optional, fully ignores for shard-level" - shardServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" - replicaServiceTemplate: - type: string - description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" - volumeClaimTemplate: - type: string - description: "DEPRECATED! 
VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" - templates: - type: object - description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" - # nullable: true - properties: - hostTemplates: - type: array - description: "hostTemplate will use during apply to generate `clickhose-server` config files" - # nullable: true - items: - type: object - #required: - # - name - properties: - name: - description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" - type: string - portDistribution: - type: array - description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" - # nullable: true - items: - type: object - #required: - # - type - properties: - type: - type: string - description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" - enum: - # List PortDistributionXXX constants - - "" - - "Unspecified" - - "ClusterScopeIndex" - spec: - # Host - type: object - properties: - name: - type: string - description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`" - 
minLength: 1 - # See namePartReplicaMaxLen const - maxLength: 15 - pattern: "^[a-zA-Z0-9-]{0,15}$" - tcpPort: - type: integer - description: | - optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply - if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` - More info: https://clickhouse.tech/docs/en/interfaces/tcp/ - minimum: 1 - maximum: 65535 - httpPort: - type: integer - description: | - optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply - if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` - More info: https://clickhouse.tech/docs/en/interfaces/http/ - minimum: 1 - maximum: 65535 - interserverHTTPPort: - type: integer - description: | - optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply - if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` - More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port - minimum: 1 - maximum: 65535 - settings: - type: object - description: | - optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` - More details: https://clickhouse.tech/docs/en/operations/settings/settings/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - files: - type: object - description: | - optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` - # nullable: true - x-kubernetes-preserve-unknown-fields: true - templates: - type: object - description: "be carefull, this part of CRD allows override template inside template, don't use it if you don't understand what you do" - # nullable: true - properties: - hostTemplate: - type: string - podTemplate: - type: string - dataVolumeClaimTemplate: - type: string - logVolumeClaimTemplate: - type: string - serviceTemplate: - type: string - clusterServiceTemplate: - type: string - shardServiceTemplate: - type: string - replicaServiceTemplate: - type: string - - podTemplates: - type: array - description: | - podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone - More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates - # nullable: true - items: - type: object - #required: - # - name - properties: - name: - type: string - description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" - generateName: - type: string - description: "allows define format for 
generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables" - zone: - type: object - description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" - #required: - # - values - properties: - key: - type: string - description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" - values: - type: array - description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" - # nullable: true - items: - type: string - distribution: - type: string - description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" - enum: - - "" - - "Unspecified" - - "OnePerHost" - podDistribution: - type: array - description: "define ClickHouse Pod distibution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" - # nullable: true - items: - type: object - #required: - # - type - properties: - type: - type: string - description: "you can define multiple affinity policy types" - enum: - # List PodDistributionXXX constants - - "" - - "Unspecified" - - "ClickHouseAntiAffinity" - - "ShardAntiAffinity" - - "ReplicaAntiAffinity" - - "AnotherNamespaceAntiAffinity" - - "AnotherClickHouseInstallationAntiAffinity" - - "AnotherClusterAntiAffinity" - - "MaxNumberPerNode" - - "NamespaceAffinity" - - "ClickHouseInstallationAffinity" - - "ClusterAffinity" - - "ShardAffinity" - - "ReplicaAffinity" - - "PreviousTailAffinity" - - "CircularReplication" - scope: - type: string - description: "scope for apply each podDistribution" - enum: - # list PodDistributionScopeXXX constants - - "" - - "Unspecified" - - "Shard" - - "Replica" - - "Cluster" - - "ClickHouseInstallation" - - "Namespace" - number: - type: 
integer - description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" - minimum: 0 - maximum: 65535 - topologyKey: - type: string - description: "use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" - spec: - # TODO specify PodSpec - type: object - description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" - # nullable: true - x-kubernetes-preserve-unknown-fields: true - metadata: - type: object - description: | - allows pass standard object's metadata from template to Pod - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - # nullable: true - x-kubernetes-preserve-unknown-fields: true - - volumeClaimTemplates: - type: array - description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else" - # nullable: true - items: - type: object - #required: - # - name - # - spec - properties: - name: - description: | - template name, could use to link inside - top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, - cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, - shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` - replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or 
`chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` - type: string - reclaimPolicy: - type: string - description: "define behavior of `PVC` deletion policy during delete `Pod`, `Delete` by default, when `Retain` then `PVC` still alive even `Pod` will deleted" - enum: - - "" - - "Retain" - - "Delete" - metadata: - type: object - description: | - allows pass standard object's metadata from template to PVC - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - # nullable: true - x-kubernetes-preserve-unknown-fields: true - spec: - type: object - description: | - allows define all aspects of `PVC` resource - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims - # nullable: true - x-kubernetes-preserve-unknown-fields: true - serviceTemplates: - type: array - description: | - allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level - # nullable: true - items: - type: object - #required: - # - name - # - spec - properties: - name: - type: string - description: | - template name, could use to link inside - chi-level `chi.spec.defaults.templates.serviceTemplate` - cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` - shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` - replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` - generateName: - type: string - description: "allows define format for generated `Service` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables" - metadata: - # TODO specify ObjectMeta - type: object - description: 
| - allows pass standard object's metadata from template to Service - Could be use for define specificly for Cloud Provider metadata which impact to behavior of service - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - spec: - # TODO specify ServiceSpec - type: object - description: | - describe behavior of generated Service - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - # nullable: true - x-kubernetes-preserve-unknown-fields: true - useTemplates: - type: array - description: "list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `Chi` manifest during render Kubernetes resources to create related ClickHouse clusters" - # nullable: true - items: - type: object - #required: - # - name - properties: - name: - type: string - description: "name of `ClickHouseInstallationTemplate` (chit) resource" - namespace: - type: string - description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`" - useType: - type: string - description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" - enum: - # List useTypeXXX constants from model - - "" - - "merge" \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore b/deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore deleted file mode 100644 index 7653e97e66..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore +++ /dev/null @@ -1,25 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ - -unittests/ diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml deleted file mode 100644 index b855a035a5..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -name: alertmanager -description: The Alertmanager handles alerts sent by client applications such as the Prometheus server. -type: application -version: 0.5.0 -appVersion: 0.5.0 - diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml deleted file mode 100644 index cba5de8e29..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -configmapReload: - enabled: true diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt deleted file mode 100644 index 91577ad79a..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt +++ /dev/null @@ -1,21 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alertmanager.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ .Values.service.port }}:80 -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl deleted file mode 100644 index 47d01ca1c6..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "alertmanager.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "alertmanager.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "alertmanager.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "alertmanager.labels" -}} -helm.sh/chart: {{ include "alertmanager.chart" . }} -{{ include "alertmanager.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "alertmanager.selectorLabels" -}} -app.kubernetes.io/name: {{ include "alertmanager.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "alertmanager.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "alertmanager.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml deleted file mode 100644 index 71d5ea0933..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.config }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "alertmanager.fullname" . 
}} - labels: - {{- include "alertmanager.labels" . | nindent 4 }} -data: - alertmanager.yml: | - {{- toYaml .Values.config | default "{}" | nindent 4 }} - {{- range $key, $value := .Values.templates }} - {{ $key }}: |- - {{- $value | nindent 4 }} - {{- end }} -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml deleted file mode 100644 index efc9599c01..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "alertmanager.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "alertmanager.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml deleted file mode 100644 index f6f8b3e80a..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.podDisruptionBudget -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "alertmanager.fullname" . }} - labels: - {{- include "alertmanager.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - {{- include "alertmanager.selectorLabels" . | nindent 6 }} -{{ toYaml .Values.podDisruptionBudget | indent 2 }} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml deleted file mode 100644 index 9ca80f4cba..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "alertmanager.serviceAccountName" . }} - labels: - {{- include "alertmanager.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml deleted file mode 100644 index 81e30a9468..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: alertmanager - labels: - {{- include "alertmanager.labels" . | nindent 4 }} -{{- if .Values.service.annotations }} - annotations: - {{- toYaml .Values.service.annotations | nindent 4 }} -{{- end }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - selector: - {{- include "alertmanager.selectorLabels" . | nindent 4 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ include "alertmanager.fullname" . }}-headless - labels: - {{- include "alertmanager.labels" . | nindent 4 }} -spec: - clusterIP: None - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }} - - port: 9094 - targetPort: 9094 - protocol: TCP - name: cluster-tcp - - port: 9094 - targetPort: 9094 - protocol: UDP - name: cluster-udp - {{- end }} - selector: - {{- include "alertmanager.selectorLabels" . 
| nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml deleted file mode 100644 index 95ed0ce72e..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml +++ /dev/null @@ -1,152 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "alertmanager.fullname" . }} - labels: - {{- include "alertmanager.labels" . | nindent 4 }} -{{- if .Values.statefulSet.annotations }} - annotations: - {{ toYaml .Values.statefulSet.annotations | nindent 4 }} -{{- end }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "alertmanager.selectorLabels" . | nindent 6 }} - serviceName: {{ include "alertmanager.fullname" . }}-headless - template: - metadata: - labels: - {{- include "alertmanager.selectorLabels" . | nindent 8 }} -{{- if .Values.podLabels }} - {{ toYaml .Values.podLabels | nindent 8 }} -{{- end }} - annotations: - {{- if not .Values.configmapReload.enabled }} - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- end }} -{{- if .Values.podAnnotations }} - {{- toYaml .Values.podAnnotations | nindent 8 }} -{{- end }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "alertmanager.serviceAccountName" . }} - {{- with .Values.dnsConfig }} - dnsConfig: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - {{- if and (.Values.configmapReload.enabled) (.Values.config) }} - - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }} - image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" - args: - - --volume-dir=/etc/alertmanager - - --webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload - resources: - {{- toYaml .Values.configmapReload.resources | nindent 12 }} - volumeMounts: - - name: config - mountPath: /etc/alertmanager - {{- end }} - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP -{{- if .Values.command }} - command: - {{- toYaml .Values.command | nindent 12 }} -{{- end }} - args: - - --storage.path=/alertmanager - - --config.file=/etc/alertmanager/alertmanager.yml - {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }} - - --cluster.advertise-address=$(POD_IP):9094 - - --cluster.listen-address=0.0.0.0:9094 - {{- end }} - {{- if gt .Values.replicaCount 1.0}} - {{- $fullName := include "alertmanager.fullname" . 
}} - {{- range $i := until (int .Values.replicaCount) }} - - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:9094 - {{- end }} - {{- end }} - {{- if .Values.additionalPeers }} - {{- range $item := .Values.additionalPeers }} - - --cluster.peer={{ $item }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - ports: - - name: http - containerPort: 9093 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - {{- if .Values.config }} - - name: config - mountPath: /etc/alertmanager - {{- end }} - - name: storage - mountPath: /alertmanager - {{- if .Values.config }} - volumes: - - name: config - configMap: - name: {{ include "alertmanager.fullname" . }} - {{- end }} - {{- if .Values.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: - {{- toYaml .Values.persistence.accessModes | nindent 10 }} - resources: - requests: - storage: {{ .Values.persistence.size }} - {{- if .Values.persistence.storageClass }} - {{- if (eq "-" .Values.persistence.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.persistence.storageClass }} - {{- end }} - {{- end }} -{{- else }} - - name: storage - emptyDir: {} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap deleted file mode 100644 index 3b149c763b..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap +++ /dev/null @@ -1,48 +0,0 @@ -should match snapshot of default values: - 1: | - apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - labels: - app.kubernetes.io/instance: RELEASE-NAME - 
app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: alertmanager - app.kubernetes.io/version: 1.0.0 - helm.sh/chart: alertmanager-1.0.0 - name: RELEASE-NAME-alertmanager - spec: - ingressClassName: nginx-test - rules: - - host: alertmanager.domain.com - http: - paths: - - backend: - service: - name: RELEASE-NAME-alertmanager - port: - number: 9093 - path: / - pathType: ImplementationSpecific -should match snapshot of default values with old kubernetes ingress: - 1: | - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: nginx-test - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: alertmanager - app.kubernetes.io/version: 1.0.0 - helm.sh/chart: alertmanager-1.0.0 - name: RELEASE-NAME-alertmanager - spec: - rules: - - host: alertmanager.domain.com - http: - paths: - - backend: - serviceName: RELEASE-NAME-alertmanager - servicePort: 9093 - path: / diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml deleted file mode 100644 index b468c0c2b8..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml +++ /dev/null @@ -1,81 +0,0 @@ -suite: test ingress -templates: - - ingress.yaml -tests: - - it: should be empty if ingress is not enabled - asserts: - - hasDocuments: - count: 0 - - it: should have apiVersion extensions/v1beta1 for k8s < 1.14 - set: - ingress.enabled: true - capabilities: - majorVersion: 1 - minorVersion: 13 - asserts: - - hasDocuments: - count: 1 - - isKind: - of: Ingress - - isAPIVersion: - of: extensions/v1beta1 - - it: should have apiVersion networking.k8s.io/v1beta1 for k8s < 1.19 - set: - ingress.enabled: true - capabilities: - majorVersion: 1 - minorVersion: 18 - asserts: - - hasDocuments: - count: 1 - - isKind: - of: Ingress - - isAPIVersion: - of: 
networking.k8s.io/v1beta1 - - it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19 - set: - ingress.enabled: true - capabilities: - majorVersion: 1 - minorVersion: 19 - asserts: - - hasDocuments: - count: 1 - - isKind: - of: Ingress - - isAPIVersion: - of: networking.k8s.io/v1 - - it: should have an ingressClassName for k8s >= 1.19 - set: - ingress.enabled: true - ingress.className: nginx-test - capabilities: - majorVersion: 1 - minorVersion: 19 - asserts: - - hasDocuments: - count: 1 - - equal: - path: spec.ingressClassName - value: nginx-test - - it: should match snapshot of default values - set: - ingress.enabled: true - ingress.className: nginx-test - chart: - version: 1.0.0 - appVersion: 1.0.0 - asserts: - - matchSnapshot: { } - - it: should match snapshot of default values with old kubernetes ingress - set: - ingress.enabled: true - ingress.className: nginx-test - capabilities: - majorVersion: 1 - minorVersion: 17 - chart: - version: 1.0.0 - appVersion: 1.0.0 - asserts: - - matchSnapshot: { } diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml deleted file mode 100644 index 1547564070..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml +++ /dev/null @@ -1,189 +0,0 @@ -# Default values for alertmanager. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: signoz/alertmanager - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "0.5.0" - -extraArgs: {} - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: - -podSecurityContext: - fsGroup: 65534 -dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 -securityContext: - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - -additionalPeers: [] - -service: - annotations: {} - type: ClusterIP - port: 9093 - # if you want to force a specific nodePort. Must be use with service.type=NodePort - # nodePort: - -ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: alertmanager.domain.com - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - alertmanager.domain.com - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 10m - # memory: 32Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -statefulSet: - annotations: {} - -podAnnotations: {} -podLabels: {} - -# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} - # maxUnavailable: 1 - # minAvailable: 1 - -command: [] - -persistence: - enabled: true - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. - ## - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 100Mi - -config: - global: - resolve_timeout: 1m - slack_api_url: 'https://hooks.slack.com/services/xxx' - - templates: - - '/etc/alertmanager/*.tmpl' - - receivers: - - name: 'slack-notifications' - slack_configs: - - channel: '#alerts' - send_resolved: true - icon_url: https://avatars3.githubusercontent.com/u/3380462 - title: '{{ template "slack.title" . }}' - text: '{{ template "slack.text" . 
}}' - - - route: - receiver: 'slack-notifications' - -## Monitors ConfigMap changes and POSTs to a URL -## Ref: https://github.com/jimmidyson/configmap-reload -## -configmapReload: - ## If false, the configmap-reload container will not be deployed - ## - enabled: false - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 - pullPolicy: IfNotPresent - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - -templates: - title.tmpl: |- - {{ define "slack.title" }} - [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }} - {{- if gt (len .CommonLabels) (len .GroupLabels) -}} - {{" "}}( - {{- with .CommonLabels.Remove .GroupLabels.Names }} - {{- range $index, $label := .SortedPairs -}} - {{ if $index }}, {{ end }} - {{- $label.Name }}="{{ $label.Value -}}" - {{- end }} - {{- end -}} - ) - {{- end }} - {{ end }} - text.tmpl: |- - {{ define "slack.text" }} - {{ range .Alerts -}} - *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }} - - *Description:* {{ .Annotations.description }} - - *Details:* - {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` - {{ end }} - {{ end }} - {{ end }} \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/.helmignore b/deploy/kubernetes/platform/signoz-charts/frontend/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml deleted file mode 100644 index 86f9b4eecf..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: frontend -description: A Helm chart for SigNoz Frontend Service - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -version: 0.5.4 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. -appVersion: 0.5.4 diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/NOTES.txt b/deploy/kubernetes/platform/signoz-charts/frontend/templates/NOTES.txt deleted file mode 100644 index 325c5b9100..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/NOTES.txt +++ /dev/null @@ -1,21 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . 
}} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "frontend.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "frontend.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "frontend.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "frontend.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/_helpers.tpl b/deploy/kubernetes/platform/signoz-charts/frontend/templates/_helpers.tpl deleted file mode 100644 index c5b2f64fc9..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "frontend.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "frontend.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "frontend.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "frontend.labels" -}} -helm.sh/chart: {{ include "frontend.chart" . }} -{{ include "frontend.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "frontend.selectorLabels" -}} -app.kubernetes.io/name: {{ include "frontend.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "frontend.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "frontend.fullname" .) 
.Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml deleted file mode 100644 index 278cb1513c..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.config.name }} - labels: - release: {{ .Release.Name }} -data: - default.conf: |- - server { - listen {{ .Values.service.port }}; - server_name _; - - gzip on; - gzip_static on; - gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - gzip_proxied any; - gzip_vary on; - gzip_comp_level 6; - gzip_buffers 16 8k; - gzip_http_version 1.1; - - location / { - root /usr/share/nginx/html; - index index.html index.htm; - try_files $uri $uri/ /index.html; - } - location /api/alertmanager{ - proxy_pass http://{{ .Values.config.alertmanagerUrl }}/api/v2; - } - location /api { - proxy_pass http://{{ .Values.config.queryServiceUrl }}/api; - } - - # redirect server error pages to the static page /50x.html - # - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/share/nginx/html; - } - } \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml deleted file mode 100644 index 7f0ac67e9e..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/deployment.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "frontend.fullname" . }} - labels: - {{- include "frontend.labels" . 
| nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "frontend.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "frontend.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - volumes: - - name: nginx-config - configMap: - name: {{ .Values.config.name }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.service.port }} - protocol: TCP - env: - - name: REACT_APP_QUERY_SERVICE_URL - value: {{ .Values.configVars.REACT_APP_QUERY_SERVICE_URL }} - volumeMounts: - - name: nginx-config - mountPath: /etc/nginx/conf.d - # livenessProbe: - # httpGet: - # path: / - # port: http - # readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/ingress.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/ingress.yaml deleted file mode 100644 index 27e775b047..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/ingress.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "frontend.fullname" . 
-}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "frontend.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/service.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/service.yaml deleted file mode 100644 index 708b0eda39..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "frontend.fullname" . }} - labels: - {{- include "frontend.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "frontend.selectorLabels" . 
| nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/serviceaccount.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/serviceaccount.yaml deleted file mode 100644 index e4c361826d..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "frontend.serviceAccountName" . }} - labels: - {{- include "frontend.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/tests/test-connection.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/tests/test-connection.yaml deleted file mode 100644 index d95398cc6f..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "frontend.fullname" . }}-test-connection" - labels: - {{- include "frontend.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "frontend.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml deleted file mode 100644 index 7538dcfec6..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# Default values for frontend. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -replicaCount: 1 - -image: - repository: signoz/frontend - tag: 0.5.4 - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -configVars: {} - -config: - name: signoz-nginx-config - queryServiceUrl: signoz-query-service:8080 - alertmanagerUrl: alertmanager:9093 - -serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 3000 - - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/.helmignore b/deploy/kubernetes/platform/signoz-charts/query-service/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml deleted file mode 100644 index dfe8b7179d..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: query-service -description: A Helm chart for running SigNoz Query Service in Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -version: 0.5.4 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. -appVersion: 0.5.4 diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/NOTES.txt b/deploy/kubernetes/platform/signoz-charts/query-service/templates/NOTES.txt deleted file mode 100644 index 6ce1c33ae1..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/NOTES.txt +++ /dev/null @@ -1,21 +0,0 @@ -1. 
Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "query-service.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "query-service.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "query-service.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "query-service.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/_helpers.tpl b/deploy/kubernetes/platform/signoz-charts/query-service/templates/_helpers.tpl deleted file mode 100644 index 927bb49aa6..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "query-service.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "query-service.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "query-service.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "query-service.labels" -}} -helm.sh/chart: {{ include "query-service.chart" . }} -{{ include "query-service.selectorLabels" . 
}} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "query-service.selectorLabels" -}} -app.kubernetes.io/name: {{ include "query-service.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "query-service.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "query-service.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/ingress.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/ingress.yaml deleted file mode 100644 index ceb66508f2..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/ingress.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "query-service.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "query-service.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . 
}} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml deleted file mode 100644 index 6dc77baf64..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: prometheus-config -data: - prometheus.yml: | - # my global config - global: - scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute. - evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. - # scrape_timeout is set to the global default (10s). - - # Alertmanager configuration - alerting: - alertmanagers: - - static_configs: - - targets: - - alertmanager:9093 - - # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. - rule_files: - # - "first_rules.yml" - # - "second_rules.yml" - - 'alerts.yml' - - # A scrape configuration containing exactly one endpoint to scrape: - # Here it's Prometheus itself. - scrape_configs: - - - remote_read: - - url: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password - diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/service.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/service.yaml deleted file mode 100644 index 151d25d0dd..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "query-service.fullname" . }} - labels: - {{- include "query-service.labels" . 
| nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "query-service.selectorLabels" . | nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/serviceaccount.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/serviceaccount.yaml deleted file mode 100644 index 1d1184f1f5..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "query-service.serviceAccountName" . }} - labels: - {{- include "query-service.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml deleted file mode 100644 index 7fbad5057d..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml +++ /dev/null @@ -1,89 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "query-service.fullname" . }} - labels: - {{- include "query-service.labels" . | nindent 4 }} -spec: - serviceName: query-service - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "query-service.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "query-service.selectorLabels" . | nindent 8 }} - spec: - containers: - - name: {{ .Chart.Name }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: ["-config=/root/config/prometheus.yml"] - ports: - - name: http - containerPort: 8080 - protocol: TCP - env: - - name: DruidClientUrl - value: {{ .Values.configVars.DruidClientUrl }} - - name: DruidDatasource - value: {{ .Values.configVars.DruidDatasource }} - - name: STORAGE - value: {{ .Values.configVars.STORAGE }} - - name: ClickHouseUrl - value: {{ .Values.configVars.ClickHouseUrl}} - - name: GODEBUG - value: netdns=go - - name: TELEMETRY_ENABLED - value: {{ .Values.configVars.TELEMETRY_ENABLED}} - # livenessProbe: - # httpGet: - # path: / - # port: http - # readinessProbe: - # httpGet: - # path: / - # port: http - volumeMounts: - - name: prometheus - mountPath: /root/config - - name: signoz-db - mountPath: /var/lib/signoz/ - - name: dashboards - mountPath: /root/config/dashboards - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: prometheus - configMap: - name: prometheus-config - - name: dashboards - emptyDir: {} - - volumeClaimTemplates: - - metadata: - name: signoz-db - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/tests/test-connection.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/tests/test-connection.yaml deleted file mode 100644 index 8f864d43db..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "query-service.fullname" . }}-test-connection" - labels: - {{- include "query-service.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "query-service.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml deleted file mode 100644 index 038d5802a5..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# Default values for query-service. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -replicaCount: 1 - -image: - repository: signoz/query-service - tag: 0.5.4 - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - - -configVars: - DruidClientUrl: http://signoz-druid-router:8888 - DruidDatasource: flattened_spans - ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password - STORAGE: clickhouse - - -serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 8080 - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/deploy/kubernetes/platform/templates/clickhouse-config.yaml b/deploy/kubernetes/platform/templates/clickhouse-config.yaml deleted file mode 100644 index 51011c64ab..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-config.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: initdb-config -data: - init-db.sql: |- - CREATE TABLE IF NOT EXISTS signoz_index ( - timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), - traceID String CODEC(ZSTD(1)), - spanID String CODEC(ZSTD(1)), - parentSpanID String CODEC(ZSTD(1)), - serviceName LowCardinality(String) CODEC(ZSTD(1)), - name LowCardinality(String) CODEC(ZSTD(1)), - kind Int32 CODEC(ZSTD(1)), - durationNano UInt64 CODEC(ZSTD(1)), - tags Array(String) CODEC(ZSTD(1)), - tagsKeys Array(String) CODEC(ZSTD(1)), - tagsValues Array(String) CODEC(ZSTD(1)), - statusCode Int64 CODEC(ZSTD(1)), - references String CODEC(ZSTD(1)), - externalHttpMethod Nullable(String) CODEC(ZSTD(1)), - externalHttpUrl Nullable(String) CODEC(ZSTD(1)), - component Nullable(String) CODEC(ZSTD(1)), - dbSystem Nullable(String) CODEC(ZSTD(1)), - dbName Nullable(String) CODEC(ZSTD(1)), - dbOperation Nullable(String) CODEC(ZSTD(1)), - peerService Nullable(String) CODEC(ZSTD(1)), - INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64, - INDEX idx_duration durationNano TYPE minmax GRANULARITY 1 - ) ENGINE MergeTree() - PARTITION BY toDate(timestamp) - ORDER BY (serviceName, -toUnixTimestamp(timestamp)) \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/clickhouse-instance.yaml b/deploy/kubernetes/platform/templates/clickhouse-instance.yaml deleted file mode 100644 index bcdb58c7ba..0000000000 --- 
a/deploy/kubernetes/platform/templates/clickhouse-instance.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{ if (eq (.Values.cloud | toString) "gcp" )}} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: gce-resizable -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-standard - fstype: ext4 - replication-type: none -reclaimPolicy: Retain -#volumeBindingMode: Immediate -allowVolumeExpansion: true ---- -{{- else if (eq (.Values.cloud | toString) "aws") }} -# -# AWS resizable disk example -# -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: gp2-resizable -provisioner: kubernetes.io/aws-ebs -parameters: - type: gp2 -reclaimPolicy: Retain -#volumeBindingMode: Immediate -allowVolumeExpansion: true ---- -{{- end }} -apiVersion: "clickhouse.altinity.com/v1" -kind: "ClickHouseInstallation" -metadata: - name: signoz -spec: - defaults: - templates: - dataVolumeClaimTemplate: default-volume-claim - # logVolumeClaimTemplate: default-volume-claim - serviceTemplate: chi-service-template - configuration: - zookeeper: - nodes: - - host: signoz-zookeeper - port: 2181 - session_timeout_ms: 6000 - clusters: - - name: cluster - # Templates are specified for this cluster explicitly - templates: - dataVolumeClaimTemplate: default-volume-claim - # logVolumeClaimTemplate: default-volume-claim - podTemplate: pod-template-with-volume - layout: - shardsCount: 1 - replicasCount: 1 - templates: - hostTemplates: - - name: port-distribution - portDistribution: - - type: ClusterScopeIndex - spec: - tcpPort: 9000 - httpPort: 8123 - interserverHTTPPort: 9009 - - podTemplates: - - name: pod-template-with-volume - spec: - containers: - - name: clickhouse - image: yandex/clickhouse-server:21.7 - volumeMounts: - - name: default-volume-claim - mountPath: /var/lib/clickhouse - - name: initdb - mountPath: /docker-entrypoint-initdb.d - volumes: - - name: initdb - configMap: - name: initdb-config - serviceTemplates: - - name: chi-service-template - generateName: 
signoz-clickhouse - spec: - ports: - - name: http - port: 8123 - - name: tcp - port: 9000 - type: {{ .Values.clickhouseOperator.serviceType }} - volumeClaimTemplates: - - name: default-volume-claim - reclaimPolicy: Retain - spec: - {{- if (eq (.Values.cloud | toString) "gcp" )}} - storageClassName: gce-resizable - {{- else if (eq (.Values.cloud | toString) "aws") }} - storageClassName: gp2-resizable - {{- else if (eq (.Values.cloud | toString) "hcloud") }} - storageClassName: hcloud-volumes - {{- end }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.clickhouseOperator.storage | quote }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml deleted file mode 100644 index 5242d671d5..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml +++ /dev/null @@ -1,157 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Template Parameters: -# -# NAMESPACE=posthog -# COMMENT=# -# ROLE_KIND=ClusterRole -# ROLE_NAME=clickhouse-operator-posthog -# ROLE_BINDING_KIND=ClusterRoleBinding -# ROLE_BINDING_NAME=clickhouse-operator-posthog -# -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: clickhouse-operator-posthog - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - services - verbs: - - create - - delete - - get - - patch - - update - - list - - watch -- apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - persistentvolumes - - pods - verbs: - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: 
- - statefulsets - verbs: - - create - - delete - - get - - patch - - update - - list - - watch -- apiGroups: - - apps - resources: - - replicasets - verbs: - - delete - - get - - patch - - update -- apiGroups: - - apps - resourceNames: - - clickhouse-operator - resources: - - deployments - verbs: - - get - - patch - - update - - delete -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - patch - - update - - list - - watch -- apiGroups: - - clickhouse.altinity.com - resources: - - clickhouseinstallations - verbs: - - delete - - get - - patch - - update -- apiGroups: - - clickhouse.altinity.com - resources: - - clickhouseinstallations - - clickhouseinstallationtemplates - - clickhouseoperatorconfigurations - verbs: - - get - - list - - watch -- apiGroups: - - clickhouse.altinity.com - resources: - - clickhouseinstallations/finalizers - - clickhouseinstallationtemplates/finalizers - - clickhouseoperatorconfigurations/finalizers - verbs: - - update -- apiGroups: - - clickhouse.altinity.com - resources: - - clickhouseinstallations/status - - clickhouseinstallationtemplates/status - - clickhouseoperatorconfigurations/status - verbs: - - create - - delete - - get - - update - - patch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml deleted file mode 100644 index e06d68a4f2..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Setup ClusterRoleBinding between ClusterRole and ServiceAccount. 
-# ClusterRoleBinding is namespace-less and must have unique name -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: clickhouse-operator-posthog - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: clickhouse-operator-posthog -subjects: -- kind: ServiceAccount - name: clickhouse-operator - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml deleted file mode 100644 index 8c70ddd136..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml +++ /dev/null @@ -1,418 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Template Parameters: -# -# NAME=etc-clickhouse-operator-files -# NAMESPACE=posthog -# COMMENT= -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: etc-clickhouse-operator-files - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -data: - config.yaml: | - ################################################ - ## - ## Watch Namespaces Section - ## - ################################################ - - # List of namespaces where clickhouse-operator watches for events. - # Concurrently running operators should watch on different namespaces - #watchNamespaces: ["dev", "test"] - watchNamespaces: [] - - ################################################ - ## - ## Additional Configuration Files Section - ## - ################################################ - - # Path to folder where ClickHouse configuration files common for all instances within CHI are located. - chCommonConfigsPath: config.d - - # Path to folder where ClickHouse configuration files unique for each instance (host) within CHI are located. 
- chHostConfigsPath: conf.d - - # Path to folder where ClickHouse configuration files with users settings are located. - # Files are common for all instances within CHI - chUsersConfigsPath: users.d - - # Path to folder where ClickHouseInstallation .yaml manifests are located. - # Manifests are applied in sorted alpha-numeric order - chiTemplatesPath: templates.d - - ################################################ - ## - ## Cluster Create/Update/Delete Objects Section - ## - ################################################ - - # How many seconds to wait for created/updated StatefulSet to be Ready - statefulSetUpdateTimeout: 300 - - # How many seconds to wait between checks for created/updated StatefulSet status - statefulSetUpdatePollPeriod: 5 - - # What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds - # Possible options: - # 1. abort - do nothing, just break the process and wait for admin - # 2. delete - delete newly created problematic StatefulSet - # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet - onStatefulSetCreateFailureAction: ignore - - # What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds - # Possible options: - # 1. abort - do nothing, just break the process and wait for admin - # 2. rollback - delete Pod and rollback StatefulSet to previous Generation. - # Pod would be recreated by StatefulSet based on rollback-ed configuration - # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet - onStatefulSetUpdateFailureAction: rollback - - ################################################ - ## - ## ClickHouse Settings Section - ## - ################################################ - - # Default values for ClickHouse user configuration - # 1. user/profile - string - # 2. user/quota - string - # 3. user/networks/ip - multiple strings - # 4. 
user/password - string - chConfigUserDefaultProfile: default - chConfigUserDefaultQuota: default - chConfigUserDefaultNetworksIP: - - "::1" - - "127.0.0.1" - chConfigUserDefaultPassword: "default" - - # Default host_regexp to limit network connectivity from outside - chConfigNetworksHostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$" - - ################################################ - ## - ## Access to ClickHouse instances - ## - ################################################ - - # ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances - # for: - # 1. Metrics requests - # 2. Schema maintenance - # 3. DROP DNS CACHE - # User with such credentials can be specified in additional ClickHouse .xml config files, - # located in `chUsersConfigsPath` folder - chUsername: "clickhouse_operator" - chPassword: "clickhouse_operator_password" - - # Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances - # Can be used instead of explicitly specified username and password - chCredentialsSecretNamespace: "" - chCredentialsSecretName: "" - - # Port where to connect to ClickHouse instances to - chPort: 8123 - - ################################################ - ## - ## Log parameters - ## - ################################################ - - logtostderr: "true" - alsologtostderr: "false" - v: "1" - stderrthreshold: "" - vmodule: "" - log_backtrace_at: "" - - ################################################ - ## - ## Runtime parameters - ## - ################################################ - - # Max number of concurrent reconciles in progress - reconcileThreadsNumber: 10 - reconcileWaitExclude: true - reconcileWaitInclude: false - - ################################################ - ## - ## Labels management parameters - ## - ################################################ - - # When propagating labels from the chi's 
`metadata.labels` section to child objects' `metadata.labels`, - # exclude labels from the following list: - #excludeFromPropagationLabels: - # - "labelA" - # - "labelB" - - # Whether to append *Scope* labels to StatefulSet and Pod. - # Full list of available *scope* labels check in labeler.go - # LabelShardScopeIndex - # LabelReplicaScopeIndex - # LabelCHIScopeIndex - # LabelCHIScopeCycleSize - # LabelCHIScopeCycleIndex - # LabelCHIScopeCycleOffset - # LabelClusterScopeIndex - # LabelClusterScopeCycleSize - # LabelClusterScopeCycleIndex - # LabelClusterScopeCycleOffset - appendScopeLabels: "no" - - ################################################ - ## - ## Pod management parameters - ## - ################################################ - # Grace period for Pod termination. - # How many seconds to wait between sending - # SIGTERM and SIGKILL during Pod termination process. - # Increase this number is case of slow shutdown. - terminationGracePeriod: 30 - ---- - -# Template Parameters: -# -# NAME=etc-clickhouse-operator-confd-files -# NAMESPACE=posthog -# COMMENT= -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: etc-clickhouse-operator-confd-files - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -data: - ---- - -# Template Parameters: -# -# NAME=etc-clickhouse-operator-configd-files -# NAMESPACE=posthog -# COMMENT= -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: etc-clickhouse-operator-configd-files - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -data: - 01-clickhouse-01-listen.xml: | - - - :: - 0.0.0.0 - 1 - - - 01-clickhouse-02-logger.xml: | - - - - debug - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 10 - - 1 - - - - 01-clickhouse-03-query_log.xml: | - - - system -

query_log
- Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day - 7500 - - - - - 01-clickhouse-04-part_log.xml: | - - - system - part_log
- Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day - 7500 -
-
- ---- - -# Template Parameters: -# -# NAME=etc-clickhouse-operator-templatesd-files -# NAMESPACE=posthog -# COMMENT= -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: etc-clickhouse-operator-templatesd-files - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -data: - 001-templates.json.example: | - { - "apiVersion": "clickhouse.altinity.com/v1", - "kind": "ClickHouseInstallationTemplate", - "metadata": { - "name": "01-default-volumeclaimtemplate" - }, - "spec": { - "templates": { - "volumeClaimTemplates": [ - { - "name": "chi-default-volume-claim-template", - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "2Gi" - } - } - } - } - ], - "podTemplates": [ - { - "name": "chi-default-oneperhost-pod-template", - "distribution": "OnePerHost", - "spec": { - "containers" : [ - { - "name": "clickhouse", - "image": "yandex/clickhouse-server:19.3.7", - "ports": [ - { - "name": "http", - "containerPort": 8123 - }, - { - "name": "client", - "containerPort": 9000 - }, - { - "name": "interserver", - "containerPort": 9009 - } - ] - } - ] - } - } - ] - } - } - } - - default-pod-template.yaml.example: | - apiVersion: "clickhouse.altinity.com/v1" - kind: "ClickHouseInstallationTemplate" - metadata: - name: "default-oneperhost-pod-template" - spec: - templates: - podTemplates: - - name: default-oneperhost-pod-template - distribution: "OnePerHost" - default-storage-template.yaml.example: | - apiVersion: "clickhouse.altinity.com/v1" - kind: "ClickHouseInstallationTemplate" - metadata: - name: "default-storage-template-2Gi" - spec: - templates: - volumeClaimTemplates: - - name: default-storage-template-2Gi - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - - readme: | - Templates in this folder are packaged with an operator and available via 'useTemplate' - ---- - -# Template Parameters: -# -# 
NAME=etc-clickhouse-operator-usersd-files -# NAMESPACE=posthog -# COMMENT= -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: etc-clickhouse-operator-usersd-files - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -data: - 01-clickhouse-user.xml: | - - - - - 127.0.0.1 - 0.0.0.0/0 - ::/0 - - 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448 - clickhouse_operator - default - - - - - 0 - 1 - 10 - - - - - 02-clickhouse-default-profile.xml: | - - - - 1 - 1000 - 1 - 1 - - - - 03-database-ordinary.xml: | - - - - - Ordinary - - - - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml deleted file mode 100644 index fe0981dc61..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml +++ /dev/null @@ -1,129 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Template Parameters: -# -# NAMESPACE=posthog -# COMMENT= -# OPERATOR_IMAGE=altinity/clickhouse-operator:latest -# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:latest -# -# Setup Deployment for clickhouse-operator -# Deployment would be created in kubectl-specified namespace -kind: Deployment -apiVersion: apps/v1 -metadata: - name: clickhouse-operator - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -spec: - replicas: 1 - selector: - matchLabels: - app: clickhouse-operator - template: - metadata: - labels: - app: clickhouse-operator - annotations: - prometheus.io/port: '8888' - prometheus.io/scrape: 'true' - spec: - serviceAccountName: clickhouse-operator - volumes: - - name: etc-clickhouse-operator-folder - configMap: - name: etc-clickhouse-operator-files - - name: etc-clickhouse-operator-confd-folder - configMap: - name: etc-clickhouse-operator-confd-files - - name: 
etc-clickhouse-operator-configd-folder - configMap: - name: etc-clickhouse-operator-configd-files - - name: etc-clickhouse-operator-templatesd-folder - configMap: - name: etc-clickhouse-operator-templatesd-files - - name: etc-clickhouse-operator-usersd-folder - configMap: - name: etc-clickhouse-operator-usersd-files - containers: - - name: clickhouse-operator - image: altinity/clickhouse-operator:latest - imagePullPolicy: Always - volumeMounts: - - name: etc-clickhouse-operator-folder - mountPath: /etc/clickhouse-operator - - name: etc-clickhouse-operator-confd-folder - mountPath: /etc/clickhouse-operator/conf.d - - name: etc-clickhouse-operator-configd-folder - mountPath: /etc/clickhouse-operator/config.d - - name: etc-clickhouse-operator-templatesd-folder - mountPath: /etc/clickhouse-operator/templates.d - - name: etc-clickhouse-operator-usersd-folder - mountPath: /etc/clickhouse-operator/users.d - env: - # Pod-specific - # spec.nodeName: ip-172-20-52-62.ec2.internal - - name: OPERATOR_POD_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # metadata.name: clickhouse-operator-6f87589dbb-ftcsf - - name: OPERATOR_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - # metadata.namespace: kube-system - - name: OPERATOR_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - # status.podIP: 100.96.3.2 - - name: OPERATOR_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - # spec.serviceAccount: clickhouse-operator - # spec.serviceAccountName: clickhouse-operator - - name: OPERATOR_POD_SERVICE_ACCOUNT - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - # Container-specific - - name: OPERATOR_CONTAINER_CPU_REQUEST - valueFrom: - resourceFieldRef: - containerName: clickhouse-operator - resource: requests.cpu - - name: OPERATOR_CONTAINER_CPU_LIMIT - valueFrom: - resourceFieldRef: - containerName: clickhouse-operator - resource: limits.cpu - - name: OPERATOR_CONTAINER_MEM_REQUEST - valueFrom: - resourceFieldRef: 
- containerName: clickhouse-operator - resource: requests.memory - - name: OPERATOR_CONTAINER_MEM_LIMIT - valueFrom: - resourceFieldRef: - containerName: clickhouse-operator - resource: limits.memory - - - name: metrics-exporter - image: altinity/metrics-exporter:latest - imagePullPolicy: Always - volumeMounts: - - name: etc-clickhouse-operator-folder - mountPath: /etc/clickhouse-operator - - name: etc-clickhouse-operator-confd-folder - mountPath: /etc/clickhouse-operator/conf.d - - name: etc-clickhouse-operator-configd-folder - mountPath: /etc/clickhouse-operator/config.d - - name: etc-clickhouse-operator-templatesd-folder - mountPath: /etc/clickhouse-operator/templates.d - - name: etc-clickhouse-operator-usersd-folder - mountPath: /etc/clickhouse-operator/users.d - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml deleted file mode 100644 index 4794c800c7..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Template Parameters: -# -# NAMESPACE=posthog -# COMMENT= -# -# Setup ClusterIP Service to provide monitoring metrics for Prometheus -# Service would be created in kubectl-specified namespace -# In order to get access outside of k8s it should be exposed as: -# kubectl --namespace prometheus port-forward service/prometheus 9090 -# and point browser to localhost:9090 -kind: Service -apiVersion: v1 -metadata: - name: clickhouse-operator-metrics - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - labels: - app: clickhouse-operator -spec: - ports: - - port: 8888 - name: clickhouse-operator-metrics - selector: - app: clickhouse-operator - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml 
b/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml deleted file mode 100644 index 247e80a197..0000000000 --- a/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.clickhouseOperator.enabled }} -# Template Parameters: -# -# COMMENT= -# NAMESPACE=posthog -# NAME=clickhouse-operator -# -# Setup ServiceAccount -apiVersion: v1 -kind: ServiceAccount -metadata: - name: clickhouse-operator - namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} - -{{- end }} diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml deleted file mode 100644 index ee337b6f42..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: otel-collector-metrics-conf - labels: - app: opentelemetry - component: otel-collector-metrics-conf -data: - otel-collector-metrics-config: | - receivers: - otlp: - protocols: - grpc: - http: - - # Data sources: metrics - prometheus: - config: - scrape_configs: - - job_name: "otel-collector" - scrape_interval: 30s - static_configs: - - targets: ["otel-collector:8889"] - processors: - batch: - send_batch_size: 1000 - timeout: 10s - memory_limiter: - # Same as --mem-ballast-size-mib CLI argument - ballast_size_mib: 683 - # 80% of maximum memory up to 2G - limit_mib: 1500 - # 25% of limit up to 2G - spike_limit_mib: 512 - check_interval: 5s - # queued_retry: - # num_workers: 4 - # queue_size: 100 - # retry_on_failure: true - extensions: - health_check: {} - zpages: {} - exporters: - clickhousemetricswrite: - endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password - - service: - 
extensions: [health_check, zpages] - pipelines: - metrics: - receivers: [otlp, prometheus] - processors: [batch] - exporters: [clickhousemetricswrite] \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml deleted file mode 100644 index eeed7079e5..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-collector-metrics - labels: - app: opentelemetry - component: otel-collector-metrics -spec: - selector: - matchLabels: - app: opentelemetry - component: otel-collector-metrics - minReadySeconds: 5 - progressDeadlineSeconds: 120 - replicas: 1 #TODO - adjust this to your own requirements - template: - metadata: - labels: - app: opentelemetry - component: otel-collector-metrics - spec: - containers: - - command: - - "/otelcontribcol" - - "--config=/conf/otel-collector-metrics-config.yaml" -# Memory Ballast size should be max 1/3 to 1/2 of memory. - - "--mem-ballast-size-mib=683" - image: signoz/otelcontribcol:0.4.2 - name: otel-collector - resources: - limits: - cpu: 1 - memory: 2Gi - requests: - cpu: 200m - memory: 400Mi - ports: - - containerPort: 55679 # Default endpoint for ZPages. - - containerPort: 55680 # Default endpoint for OpenTelemetry receiver. - - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver. - - containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver. - - containerPort: 14250 # Default endpoint for Jaeger GRPC receiver. - - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver. - - containerPort: 9411 # Default endpoint for Zipkin receiver. - - containerPort: 8888 # Default endpoint for querying metrics. 
- volumeMounts: - - name: otel-collector-metrics-config-vol - mountPath: /conf -# - name: otel-collector-secrets -# mountPath: /secrets - livenessProbe: - httpGet: - path: / - port: 13133 # Health Check extension default port. - readinessProbe: - httpGet: - path: / - port: 13133 # Health Check extension default port. - volumes: - - configMap: - name: otel-collector-metrics-conf - items: - - key: otel-collector-metrics-config - path: otel-collector-metrics-config.yaml - name: otel-collector-metrics-config-vol -# - secret: -# name: otel-collector-secrets -# items: -# - key: cert.pem -# path: cert.pem -# - key: key.pem -# path: key.pem diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml deleted file mode 100644 index b8a1f93a5f..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: otel-collector-metrics - labels: - app: opentelemetry - component: otel-collector-metrics -spec: - ports: - - name: otlp # Default endpoint for OpenTelemetry receiver. - port: 55680 - protocol: TCP - targetPort: 55680 - - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver. - port: 55681 - protocol: TCP - targetPort: 55681 - - name: otlp-grpc # Default endpoint for OpenTelemetry receiver. - port: 4317 - protocol: TCP - targetPort: 4317 - - name: jaeger-grpc # Default endpoing for Jaeger gRPC receiver - port: 14250 - - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver. - port: 14268 - - name: zipkin # Default endpoint for Zipkin receiver. - port: 9411 - - name: metrics # Default endpoint for querying metrics. 
- port: 8888 - selector: - component: otel-collector-metrics \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml deleted file mode 100644 index c1de1ba3a6..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: otel-collector-conf - labels: - app: opentelemetry - component: otel-collector-conf -data: - otel-collector-config: | - receivers: - otlp/spanmetrics: - protocols: - grpc: - endpoint: "localhost:12345" - otlp: - protocols: - grpc: - http: - jaeger: - protocols: - grpc: - thrift_http: - processors: - batch: - send_batch_size: 1000 - timeout: 10s - signozspanmetrics/prometheus: - metrics_exporter: prometheus - latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] - memory_limiter: - # Same as --mem-ballast-size-mib CLI argument - ballast_size_mib: 683 - # 80% of maximum memory up to 2G - limit_mib: 1500 - # 25% of limit up to 2G - spike_limit_mib: 512 - check_interval: 5s - # queued_retry: - # num_workers: 4 - # queue_size: 100 - # retry_on_failure: true - extensions: - health_check: {} - zpages: {} - exporters: - clickhouse: - datasource: tcp://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password - clickhousemetricswrite: - endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password - resource_to_telemetry_conversion: - enabled: true - prometheus: - endpoint: "0.0.0.0:8889" - service: - extensions: [health_check, zpages] - pipelines: - traces: - receivers: [jaeger, otlp] - processors: [signozspanmetrics/prometheus, batch] - exporters: [clickhouse] - metrics: - receivers: [otlp] - processors: [batch] - 
exporters: [clickhousemetricswrite] - metrics/spanmetrics: - receivers: [otlp/spanmetrics] - exporters: [prometheus] \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml deleted file mode 100644 index 52d3313e0e..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml +++ /dev/null @@ -1,73 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-collector - labels: - app: opentelemetry - component: otel-collector -spec: - selector: - matchLabels: - app: opentelemetry - component: otel-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 - replicas: 1 #TODO - adjust this to your own requirements - template: - metadata: - labels: - app: opentelemetry - component: otel-collector - spec: - containers: - - command: - - "/otelcontribcol" - - "--config=/conf/otel-collector-config.yaml" -# Memory Ballast size should be max 1/3 to 1/2 of memory. - - "--mem-ballast-size-mib=683" - image: signoz/otelcontribcol:0.4.2 - name: otel-collector - resources: - limits: - cpu: 1 - memory: 2Gi - requests: - cpu: 200m - memory: 400Mi - ports: - - containerPort: 55679 # Default endpoint for ZPages. - - containerPort: 55680 # Default endpoint for OpenTelemetry receiver. - - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver. - - containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver. - - containerPort: 14250 # Default endpoint for Jaeger GRPC receiver. - - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver. - - containerPort: 9411 # Default endpoint for Zipkin receiver. - - containerPort: 8888 # Default endpoint for querying metrics. - - containerPort: 8889 # Default endpoint for prometheus exported metrics. 
- volumeMounts: - - name: otel-collector-config-vol - mountPath: /conf -# - name: otel-collector-secrets -# mountPath: /secrets - livenessProbe: - httpGet: - path: / - port: 13133 # Health Check extension default port. - readinessProbe: - httpGet: - path: / - port: 13133 # Health Check extension default port. - volumes: - - configMap: - name: otel-collector-conf - items: - - key: otel-collector-config - path: otel-collector-config.yaml - name: otel-collector-config-vol -# - secret: -# name: otel-collector-secrets -# items: -# - key: cert.pem -# path: cert.pem -# - key: key.pem -# path: key.pem diff --git a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml deleted file mode 100644 index f64303b29b..0000000000 --- a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: otel-collector - labels: - app: opentelemetry - component: otel-collector -spec: - ports: - - name: otlp # Default endpoint for OpenTelemetry receiver. - port: 55680 - protocol: TCP - targetPort: 55680 - - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver. - port: 55681 - protocol: TCP - targetPort: 55681 - - name: otlp-grpc # Default endpoint for OpenTelemetry receiver. - port: 4317 - protocol: TCP - targetPort: 4317 - - name: jaeger-grpc # Default endpoing for Jaeger gRPC receiver - port: 14250 - - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver. - port: 14268 - - name: zipkin # Default endpoint for Zipkin receiver. - port: 9411 - - name: metrics # Default endpoint for querying metrics. - port: 8888 - - name: prometheus-metrics # Default endpoint for querying prometheus metrics. 
- port: 8889 - selector: - component: otel-collector \ No newline at end of file diff --git a/deploy/kubernetes/platform/values.yaml b/deploy/kubernetes/platform/values.yaml deleted file mode 100644 index e37ea7a5d9..0000000000 --- a/deploy/kubernetes/platform/values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -zookeeper: - autopurge: - purgeInterval: 1 - -query-service: - configVars: - ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password - STORAGE: clickhouse - TELEMETRY_ENABLED: true - -cloud: aws - -clickhouseOperator: - enabled: true - storage: 20Gi - serviceType: ClusterIP From 821b80acdef0e41f07444e21ed14341565aa7a4e Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Tue, 8 Feb 2022 16:37:06 +0530 Subject: [PATCH 56/81] chore: external address query is updated (#685) --- frontend/src/container/MetricsApplication/Tabs/External.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/container/MetricsApplication/Tabs/External.tsx b/frontend/src/container/MetricsApplication/Tabs/External.tsx index 092779541c..8e159ce8b7 100644 --- a/frontend/src/container/MetricsApplication/Tabs/External.tsx +++ b/frontend/src/container/MetricsApplication/Tabs/External.tsx @@ -81,7 +81,7 @@ const External = ({ getWidget }: ExternalProps): JSX.Element => { fullViewOptions={false} widget={getWidget([ { - query: `sum(rate(signoz_external_call_latency_sum{service_name="${servicename}"}[5m])/rate(signoz_external_call_latency_count{service_name="${servicename}"}[5m])) by (http_url)`, + query: `(sum(rate(signoz_external_call_latency_sum{service_name="${servicename}"}[5m])) by (http_url))/(sum(rate(signoz_external_call_latency_count{service_name="${servicename}"}[5m])) by (http_url))`, legend: '{{http_url}}', }, ])} From f74467e33cf4ec48d8ebacc4c8f615bc3e388d10 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Tue, 8 Feb 2022 17:45:40 +0530 Subject: [PATCH 57/81] fix: exclude operation in trace APIs (#682) --- 
.../app/clickhouseReader/reader.go | 24 +++++++++++++++---- pkg/query-service/constants/constants.go | 17 ++++++------- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 184127c2dc..ba20131876 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1356,6 +1356,10 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode var query string excludeMap := make(map[string]struct{}) for _, e := range queryParams.Exclude { + if e == constants.OperationRequest { + excludeMap[constants.OperationDB] = struct{}{} + continue + } excludeMap[e] = struct{}{} } @@ -1382,7 +1386,7 @@ func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *mode args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args) } if len(queryParams.MinDuration) != 0 { @@ -1599,6 +1603,10 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo excludeMap := make(map[string]struct{}) for _, e := range queryParams.Exclude { + if e == constants.OperationRequest { + excludeMap[constants.OperationDB] = struct{}{} + continue + } excludeMap[e] = struct{}{} } @@ -1626,7 +1634,7 @@ func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *mo args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) + args = buildFilterArrayQuery(ctx, excludeMap, 
queryParams.Operation, constants.OperationDB, &query, args) } if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" @@ -1746,6 +1754,10 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model excludeMap := make(map[string]struct{}) for _, e := range queryParams.Exclude { + if e == constants.OperationRequest { + excludeMap[constants.OperationDB] = struct{}{} + continue + } excludeMap[e] = struct{}{} } @@ -1773,7 +1785,7 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args) } if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" @@ -2261,6 +2273,10 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query excludeMap := make(map[string]struct{}) for _, e := range queryParams.Exclude { + if e == constants.OperationRequest { + excludeMap[constants.OperationDB] = struct{}{} + continue + } excludeMap[e] = struct{}{} } @@ -2352,7 +2368,7 @@ func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, query args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args) } if len(queryParams.Operation) > 0 { - args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.Operation, &query, args) + args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args) } if len(queryParams.MinDuration) != 0 { query = query + " AND durationNano >= ?" 
diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 4f150f358e..3339432513 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -27,12 +27,13 @@ const ALERTMANAGER_API_PREFIX = "http://alertmanager:9093/api/" const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db" const ( - ServiceName = "serviceName" - HttpRoute = "httpRoute" - HttpCode = "httpCode" - HttpHost = "httpHost" - HttpUrl = "httpUrl" - HttpMethod = "httpMethod" - Component = "component" - Operation = "name" + ServiceName = "serviceName" + HttpRoute = "httpRoute" + HttpCode = "httpCode" + HttpHost = "httpHost" + HttpUrl = "httpUrl" + HttpMethod = "httpMethod" + Component = "component" + OperationDB = "name" + OperationRequest = "operation" ) From 6342e1cebc1b219a54d2734ca970823545fb455c Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 8 Feb 2022 17:50:02 +0530 Subject: [PATCH 58/81] =?UTF-8?q?docs(deploy):=20=F0=9F=93=9D=20Add=20READ?= =?UTF-8?q?ME=20docs=20for=20deploy=20(#669)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- deploy/README.md | 88 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 deploy/README.md diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 0000000000..cbd7d8a63a --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,88 @@ +# Deploy + +Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz) +and currently are in `signoz/deploy` folder. + +## Docker + +If you don't have docker set up, please follow [this guide](https://docs.docker.com/engine/install/) +to set up docker before proceeding with the next steps. 
+ +### Using Install Script + +Now run the following command to install: + +```sh +./install.sh +``` + +### Using Docker Compose + +If you don't have docker-compose set up, please follow [this guide](https://docs.docker.com/compose/install/) +to set up docker compose before proceeding with the next steps. + +For x86 chip (amd): + +```sh +docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d +``` + +For Mac with Apple chip (arm): + +```sh +docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d +``` + +Open http://localhost:3000 in your favourite browser. In couple of minutes, you should see +the data generated from hotrod in SigNoz UI. + +## Kubernetes + +### Using Helm + +#### Bring up SigNoz cluster + +```sh +helm repo add signoz https://charts.signoz.io + +kubectl create ns platform + +helm -n platform install my-release signoz/signoz +``` + +To access the UI, you can `port-forward` the frontend service: + +```sh +kubectl -n platform port-forward svc/my-release-frontend 3000:3000 +``` + +Open http://localhost:3000 in your favourite browser. Few minutes after you generate load +from the HotROD application, you should see the data generated from hotrod in SigNoz UI. + +#### Test HotROD application with SigNoz + +```sh +kubectl create ns sample-application + +kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml +``` + +To generate load: + +```sh +kubectl -n sample-application run strzal --image=djbingham/curl \ +--restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \ +'locust_count=6' -F 'hatch_rate=2' http://locust-master:8089/swarm +``` + +To stop load: + +```sh +kubectl -n sample-application run strzal --image=djbingham/curl \ + --restart='OnFailure' -i --tty --rm --command -- curl \ + http://locust-master:8089/stop +``` + +## Uninstall/Troubleshoot? + +Go to our official documentation site [signoz.io/docs](https://signoz.io/docs) for more. 
From d22d1d1c3bc04b8f37fd204beb7d021a6d16037a Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 8 Feb 2022 22:47:06 +0530 Subject: [PATCH 59/81] =?UTF-8?q?refactor(ports):=20=F0=9F=92=A5=20avoid?= =?UTF-8?q?=20exposing=20unnecessary=20ports=20and=20update=20frontend=20p?= =?UTF-8?q?ort=20to=203301=20(#679)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor(compose-yaml): ♻️ avoid unused and unnecessary ports mapping from compose files Signed-off-by: Prashant Shahi * refactor(frontend): 💥 change frontend port to 3301 BREAKING CHANGE: Signed-off-by: Prashant Shahi --- CONTRIBUTING.md | 4 +-- .../clickhouse-setup/docker-compose.yaml | 2 +- deploy/docker-swarm/common/nginx-config.conf | 2 +- .../clickhouse-setup/docker-compose.arm.yaml | 27 +---------------- .../clickhouse-setup/docker-compose.yaml | 29 +------------------ deploy/docker/common/nginx-config.conf | 2 +- .../docker-compose-tiny.yaml | 9 ++---- .../druid-kafka-setup/docker-compose.yaml | 9 ++---- deploy/install.sh | 6 ++-- frontend/Dockerfile | 2 +- frontend/README.md | 2 +- frontend/conf/default.conf | 2 +- frontend/docker-compose.yml | 2 +- frontend/webpack.config.js | 2 +- .../app/clickhouseReader/reader.go | 2 +- 15 files changed, 22 insertions(+), 80 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b215b9d478..4c0e474b3a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ If you don't want to install SigNoz backend just for doing frontend development, - `yarn install` - `yarn dev` -**_Frontend should now be accessible at `http://localhost:3000/application`_** +**_Frontend should now be accessible at `http://localhost:3301/application`_** # Contribute to Query-Service @@ -69,7 +69,7 @@ Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/char - [minikube](https://minikube.sigs.k8s.io/docs/start/) - create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster - 
run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart -- run `kubectl -n platform port-forward svc/my-release-frontend 3000:3000` to make SigNoz UI available at [localhost:3000](http://localhost:3000) +- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301) **To load data with HotROD sample app:** diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 66afd79142..7313a96251 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -50,7 +50,7 @@ services: links: - "query-service" ports: - - "3000:3000" + - "3301:3301" volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf diff --git a/deploy/docker-swarm/common/nginx-config.conf b/deploy/docker-swarm/common/nginx-config.conf index 881cdc77cb..f059d3972e 100644 --- a/deploy/docker-swarm/common/nginx-config.conf +++ b/deploy/docker-swarm/common/nginx-config.conf @@ -1,5 +1,5 @@ server { - listen 3000; + listen 3301; server_name _; gzip on; diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml index 6f7a8912a7..cd68ead27f 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -3,12 +3,6 @@ version: "2.4" services: clickhouse: image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm - expose: - - 8123 - - 9000 - ports: - - 9001:9000 - - 8123:8123 volumes: - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - ./data/clickhouse/:/var/lib/clickhouse/ @@ -27,15 +21,11 @@ services: command: - '--config.file=/prometheus/alertmanager.yml' - '--storage.path=/data' - ports: - - 9093:9093 query-service: image: signoz/query-service:0.5.4 container_name: query-service 
command: ["-config=/root/config/prometheus.yml"] - ports: - - "8080:8080" volumes: - ./prometheus.yml:/root/config/prometheus.yml - ../dashboards:/root/config/dashboards @@ -54,10 +44,8 @@ services: container_name: frontend depends_on: - query-service - links: - - "query-service" ports: - - "3000:3000" + - "3301:3301" volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf @@ -67,16 +55,7 @@ services: volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml ports: - - "1777:1777" # pprof extension - - "8887:8888" # Prometheus metrics exposed by the agent - - "14268:14268" # Jaeger receiver - - "55678" # OpenCensus receiver - - "55680:55680" # OTLP HTTP/2.0 legacy port - - "55681:55681" # OTLP HTTP/1.0 receiver - "4317:4317" # OTLP GRPC receiver - - "55679:55679" # zpages extension - - "13133" # health_check - - "8889:8889" # prometheus exporter mem_limit: 2000m restart: always depends_on: @@ -95,8 +74,6 @@ services: hotrod: image: jaegertracing/example-hotrod:1.30 container_name: hotrod - ports: - - "9000:8080" logging: options: max-size: 50m @@ -109,8 +86,6 @@ services: image: "grubykarol/locust:1.2.3-python3.9-alpine3.12" container_name: load-hotrod hostname: load-hotrod - ports: - - "8089:8089" environment: ATTACKED_HOST: http://hotrod:8080 LOCUST_MODE: standalone diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 8621c77e12..e7c8b40ee3 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -3,16 +3,9 @@ version: "2.4" services: clickhouse: image: yandex/clickhouse-server:21.12.3.32 - expose: - - 8123 - - 9000 - ports: - - 9001:9000 - - 8123:8123 volumes: - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - ./data/clickhouse/:/var/lib/clickhouse/ - healthcheck: # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" test: ["CMD", "wget", 
"--spider", "-q", "localhost:8123/ping"] @@ -28,15 +21,11 @@ services: command: - '--config.file=/prometheus/alertmanager.yml' - '--storage.path=/data' - ports: - - 9093:9093 query-service: image: signoz/query-service:0.5.4 container_name: query-service command: ["-config=/root/config/prometheus.yml"] - ports: - - "8080:8080" volumes: - ./prometheus.yml:/root/config/prometheus.yml - ../dashboards:/root/config/dashboards @@ -46,7 +35,6 @@ services: - STORAGE=clickhouse - GODEBUG=netdns=go - TELEMETRY_ENABLED=true - depends_on: clickhouse: condition: service_healthy @@ -56,10 +44,8 @@ services: container_name: frontend depends_on: - query-service - links: - - "query-service" ports: - - "3000:3000" + - "3301:3301" volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf @@ -69,16 +55,7 @@ services: volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml ports: - - "1777:1777" # pprof extension - - "8887:8888" # Prometheus metrics exposed by the agent - - "14268:14268" # Jaeger receiver - - "55678" # OpenCensus receiver - - "55680:55680" # OTLP HTTP/2.0 legacy port - - "55681:55681" # OTLP HTTP/1.0 receiver - "4317:4317" # OTLP GRPC receiver - - "55679:55679" # zpages extension - - "13133" # health_check - - "8889:8889" # prometheus exporter mem_limit: 2000m restart: always depends_on: @@ -97,8 +74,6 @@ services: hotrod: image: jaegertracing/example-hotrod:1.30 container_name: hotrod - ports: - - "9000:8080" logging: options: max-size: 50m @@ -111,8 +86,6 @@ services: image: "grubykarol/locust:1.2.3-python3.9-alpine3.12" container_name: load-hotrod hostname: load-hotrod - ports: - - "8089:8089" environment: ATTACKED_HOST: http://hotrod:8080 LOCUST_MODE: standalone diff --git a/deploy/docker/common/nginx-config.conf b/deploy/docker/common/nginx-config.conf index dd5eac0c83..3c7a9db8f0 100644 --- a/deploy/docker/common/nginx-config.conf +++ b/deploy/docker/common/nginx-config.conf @@ -1,5 +1,5 @@ server { - listen 3000; + listen 3301; 
server_name _; gzip on; diff --git a/deploy/docker/druid-kafka-setup/docker-compose-tiny.yaml b/deploy/docker/druid-kafka-setup/docker-compose-tiny.yaml index 5e4e82af1f..20ce14e822 100644 --- a/deploy/docker/druid-kafka-setup/docker-compose-tiny.yaml +++ b/deploy/docker/druid-kafka-setup/docker-compose-tiny.yaml @@ -167,7 +167,8 @@ services: container_name: query-service depends_on: - - router + router: + condition: service_healthy ports: - "8080:8080" volumes: @@ -180,10 +181,6 @@ services: - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w - GODEBUG=netdns=go - depends_on: - router: - condition: service_healthy - frontend: image: signoz/frontend:0.4.1 container_name: frontend @@ -193,7 +190,7 @@ services: links: - "query-service" ports: - - "3000:3000" + - "3301:3301" volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf diff --git a/deploy/docker/druid-kafka-setup/docker-compose.yaml b/deploy/docker/druid-kafka-setup/docker-compose.yaml index a4ccf66af0..c47823492f 100644 --- a/deploy/docker/druid-kafka-setup/docker-compose.yaml +++ b/deploy/docker/druid-kafka-setup/docker-compose.yaml @@ -162,7 +162,8 @@ services: container_name: query-service depends_on: - - router + router: + condition: service_healthy ports: - "8080:8080" @@ -176,10 +177,6 @@ services: - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w - GODEBUG=netdns=go - depends_on: - router: - condition: service_healthy - frontend: image: signoz/frontend:0.4.1 container_name: frontend @@ -189,7 +186,7 @@ services: links: - "query-service" ports: - - "3000:3000" + - "3301:3301" volumes: - ./nginx-config.conf:/etc/nginx/conf.d/default.conf diff --git a/deploy/install.sh b/deploy/install.sh index 54bef5d067..2ee9983ff1 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -102,7 +102,7 @@ check_os() { # The script should error out in case they aren't available check_ports_occupied() { local port_check_output - local ports_pattern="80|3000|8080" + local 
ports_pattern="80|3301|8080" if is_mac; then port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')" @@ -225,7 +225,7 @@ wait_for_containers_start() { # The while loop is important because for-loops don't work for dynamic values while [[ $timeout -gt 0 ]]; do - status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)" + status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)" if [[ status_code -eq 200 ]]; then break else @@ -492,7 +492,7 @@ else echo "" echo "🟢 Your installation is complete!" echo "" - echo -e "🟢 Your frontend is running on http://localhost:3000" + echo -e "🟢 Your frontend is running on http://localhost:3301" echo "" if [ $setup_type == 'clickhouse' ]; then diff --git a/frontend/Dockerfile b/frontend/Dockerfile index ce3d7022ef..0fb029f5f3 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -32,6 +32,6 @@ RUN rm -rf /usr/share/nginx/html/* # Copy from the stahg 1 COPY --from=builder /frontend/build /usr/share/nginx/html -EXPOSE 3000 +EXPOSE 3301 ENTRYPOINT ["nginx", "-g", "daemon off;"] \ No newline at end of file diff --git a/frontend/README.md b/frontend/README.md index 34269bf73a..99a36710cb 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -44,7 +44,7 @@ In the project directory, you can run: ### `yarn start` Runs the app in the development mode.\ -Open [http://localhost:3000](http://localhost:3000) to view it in the browser. +Open [http://localhost:3301](http://localhost:3301) to view it in the browser. The page will reload if you make edits.\ You will also see any lint errors in the console. 
diff --git a/frontend/conf/default.conf b/frontend/conf/default.conf index ec9fb99d70..dc0475eaf4 100644 --- a/frontend/conf/default.conf +++ b/frontend/conf/default.conf @@ -1,5 +1,5 @@ server { - listen 3000; + listen 3301; server_name _; gzip on; diff --git a/frontend/docker-compose.yml b/frontend/docker-compose.yml index 2a451ded94..8bc085de40 100644 --- a/frontend/docker-compose.yml +++ b/frontend/docker-compose.yml @@ -4,4 +4,4 @@ services: build: . image: signoz/frontend:latest ports: - - "3000:3000" + - "3301:3301" diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js index 4e27524146..3c2b8488fe 100644 --- a/frontend/webpack.config.js +++ b/frontend/webpack.config.js @@ -35,7 +35,7 @@ const config = { open: true, hot: true, liveReload: true, - port: portFinderSync.getPort(3000), + port: portFinderSync.getPort(3301), static: { directory: resolve(__dirname, 'public'), publicPath: '/', diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index ba20131876..ab124de5e9 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -168,7 +168,7 @@ func (r *ClickHouseReader) Start() { notifier := notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) // notifier.ApplyConfig(conf) - ExternalURL, err := computeExternalURL("", "0.0.0.0:3000") + ExternalURL, err := computeExternalURL("", "0.0.0.0:3301") if err != nil { fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", ExternalURL.String())) os.Exit(2) From 1acf009e628d07e960177783bc74cb1534088d15 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 8 Feb 2022 23:14:36 +0530 Subject: [PATCH 60/81] =?UTF-8?q?docs:=20=F0=9F=93=9D=20use=203301=20for?= =?UTF-8?q?=20frontend=20port=20in=20README?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- deploy/README.md | 6 +++--- 1 file changed, 3 insertions(+), 
3 deletions(-)

diff --git a/deploy/README.md b/deploy/README.md
index cbd7d8a63a..a67aa0b168 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -33,7 +33,7 @@ For Mac with Apple chip (arm):
 docker-compose -f docker/clickhouse-setup/docker-compose.arm.yaml up -d
 ```
 
-Open http://localhost:3000 in your favourite browser. In couple of minutes, you should see
+Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
 the data generated from hotrod in SigNoz UI.
 
 ## Kubernetes
@@ -53,10 +53,10 @@ helm -n platform install my-release signoz/signoz
 To access the UI, you can `port-forward` the frontend service:
 
 ```sh
-kubectl -n platform port-forward svc/my-release-frontend 3000:3000
+kubectl -n platform port-forward svc/my-release-frontend 3301:3301
 ```
 
-Open http://localhost:3000 in your favourite browser. Few minutes after you generate load
+Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
 from the HotROD application, you should see the data generated from hotrod in SigNoz UI.
#### Test HotROD application with SigNoz From 2de65748357dc24d1616635d3cca076ef39030b0 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Tue, 8 Feb 2022 23:29:26 +0530 Subject: [PATCH 61/81] =?UTF-8?q?chore(k3s):=20=F0=9F=A9=B9=20set=20up=20h?= =?UTF-8?q?otrod=20at=20start?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/e2e-k3s.yaml | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml index 132139615b..16b02cc943 100644 --- a/.github/workflows/e2e-k3s.yaml +++ b/.github/workflows/e2e-k3s.yaml @@ -29,6 +29,17 @@ jobs: - name: Inject the images to the cluster run: k3d image import signoz/query-service:$DOCKER_TAG signoz/frontend:$DOCKER_TAG -c signoz + - name: Set up HotROD sample-app + run: | + # create sample-application namespace + kubectl create ns sample-application + + # apply hotrod k8s manifest file + kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml + + # wait for all deployments in sample-application namespace to be READY + kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s + - name: Deploy the app run: | # add signoz helm repository @@ -54,15 +65,6 @@ jobs: - name: Kick off a sample-app workload run: | - # create sample-application namespace - kubectl create ns sample-application - - # apply hotrod k8s manifest file - kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml - - # wait for all deployments in sample-application namespace to be READY - kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s - # start the locust swarm kubectl -n sample-application run strzal --image=djbingham/curl \ 
--restart='OnFailure' -i --rm --command -- curl -X POST -F \ From be8ec756c621f1a260011b8c036077686cda3e37 Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Wed, 9 Feb 2022 11:31:13 +0530 Subject: [PATCH 62/81] Feat (UI) :Trace Filter page is updated (#684) * dayjs and less loader is added * webpack config is added * moment is removed * useDebounceFunction hook is made * old components and reducer is removed * search is updated * changes are upadted for the trace page as skeleton is ready * chore: method is change from dayjs * convertObject into params is updated * initial filters are updated * initial and final filter issue is fixed * selection of the filter is updated * filters are now able to selected * checkbox disable when loading is in progress * chore: getFilter filename is updated * feat: clear all and exapanded filter is updated * chore: clearAll and expand panel is updated * feat: useClickOutSide hook is added * chore: get filter url becomes encoded * chore: get tag filters is added * feat: search tags is wip * bug: global max,min on change bug is resolved * chore: getInitial filter is updated * chore: expand panel is updated * chore: get filter is updated * chore: code smells is updated * feat: loader is added in the panel header to show the loading * chore: search tags in wip * chore: button style is updated * chore: search in wip * chore: search ui is updated from the global state * chore: search in wip * chore: search is updated * chore: getSpansAggregate section is updated * useOutside click is updated * useclickoutside hook is updated * useclickoutside hook is updated * parsing is updated * initial filter is updated * feat: trace table is updated * chore: trace table is updated * chore: useClickout side is updated for the search panel * feat: unneccesary re-render and code is removed * chore: trace table is updated * custom component is removed and used antd search component * error state is updated over search component * chore: search bar is 
updated * chore: left panel search and table component connection is updated * chore: trace filter config is updated * chore: for graph reducer is updated * chore: graph is updated * chore: table is updated * chore: spans is updated * chore: reducer is updated * chore: graph component is updated * chore: number of graph condition is updated * chore: input and range slider is now sync * chore: duration is updated * chore: clearAllFilter is updated * chore: duration slider is updated * chore: duration is updated and panel body loading is updated * chore: slider container is added to add padding from left to right * chore: Select filter is updated * chore: duration filter is updated * chore: Divider is added * chore: none option is added in both the dropdown * chore: icon are updated * chore: added padding in the pages component * chore: none is updated * chore: antd notification is added in the redux action * chore: some of the changes are updated * chore: display value is updated for the filter panel heading * chore: calulation is memorised * chore: utils function are updated in trace reducer * chore: getFilters are updated * tracetable is updated * chore: actions is updated * chore: metrics application is updated * chore: search on clear action is updated * chore: serviceName panel position is updated * chore: added the label in the duration * bug: edge case is fixed * chore: some more changes are updated * chore: some more changes are updated * chore: clear all is fixed * chore: panel heading caret is updated * chore: checkbox is updated * chore: isError handler is updated over initial render * chore: traces is updated * fix: tag search is updated * chore: loading is added in the trace table and soring is introduced in the trace table * bug: multiple render for the key is fixed * Bug(UI): new suggestion is updated * feat: isTraceFilterEnum function is made * bug: new changes are updated * chore: get Filter is updated * chore: application metrics params is updated 
* chore: error is added in the application metrics * chore: filters is updated * chore: expand panel edge case is updated * chore: expand panel is updated and utls: updateUrl function is updated * chore: reset trace state when unmounted * chore: getFilter action is updated * chore: api duration is updated * chore: useEffect dependency is updated * chore: filter is updated with the new arch * bug: trace table issue is resolved * chore: application rps url is updated for trace * chore: duration filter is updated * chore: search key is updated * chore: filter is added in the search url * bug: filter is fixed * bug: filter is fixed * bug: filter is fixed * chore: reset trace data when unmounted * chore: TopEnd point is added * chore: getInitialSpanAggregate action is updated * chore: application url is updated * chore: no tags placeholder is updated * chore: flow from customer is now fixed * chore: search is updated * chore: select all button is removed * chore: prev filter is removed to show the result * chore: config is updated * chore: checkbox component is updated * chore: span filter is updated * chore: graph issue is resolved * chore: selected is updated * chore: all filter are selected * feat: new trace page is updated * chore: utils is updated * feat: trace filter page is updated * chore: duration is now fixed * chore: duration clear filter is added * chore: onClickCheck is updated * chore: trace filter page is updated * bug: some of bugs are resolved * chore: duration body is updated * chore: topEndPoint and application query is updated * chore: user selection is updated in the duration filter * chore: panel duration is updated * chore: panel duration is updated * chore: duration bug is solved * chore: function display value is updated --- frontend/package.json | 3 + frontend/src/AppRoutes/pageComponents.ts | 2 +- frontend/src/api/alerts/getGroup.ts | 5 +- frontend/src/api/trace/getFilters.ts | 48 +++ frontend/src/api/trace/getServiceList.ts | 24 -- 
frontend/src/api/trace/getServiceOperation.ts | 24 -- frontend/src/api/trace/getSpan.ts | 26 -- frontend/src/api/trace/getSpanAggregate.ts | 26 -- frontend/src/api/trace/getSpans.ts | 59 +++ frontend/src/api/trace/getSpansAggregate.ts | 60 +++ frontend/src/api/trace/getTagFilter.ts | 38 ++ frontend/src/api/trace/getTags.ts | 24 -- frontend/src/components/DatePicker/index.tsx | 7 + frontend/src/components/Graph/index.tsx | 2 +- .../Header/CustomDateTimeModal/index.tsx | 12 +- .../Header/DateTimeSelection/index.tsx | 55 +-- .../MetricsApplication/Tabs/Application.tsx | 19 +- .../MetricsApplication/TopEndpointsTable.tsx | 10 +- .../Panel/PanelBody/Common/Checkbox.tsx | 191 +++++++++ .../Filters/Panel/PanelBody/Common/styles.ts | 11 + .../Panel/PanelBody/CommonCheckBox/index.tsx | 36 ++ .../Panel/PanelBody/Duration/index.tsx | 212 ++++++++++ .../Panel/PanelBody/Duration/styles.ts | 27 ++ .../Trace/Filters/Panel/PanelBody/index.tsx | 37 ++ .../Trace/Filters/Panel/PanelBody/styles.ts | 3 + .../Filters/Panel/PanelHeading/index.tsx | 317 ++++++++++++++ .../Filters/Panel/PanelHeading/styles.ts | 49 +++ .../container/Trace/Filters/Panel/index.tsx | 28 ++ .../src/container/Trace/Filters/index.tsx | 27 ++ .../src/container/Trace/Filters/styles.ts | 20 + frontend/src/container/Trace/Graph/config.ts | 123 ++++++ frontend/src/container/Trace/Graph/index.tsx | 48 +++ frontend/src/container/Trace/Graph/styles.ts | 19 + .../Trace/Search/AllTags/Tag/TagKey.tsx | 102 +++++ .../Trace/Search/AllTags/Tag/index.tsx | 130 ++++++ .../Trace/Search/AllTags/Tag/styles.ts | 39 ++ .../container/Trace/Search/AllTags/index.tsx | 158 +++++++ .../container/Trace/Search/AllTags/styles.ts | 56 +++ frontend/src/container/Trace/Search/index.tsx | 163 ++++++++ frontend/src/container/Trace/Search/styles.ts | 17 + frontend/src/container/Trace/Search/util.ts | 84 ++++ .../Trace/TraceGraphFilter/config.ts | 91 ++++ .../Trace/TraceGraphFilter/index.tsx | 85 ++++ .../Trace/TraceGraphFilter/styles.ts | 9 + 
.../src/container/Trace/TraceTable/index.tsx | 155 +++++++ .../TraceCustomGraph.tsx | 44 -- .../TraceCustomVisualization/config.ts | 56 --- .../TraceCustomVisualization/index.tsx | 127 ------ .../TraceCustomVisualization/styles.ts | 34 -- frontend/src/container/TraceFilter/Filter.tsx | 182 -------- .../src/container/TraceFilter/LatencyForm.tsx | 160 ------- frontend/src/container/TraceFilter/config.ts | 15 - frontend/src/container/TraceFilter/index.tsx | 390 ------------------ frontend/src/container/TraceFilter/styles.ts | 34 -- frontend/src/container/TraceList/index.tsx | 141 ------- frontend/src/container/TraceList/styles.ts | 7 - frontend/src/hooks/useClickOutside.ts | 26 ++ frontend/src/hooks/useDebouncedFunction.ts | 34 ++ .../src/lib/query/convertObjectIntoParams.ts | 15 + frontend/src/pages/Trace/index.tsx | 157 +++++++ frontend/src/pages/Trace/styles.ts | 37 ++ frontend/src/pages/TraceDetails/index.tsx | 76 ---- .../src/store/actions/trace/getInitialData.ts | 201 --------- .../store/actions/trace/getInitialFilter.ts | 173 ++++++++ .../actions/trace/getInitialSpansAggregate.ts | 115 ++++++ frontend/src/store/actions/trace/getSpans.ts | 96 +++++ .../actions/trace/getTraceVisualAgrregates.ts | 92 ----- frontend/src/store/actions/trace/index.ts | 9 - .../store/actions/trace/loadingCompleted.ts | 12 - .../actions/trace/parseFilter/current.ts | 36 ++ .../store/actions/trace/parseFilter/filter.ts | 43 ++ .../trace/parseFilter/filterToFetchData.ts | 37 ++ .../store/actions/trace/parseFilter/index.ts | 8 + .../trace/parseFilter/isFilterExclude.ts | 44 ++ .../actions/trace/parseFilter/minMaxTime.ts | 20 + .../trace/parseFilter/selectedFilter.ts | 43 ++ .../actions/trace/parseFilter/selectedTags.ts | 37 ++ .../trace/parseFilter/skippedSelected.ts | 33 ++ .../store/actions/trace/resetTraceDetails.ts | 10 - .../store/actions/trace/selectTraceFilter.ts | 50 +++ .../store/actions/trace/updateIsTagsError.ts | 17 + .../actions/trace/updateSelectedAggOption.ts | 16 - 
.../store/actions/trace/updateSelectedData.ts | 164 -------- .../actions/trace/updateSelectedEntity.ts | 16 - .../store/actions/trace/updateSelectedKind.ts | 16 - .../actions/trace/updateSelectedLatency.ts | 16 - .../actions/trace/updateSelectedOperation.ts | 16 - .../actions/trace/updateSelectedService.ts | 16 - .../store/actions/trace/updateSelectedTags.ts | 94 ----- .../store/actions/trace/updateSpanLoading.ts | 16 - .../actions/trace/updateTagPanelVisiblity.ts | 17 + .../store/actions/trace/updateTagsSelected.ts | 17 + frontend/src/store/actions/trace/util.ts | 79 ++++ frontend/src/store/reducers/index.ts | 7 +- frontend/src/store/reducers/trace.ts | 301 +++++++------- frontend/src/types/actions/trace.ts | 251 +++++------ frontend/src/types/api/trace/getFilters.ts | 15 + .../src/types/api/trace/getServiceList.ts | 1 - .../types/api/trace/getServiceOperation.ts | 5 - .../src/types/api/trace/getSpanAggregate.ts | 26 +- frontend/src/types/api/trace/getSpans.ts | 67 +-- frontend/src/types/api/trace/getTagFilters.ts | 13 + frontend/src/types/api/trace/getTags.ts | 10 - frontend/src/types/reducer/trace.ts | 138 +++++-- frontend/webpack.config.js | 19 + frontend/webpack.config.prod.js | 19 + frontend/yarn.lock | 92 ++++- 107 files changed, 4206 insertions(+), 2533 deletions(-) create mode 100644 frontend/src/api/trace/getFilters.ts delete mode 100644 frontend/src/api/trace/getServiceList.ts delete mode 100644 frontend/src/api/trace/getServiceOperation.ts delete mode 100644 frontend/src/api/trace/getSpan.ts delete mode 100644 frontend/src/api/trace/getSpanAggregate.ts create mode 100644 frontend/src/api/trace/getSpans.ts create mode 100644 frontend/src/api/trace/getSpansAggregate.ts create mode 100644 frontend/src/api/trace/getTagFilter.ts delete mode 100644 frontend/src/api/trace/getTags.ts create mode 100644 frontend/src/components/DatePicker/index.tsx create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/Common/Checkbox.tsx create mode 100644 
frontend/src/container/Trace/Filters/Panel/PanelBody/Common/styles.ts create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/CommonCheckBox/index.tsx create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/styles.ts create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/index.tsx create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelBody/styles.ts create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelHeading/index.tsx create mode 100644 frontend/src/container/Trace/Filters/Panel/PanelHeading/styles.ts create mode 100644 frontend/src/container/Trace/Filters/Panel/index.tsx create mode 100644 frontend/src/container/Trace/Filters/index.tsx create mode 100644 frontend/src/container/Trace/Filters/styles.ts create mode 100644 frontend/src/container/Trace/Graph/config.ts create mode 100644 frontend/src/container/Trace/Graph/index.tsx create mode 100644 frontend/src/container/Trace/Graph/styles.ts create mode 100644 frontend/src/container/Trace/Search/AllTags/Tag/TagKey.tsx create mode 100644 frontend/src/container/Trace/Search/AllTags/Tag/index.tsx create mode 100644 frontend/src/container/Trace/Search/AllTags/Tag/styles.ts create mode 100644 frontend/src/container/Trace/Search/AllTags/index.tsx create mode 100644 frontend/src/container/Trace/Search/AllTags/styles.ts create mode 100644 frontend/src/container/Trace/Search/index.tsx create mode 100644 frontend/src/container/Trace/Search/styles.ts create mode 100644 frontend/src/container/Trace/Search/util.ts create mode 100644 frontend/src/container/Trace/TraceGraphFilter/config.ts create mode 100644 frontend/src/container/Trace/TraceGraphFilter/index.tsx create mode 100644 frontend/src/container/Trace/TraceGraphFilter/styles.ts create mode 100644 frontend/src/container/Trace/TraceTable/index.tsx delete mode 100644 
frontend/src/container/TraceCustomVisualization/TraceCustomGraph.tsx delete mode 100644 frontend/src/container/TraceCustomVisualization/config.ts delete mode 100644 frontend/src/container/TraceCustomVisualization/index.tsx delete mode 100644 frontend/src/container/TraceCustomVisualization/styles.ts delete mode 100644 frontend/src/container/TraceFilter/Filter.tsx delete mode 100644 frontend/src/container/TraceFilter/LatencyForm.tsx delete mode 100644 frontend/src/container/TraceFilter/config.ts delete mode 100644 frontend/src/container/TraceFilter/index.tsx delete mode 100644 frontend/src/container/TraceFilter/styles.ts delete mode 100644 frontend/src/container/TraceList/index.tsx delete mode 100644 frontend/src/container/TraceList/styles.ts create mode 100644 frontend/src/hooks/useClickOutside.ts create mode 100644 frontend/src/hooks/useDebouncedFunction.ts create mode 100644 frontend/src/lib/query/convertObjectIntoParams.ts create mode 100644 frontend/src/pages/Trace/index.tsx create mode 100644 frontend/src/pages/Trace/styles.ts delete mode 100644 frontend/src/pages/TraceDetails/index.tsx delete mode 100644 frontend/src/store/actions/trace/getInitialData.ts create mode 100644 frontend/src/store/actions/trace/getInitialFilter.ts create mode 100644 frontend/src/store/actions/trace/getInitialSpansAggregate.ts create mode 100644 frontend/src/store/actions/trace/getSpans.ts delete mode 100644 frontend/src/store/actions/trace/getTraceVisualAgrregates.ts delete mode 100644 frontend/src/store/actions/trace/index.ts delete mode 100644 frontend/src/store/actions/trace/loadingCompleted.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/current.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/filter.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/filterToFetchData.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/index.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/isFilterExclude.ts 
create mode 100644 frontend/src/store/actions/trace/parseFilter/minMaxTime.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/selectedFilter.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/selectedTags.ts create mode 100644 frontend/src/store/actions/trace/parseFilter/skippedSelected.ts delete mode 100644 frontend/src/store/actions/trace/resetTraceDetails.ts create mode 100644 frontend/src/store/actions/trace/selectTraceFilter.ts create mode 100644 frontend/src/store/actions/trace/updateIsTagsError.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedAggOption.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedData.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedEntity.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedKind.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedLatency.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedOperation.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedService.ts delete mode 100644 frontend/src/store/actions/trace/updateSelectedTags.ts delete mode 100644 frontend/src/store/actions/trace/updateSpanLoading.ts create mode 100644 frontend/src/store/actions/trace/updateTagPanelVisiblity.ts create mode 100644 frontend/src/store/actions/trace/updateTagsSelected.ts create mode 100644 frontend/src/store/actions/trace/util.ts create mode 100644 frontend/src/types/api/trace/getFilters.ts delete mode 100644 frontend/src/types/api/trace/getServiceList.ts delete mode 100644 frontend/src/types/api/trace/getServiceOperation.ts create mode 100644 frontend/src/types/api/trace/getTagFilters.ts delete mode 100644 frontend/src/types/api/trace/getTags.ts diff --git a/frontend/package.json b/frontend/package.json index fbde581ba4..4eaf1f5fd8 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -42,11 +42,14 @@ "d3": "^6.2.0", "d3-flame-graph": "^3.1.1", "d3-tip": "^0.9.1", + 
"dayjs": "^1.10.7", "dotenv": "8.2.0", "file-loader": "6.1.1", "history": "4.10.1", "html-webpack-plugin": "5.1.0", "jest": "26.6.0", + "less": "^4.1.2", + "less-loader": "^10.2.0", "mini-css-extract-plugin": "2.4.5", "monaco-editor": "^0.30.0", "react": "17.0.0", diff --git a/frontend/src/AppRoutes/pageComponents.ts b/frontend/src/AppRoutes/pageComponents.ts index ee9095f014..feb99a7c24 100644 --- a/frontend/src/AppRoutes/pageComponents.ts +++ b/frontend/src/AppRoutes/pageComponents.ts @@ -19,7 +19,7 @@ export const ServiceMapPage = Loadable( ); export const TraceDetailPages = Loadable( - () => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/TraceDetails'), + () => import(/* webpackChunkName: "TraceDetailPage" */ 'pages/Trace'), ); export const TraceGraphPage = Loadable( diff --git a/frontend/src/api/alerts/getGroup.ts b/frontend/src/api/alerts/getGroup.ts index f1cb2c6d77..adfd0c18e6 100644 --- a/frontend/src/api/alerts/getGroup.ts +++ b/frontend/src/api/alerts/getGroup.ts @@ -3,14 +3,13 @@ import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; import { AxiosError } from 'axios'; import { ErrorResponse, SuccessResponse } from 'types/api'; import { PayloadProps, Props } from 'types/api/alerts/getGroups'; +import convertObjectIntoParams from 'lib/query/convertObjectIntoParams'; const getGroups = async ( props: Props, ): Promise | ErrorResponse> => { try { - const queryParams = Object.keys(props) - .map((e) => `${e}=${props[e]}`) - .join('&'); + const queryParams = convertObjectIntoParams(props); const response = await AxiosAlertManagerInstance.get( `/alerts/groups?${queryParams}`, diff --git a/frontend/src/api/trace/getFilters.ts b/frontend/src/api/trace/getFilters.ts new file mode 100644 index 0000000000..6fb484673f --- /dev/null +++ b/frontend/src/api/trace/getFilters.ts @@ -0,0 +1,48 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import { ErrorResponse, 
SuccessResponse } from 'types/api'; +import { PayloadProps, Props } from 'types/api/trace/getFilters'; +import omitBy from 'lodash-es/omitBy'; + +const getFilters = async ( + props: Props, +): Promise | ErrorResponse> => { + try { + const duration = + omitBy(props.other, (_, key) => !key.startsWith('duration')) || []; + + const nonDuration = omitBy(props.other, (_, key) => + key.startsWith('duration'), + ); + + const exclude: string[] = []; + + props.isFilterExclude.forEach((value, key) => { + if (value) { + exclude.push(key); + } + }); + + const response = await axios.post(`/getSpanFilters`, { + start: props.start, + end: props.end, + getFilters: props.getFilters, + ...nonDuration, + maxDuration: String((duration['duration'] || [])[0] || ''), + minDuration: String((duration['duration'] || [])[1] || ''), + exclude: exclude, + }); + + return { + statusCode: 200, + error: null, + message: 'Success', + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getFilters; diff --git a/frontend/src/api/trace/getServiceList.ts b/frontend/src/api/trace/getServiceList.ts deleted file mode 100644 index 0fc063e721..0000000000 --- a/frontend/src/api/trace/getServiceList.ts +++ /dev/null @@ -1,24 +0,0 @@ -import axios from 'api'; -import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; -import { AxiosError } from 'axios'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps } from 'types/api/trace/getServiceList'; - -const getServiceList = async (): Promise< - SuccessResponse | ErrorResponse -> => { - try { - const response = await axios.get('/services/list'); - - return { - statusCode: 200, - error: null, - message: 'Success', - payload: response.data, - }; - } catch (error) { - return ErrorResponseHandler(error as AxiosError); - } -}; - -export default getServiceList; diff --git a/frontend/src/api/trace/getServiceOperation.ts 
b/frontend/src/api/trace/getServiceOperation.ts deleted file mode 100644 index 04ee9c954a..0000000000 --- a/frontend/src/api/trace/getServiceOperation.ts +++ /dev/null @@ -1,24 +0,0 @@ -import axios from 'api'; -import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; -import { AxiosError } from 'axios'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps, Props } from 'types/api/trace/getServiceOperation'; - -const getServiceOperation = async ( - props: Props, -): Promise | ErrorResponse> => { - try { - const response = await axios.get(`/service/${props.service}/operations`); - - return { - statusCode: 200, - error: null, - message: 'Success', - payload: response.data, - }; - } catch (error) { - return ErrorResponseHandler(error as AxiosError); - } -}; - -export default getServiceOperation; diff --git a/frontend/src/api/trace/getSpan.ts b/frontend/src/api/trace/getSpan.ts deleted file mode 100644 index 6b36b23cdc..0000000000 --- a/frontend/src/api/trace/getSpan.ts +++ /dev/null @@ -1,26 +0,0 @@ -import axios from 'api'; -import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; -import { AxiosError } from 'axios'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps, Props } from 'types/api/trace/getSpans'; - -const getSpans = async ( - props: Props, -): Promise | ErrorResponse> => { - try { - const response = await axios.get( - `/spans?&start=${props.start}&end=${props.end}&kind=${props.kind}&lookback=${props.lookback}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&limit=${props.limit}&tags=${props.tags}`, - ); - - return { - statusCode: 200, - error: null, - message: 'Success', - payload: response.data, - }; - } catch (error) { - return ErrorResponseHandler(error as AxiosError); - } -}; - -export default getSpans; diff --git a/frontend/src/api/trace/getSpanAggregate.ts b/frontend/src/api/trace/getSpanAggregate.ts 
deleted file mode 100644 index d233dc25e5..0000000000 --- a/frontend/src/api/trace/getSpanAggregate.ts +++ /dev/null @@ -1,26 +0,0 @@ -import axios from 'api'; -import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; -import { AxiosError } from 'axios'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate'; - -const getSpansAggregate = async ( - props: Props, -): Promise | ErrorResponse> => { - try { - const response = await axios.get( - `/spans/aggregates?start=${props.start}&end=${props.end}&aggregation_option=${props.aggregation_option}&dimension=${props.dimension}&kind=${props.kind}&maxDuration=${props.maxDuration}&minDuration=${props.minDuration}&operation=${props.operation}&service=${props.service}&step=${props.step}&tags=${props.tags}`, - ); - - return { - statusCode: 200, - error: null, - message: 'Success', - payload: response.data, - }; - } catch (error) { - return ErrorResponseHandler(error as AxiosError); - } -}; - -export default getSpansAggregate; diff --git a/frontend/src/api/trace/getSpans.ts b/frontend/src/api/trace/getSpans.ts new file mode 100644 index 0000000000..8527eb2732 --- /dev/null +++ b/frontend/src/api/trace/getSpans.ts @@ -0,0 +1,59 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import omitBy from 'lodash-es/omitBy'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps, Props } from 'types/api/trace/getSpans'; + +const getSpans = async ( + props: Props, +): Promise | ErrorResponse> => { + try { + const updatedSelectedTags = props.selectedTags.map((e) => ({ + Key: e.Key[0], + Operator: e.Operator, + Values: e.Values, + })); + + const exclude: string[] = []; + + props.isFilterExclude.forEach((value, key) => { + if (value) { + exclude.push(key); + } + }); + + const other = Object.fromEntries(props.selectedFilter); + + const duration = 
omitBy(other, (_, key) => !key.startsWith('duration')) || []; + + const nonDuration = omitBy(other, (_, key) => key.startsWith('duration')); + + const response = await axios.post( + `/getFilteredSpans/aggregates`, + { + start: String(props.start), + end: String(props.end), + function: props.function, + groupBy: props.groupBy, + step: props.step, + tags: updatedSelectedTags, + ...nonDuration, + maxDuration: String((duration['duration'] || [])[0] || ''), + minDuration: String((duration['duration'] || [])[1] || ''), + exclude, + }, + ); + + return { + statusCode: 200, + error: null, + message: 'Success', + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getSpans; diff --git a/frontend/src/api/trace/getSpansAggregate.ts b/frontend/src/api/trace/getSpansAggregate.ts new file mode 100644 index 0000000000..1f63f034e4 --- /dev/null +++ b/frontend/src/api/trace/getSpansAggregate.ts @@ -0,0 +1,60 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import omitBy from 'lodash-es/omitBy'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps, Props } from 'types/api/trace/getSpanAggregate'; +import { TraceFilterEnum } from 'types/reducer/trace'; + +const getSpanAggregate = async ( + props: Props, +): Promise | ErrorResponse> => { + try { + const preProps = { + start: String(props.start), + end: String(props.end), + limit: props.limit, + offset: props.offset, + }; + + const exclude: TraceFilterEnum[] = []; + + props.isFilterExclude.forEach((value, key) => { + if (value) { + exclude.push(key); + } + }); + + const updatedSelectedTags = props.selectedTags.map((e) => ({ + Key: e.Key[0], + Operator: e.Operator, + Values: e.Values, + })); + + const other = Object.fromEntries(props.selectedFilter); + + const duration = omitBy(other, (_, key) => !key.startsWith('duration')) || []; + + const 
nonDuration = omitBy(other, (_, key) => key.startsWith('duration')); + + const response = await axios.post(`/getFilteredSpans`, { + ...preProps, + tags: updatedSelectedTags, + ...nonDuration, + maxDuration: String((duration['duration'] || [])[0] || ''), + minDuration: String((duration['duration'] || [])[1] || ''), + exclude, + }); + + return { + statusCode: 200, + error: null, + message: 'Success', + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getSpanAggregate; diff --git a/frontend/src/api/trace/getTagFilter.ts b/frontend/src/api/trace/getTagFilter.ts new file mode 100644 index 0000000000..d54ceb8905 --- /dev/null +++ b/frontend/src/api/trace/getTagFilter.ts @@ -0,0 +1,38 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import { omitBy } from 'lodash-es'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps, Props } from 'types/api/trace/getTagFilters'; + +const getTagFilters = async ( + props: Props, +): Promise | ErrorResponse> => { + try { + const duration = + omitBy(props.other, (_, key) => !key.startsWith('duration')) || []; + + const nonDuration = omitBy(props.other, (_, key) => + key.startsWith('duration'), + ); + + const response = await axios.post(`/getTagFilters`, { + start: String(props.start), + end: String(props.end), + ...nonDuration, + maxDuration: String((duration['duration'] || [])[0] || ''), + minDuration: String((duration['duration'] || [])[1] || ''), + }); + + return { + statusCode: 200, + error: null, + message: 'Success', + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getTagFilters; diff --git a/frontend/src/api/trace/getTags.ts b/frontend/src/api/trace/getTags.ts deleted file mode 100644 index 430c25381a..0000000000 --- a/frontend/src/api/trace/getTags.ts +++ 
/dev/null @@ -1,24 +0,0 @@ -import axios from 'api'; -import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; -import { AxiosError } from 'axios'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps, Props } from 'types/api/trace/getTags'; - -const getTags = async ( - props: Props, -): Promise | ErrorResponse> => { - try { - const response = await axios.get(`/tags?service=${props.service}`); - - return { - statusCode: 200, - error: null, - message: 'Success', - payload: response.data, - }; - } catch (error) { - return ErrorResponseHandler(error as AxiosError); - } -}; - -export default getTags; diff --git a/frontend/src/components/DatePicker/index.tsx b/frontend/src/components/DatePicker/index.tsx new file mode 100644 index 0000000000..f76439d9d2 --- /dev/null +++ b/frontend/src/components/DatePicker/index.tsx @@ -0,0 +1,7 @@ +import { Dayjs } from 'dayjs'; +import dayjsGenerateConfig from 'rc-picker/lib/generate/dayjs'; +import generatePicker from 'antd/es/date-picker/generatePicker'; + +const DatePicker = generatePicker(dayjsGenerateConfig); + +export default DatePicker; diff --git a/frontend/src/components/Graph/index.tsx b/frontend/src/components/Graph/index.tsx index b6905495d4..eac5ae2712 100644 --- a/frontend/src/components/Graph/index.tsx +++ b/frontend/src/components/Graph/index.tsx @@ -148,7 +148,7 @@ const Graph = ({ useEffect(() => { buildChart(); - }, [buildChart]); + }, []); return (
diff --git a/frontend/src/container/Header/CustomDateTimeModal/index.tsx b/frontend/src/container/Header/CustomDateTimeModal/index.tsx index 8ebb25bd32..168a0c8117 100644 --- a/frontend/src/container/Header/CustomDateTimeModal/index.tsx +++ b/frontend/src/container/Header/CustomDateTimeModal/index.tsx @@ -1,8 +1,8 @@ -import { DatePicker, Modal } from 'antd'; -import { Moment } from 'moment'; -import moment from 'moment'; +import { Modal } from 'antd'; import React, { useState } from 'react'; -export type DateTimeRangeType = [Moment | null, Moment | null] | null; +export type DateTimeRangeType = [Dayjs | null, Dayjs | null] | null; +import DatePicker from 'components/DatePicker'; +import dayjs, { Dayjs } from 'dayjs'; const { RangePicker } = DatePicker; @@ -20,8 +20,8 @@ const CustomDateTimeModal = ({ setCustomDateTimeRange(date_time); } - function disabledDate(current: Moment): boolean { - if (current > moment()) { + function disabledDate(current: Dayjs): boolean { + if (current > dayjs()) { return true; } else { return false; diff --git a/frontend/src/container/Header/DateTimeSelection/index.tsx b/frontend/src/container/Header/DateTimeSelection/index.tsx index c6c5ff4015..ce7dcce6d6 100644 --- a/frontend/src/container/Header/DateTimeSelection/index.tsx +++ b/frontend/src/container/Header/DateTimeSelection/index.tsx @@ -8,7 +8,7 @@ import getLocalStorageKey from 'api/browser/localstorage/get'; import setLocalStorageKey from 'api/browser/localstorage/set'; import { LOCAL_STORAGE } from 'constants/localStorage'; import getTimeString from 'lib/getTimeString'; -import moment from 'moment'; +import dayjs, { Dayjs } from 'dayjs'; import { connect, useSelector } from 'react-redux'; import { RouteComponentProps, withRouter } from 'react-router'; import { bindActionCreators, Dispatch } from 'redux'; @@ -37,26 +37,18 @@ const DateTimeSelection = ({ const getTime = useCallback((): [number, number] | undefined => { if (searchEndTime && searchStartTime) { - const startMoment = 
moment( + const startDate = dayjs( new Date(parseInt(getTimeString(searchStartTime), 10)), ); - const endMoment = moment( - new Date(parseInt(getTimeString(searchEndTime), 10)), - ); + const endDate = dayjs(new Date(parseInt(getTimeString(searchEndTime), 10))); - return [ - startMoment.toDate().getTime() || 0, - endMoment.toDate().getTime() || 0, - ]; + return [startDate.toDate().getTime() || 0, endDate.toDate().getTime() || 0]; } if (localstorageStartTime && localstorageEndTime) { - const startMoment = moment(localstorageStartTime); - const endMoment = moment(localstorageEndTime); + const startDate = dayjs(localstorageStartTime); + const endDate = dayjs(localstorageEndTime); - return [ - startMoment.toDate().getTime() || 0, - endMoment.toDate().getTime() || 0, - ]; + return [startDate.toDate().getTime() || 0, endDate.toDate().getTime() || 0]; } return undefined; }, [ @@ -66,8 +58,8 @@ const DateTimeSelection = ({ searchStartTime, ]); - const [startTime, setStartTime] = useState(); - const [endTime, setEndTime] = useState(); + const [startTime, setStartTime] = useState(); + const [endTime, setEndTime] = useState(); const [options, setOptions] = useState(getOptions(location.pathname)); const [refreshButtonHidden, setRefreshButtonHidden] = useState(false); @@ -136,8 +128,8 @@ const DateTimeSelection = ({ }; const getInputLabel = ( - startTime?: moment.Moment, - endTime?: moment.Moment, + startTime?: Dayjs, + endTime?: Dayjs, timeInterval: Time = '15min', ): string | Time => { if (startTime && endTime && timeInterval === 'custom') { @@ -153,18 +145,18 @@ const DateTimeSelection = ({ }; const onLastRefreshHandler = useCallback(() => { - const currentTime = moment(); + const currentTime = dayjs(); - const lastRefresh = moment( + const lastRefresh = dayjs( selectedTimeInterval === 'custom' ? 
minTime / 1000000 : maxTime / 1000000, ); - const duration = moment.duration(currentTime.diff(lastRefresh)); - const secondsDiff = Math.floor(duration.asSeconds()); - const minutedDiff = Math.floor(duration.asMinutes()); - const hoursDiff = Math.floor(duration.asHours()); - const daysDiff = Math.floor(duration.asDays()); - const monthsDiff = Math.floor(duration.asMonths()); + const secondsDiff = currentTime.diff(lastRefresh, 'seconds'); + + const minutedDiff = currentTime.diff(lastRefresh, 'minutes'); + const hoursDiff = currentTime.diff(lastRefresh, 'hours'); + const daysDiff = currentTime.diff(lastRefresh, 'days'); + const monthsDiff = currentTime.diff(lastRefresh, 'months'); if (monthsDiff > 0) { return `Last refresh -${monthsDiff} months ago`; @@ -242,8 +234,8 @@ const DateTimeSelection = ({ const [preStartTime = 0, preEndTime = 0] = getTime() || []; - setStartTime(moment(preStartTime)); - setEndTime(moment(preEndTime)); + setStartTime(dayjs(preStartTime)); + setEndTime(dayjs(preEndTime)); updateTimeInterval(updatedTime, [preStartTime, preEndTime]); }, [ @@ -318,8 +310,3 @@ const mapDispatchToProps = ( type Props = DispatchProps & RouteComponentProps; export default connect(null, mapDispatchToProps)(withRouter(DateTimeSelection)); - -// DateTimeSelection.whyDidYouRender = { -// logOnDifferentValues: true, -// customName: 'DateTimeSelection', -// }; diff --git a/frontend/src/container/MetricsApplication/Tabs/Application.tsx b/frontend/src/container/MetricsApplication/Tabs/Application.tsx index 6c126216cc..005033f999 100644 --- a/frontend/src/container/MetricsApplication/Tabs/Application.tsx +++ b/frontend/src/container/MetricsApplication/Tabs/Application.tsx @@ -32,11 +32,12 @@ const Application = ({ getWidget }: DashboardProps): JSX.Element => { const urlParams = new URLSearchParams(); urlParams.set(METRICS_PAGE_QUERY_PARAM.startTime, currentTime.toString()); urlParams.set(METRICS_PAGE_QUERY_PARAM.endTime, tPlusOne.toString()); - if (servicename) { - 
urlParams.set(METRICS_PAGE_QUERY_PARAM.service, servicename); - } - history.push(`${ROUTES.TRACE}?${urlParams.toString()}`); + history.replace( + `${ + ROUTES.TRACE + }?${urlParams.toString()}&selected={"serviceName":["${servicename}"],"status":["ok","error"]}&filterToFetchData=["duration","status","serviceName"]&userSelectedFilter={"status":["error","ok"],"serviceName":["${servicename}"]}&isSelectedFilterSkipped=true`, + ); }; const onClickhandler = async ( @@ -85,12 +86,12 @@ const Application = ({ getWidget }: DashboardProps): JSX.Element => { const urlParams = new URLSearchParams(); urlParams.set(METRICS_PAGE_QUERY_PARAM.startTime, currentTime.toString()); urlParams.set(METRICS_PAGE_QUERY_PARAM.endTime, tPlusOne.toString()); - if (servicename) { - urlParams.set(METRICS_PAGE_QUERY_PARAM.service, servicename); - } - urlParams.set(METRICS_PAGE_QUERY_PARAM.error, 'true'); - history.push(`${ROUTES.TRACE}?${urlParams.toString()}`); + history.replace( + `${ + ROUTES.TRACE + }?${urlParams.toString()}&selected={"serviceName":["${servicename}"],"status":["error"]}&filterToFetchData=["duration","status","serviceName"]&userSelectedFilter={"status":["error"],"serviceName":["${servicename}"]}&isSelectedFilterSkipped=true`, + ); }; return ( diff --git a/frontend/src/container/MetricsApplication/TopEndpointsTable.tsx b/frontend/src/container/MetricsApplication/TopEndpointsTable.tsx index 914423f418..9cd8f54821 100644 --- a/frontend/src/container/MetricsApplication/TopEndpointsTable.tsx +++ b/frontend/src/container/MetricsApplication/TopEndpointsTable.tsx @@ -28,12 +28,12 @@ const TopEndpointsTable = (props: TopEndpointsTableProps): JSX.Element => { METRICS_PAGE_QUERY_PARAM.endTime, (maxTime / 1000000).toString(), ); - if (servicename) { - urlParams.set(METRICS_PAGE_QUERY_PARAM.service, servicename); - } - urlParams.set(METRICS_PAGE_QUERY_PARAM.operation, operation); - history.push(`${ROUTES.TRACE}?${urlParams.toString()}`); + history.push( + `${ + ROUTES.TRACE + 
}?${urlParams.toString()}&selected={"status":["error","ok"],"serviceName":["${servicename}"],"operation":["${operation}"]}&filterToFetchData=["duration","status","serviceName","operation"]&isSelectedFilterSkipped=true&userSelectedFilter={"status":["error","ok"],"serviceName":["${servicename}"],"operation":["${operation}"]}&isSelectedFilterSkipped=true`, + ); }; const columns: ColumnsType = [ diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/Checkbox.tsx b/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/Checkbox.tsx new file mode 100644 index 0000000000..7ee07f2c20 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/Checkbox.tsx @@ -0,0 +1,191 @@ +import React, { useState } from 'react'; +import { CheckBoxContainer } from './styles'; +import { Checkbox, notification, Typography } from 'antd'; +import { connect, useDispatch, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; + +import { SelectedTraceFilter } from 'store/actions/trace/selectTraceFilter'; +import AppActions from 'types/actions'; +import { ThunkDispatch } from 'redux-thunk'; +import { bindActionCreators, Dispatch } from 'redux'; +import { getFilter, updateURL } from 'store/actions/trace/util'; +import getFilters from 'api/trace/getFilters'; +import { AxiosError } from 'axios'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import { UPDATE_ALL_FILTERS } from 'types/actions/trace'; + +const CheckBoxComponent = (props: CheckBoxProps): JSX.Element => { + const { + selectedFilter, + filterLoading, + filterToFetchData, + spansAggregate, + selectedTags, + filter, + userSelectedFilter, + isFilterExclude, + } = useSelector((state) => state.traces); + + const globalTime = useSelector( + (state) => state.globalTime, + ); + + const dispatch = useDispatch>(); + + const [isLoading, setIsLoading] = useState(false); + + const isUserSelected = + 
(userSelectedFilter.get(props.name) || []).find( + (e) => e === props.keyValue, + ) !== undefined; + + const onCheckHandler = async () => { + try { + setIsLoading(true); + + const newSelectedMap = new Map(selectedFilter); + const preUserSelectedMap = new Map(userSelectedFilter); + const preIsFilterExclude = new Map(isFilterExclude); + + const isTopicPresent = preUserSelectedMap.get(props.name); + + // append the value + if (!isTopicPresent) { + preUserSelectedMap.set(props.name, [props.keyValue]); + } else { + const isValuePresent = + isTopicPresent.find((e) => e === props.keyValue) !== undefined; + + // check the value if present then remove the value or isChecked + if (isValuePresent) { + preUserSelectedMap.set( + props.name, + isTopicPresent.filter((e) => e !== props.keyValue), + ); + } else { + // if not present add into the array of string + preUserSelectedMap.set(props.name, [...isTopicPresent, props.keyValue]); + } + } + + if (newSelectedMap.get(props.name)?.find((e) => e === props.keyValue)) { + newSelectedMap.set(props.name, [ + ...(newSelectedMap.get(props.name) || []).filter( + (e) => e !== props.keyValue, + ), + ]); + } else { + newSelectedMap.set(props.name, [ + ...new Set([...(newSelectedMap.get(props.name) || []), props.keyValue]), + ]); + } + + if (preIsFilterExclude.get(props.name) !== false) { + preIsFilterExclude.set(props.name, true); + } + + const response = await getFilters({ + other: Object.fromEntries(newSelectedMap), + end: String(globalTime.maxTime), + start: String(globalTime.minTime), + getFilters: filterToFetchData.filter((e) => e !== props.name), + isFilterExclude: preIsFilterExclude, + }); + + if (response.statusCode === 200) { + const updatedFilter = getFilter(response.payload); + + updatedFilter.forEach((value, key) => { + if (key !== 'duration' && props.name !== key) { + preUserSelectedMap.set(key, Object.keys(value)); + } + }); + + updatedFilter.set(props.name, { + [`${props.keyValue}`]: '-1', + ...(filter.get(props.name) || {}), 
+ ...(updatedFilter.get(props.name) || {}), + }); + + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + selectedTags, + current: spansAggregate.currentPage, + filter: updatedFilter, + filterToFetchData, + selectedFilter: newSelectedMap, + userSelected: preUserSelectedMap, + isFilterExclude: preIsFilterExclude, + }, + }); + + setIsLoading(false); + + updateURL( + newSelectedMap, + filterToFetchData, + spansAggregate.currentPage, + selectedTags, + updatedFilter, + preIsFilterExclude, + preUserSelectedMap, + ); + } else { + setIsLoading(false); + + notification.error({ + message: response.error || 'Something went wrong', + }); + } + } catch (error) { + notification.error({ + message: (error as AxiosError).toString() || 'Something went wrong', + }); + setIsLoading(false); + } + }; + + const isCheckBoxSelected = isUserSelected; + + return ( + + + {props.keyValue} + + {isCheckBoxSelected ? ( + {props.value} + ) : ( + - + )} + + ); +}; + +interface DispatchProps { + selectedTraceFilter: (props: { + topic: TraceFilterEnum; + value: string; + }) => void; +} + +interface CheckBoxProps extends DispatchProps { + keyValue: string; + value: string; + name: TraceFilterEnum; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + selectedTraceFilter: bindActionCreators(SelectedTraceFilter, dispatch), +}); + +export default connect(null, mapDispatchToProps)(CheckBoxComponent); diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/styles.ts b/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/styles.ts new file mode 100644 index 0000000000..33a51a5112 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/Common/styles.ts @@ -0,0 +1,11 @@ +import styled from 'styled-components'; + +export const CheckBoxContainer = styled.div` + display: flex; + justify-content: space-between; + margin-left: 1rem; + margin-right: 1rem; + + margin-top: 0.5rem; + margin-bottom: 0.5rem; +`; diff --git 
a/frontend/src/container/Trace/Filters/Panel/PanelBody/CommonCheckBox/index.tsx b/frontend/src/container/Trace/Filters/Panel/PanelBody/CommonCheckBox/index.tsx new file mode 100644 index 0000000000..a19e7c4ecd --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/CommonCheckBox/index.tsx @@ -0,0 +1,36 @@ +import React from 'react'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import CheckBoxComponent from '../Common/Checkbox'; + +const CommonCheckBox = (props: CommonCheckBoxProps): JSX.Element => { + const { filter } = useSelector( + (state) => state.traces, + ); + + const status = filter.get(props.name) || {}; + + const statusObj = Object.keys(status); + + return ( + <> + {statusObj.map((e) => ( + + ))} + + ); +}; + +interface CommonCheckBoxProps { + name: TraceFilterEnum; +} + +export default CommonCheckBox; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx new file mode 100644 index 0000000000..f6025b8fbd --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/index.tsx @@ -0,0 +1,212 @@ +import React, { useState } from 'react'; + +import { Input, Slider } from 'antd'; +import { Container, InputContainer, Text } from './styles'; +import { useDispatch, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import useDebouncedFn from 'hooks/useDebouncedFunction'; +import { getFilter, updateURL } from 'store/actions/trace/util'; +import dayjs from 'dayjs'; +import durationPlugin from 'dayjs/plugin/duration'; +import { Dispatch } from 'redux'; +import AppActions from 'types/actions'; +import { UPDATE_ALL_FILTERS } from 'types/actions/trace'; +import getFilters from 'api/trace/getFilters'; +import { GlobalReducer } from 
'types/reducer/globalTime'; +import { SliderRangeProps } from 'antd/lib/slider'; + +dayjs.extend(durationPlugin); + +const getMs = (value: string) => { + return dayjs + .duration({ + milliseconds: parseInt(value, 10) / 1000000, + }) + .format('SSS'); +}; + +const Duration = (): JSX.Element => { + const { + filter, + selectedFilter, + filterToFetchData, + spansAggregate, + selectedTags, + userSelectedFilter, + isFilterExclude, + } = useSelector((state) => state.traces); + + const dispatch = useDispatch>(); + const globalTime = useSelector( + (state) => state.globalTime, + ); + + const getDuration = () => { + const selectedDuration = selectedFilter.get('duration'); + + if (selectedDuration) { + return { + maxDuration: selectedDuration[0], + minDuration: selectedDuration[1], + }; + } + + return filter.get('duration') || {}; + }; + + const duration = getDuration(); + + const maxDuration = duration['maxDuration'] || '0'; + const minDuration = duration['minDuration'] || '0'; + + const [localMax, setLocalMax] = useState(maxDuration); + const [localMin, setLocalMin] = useState(minDuration); + + const defaultValue = [parseFloat(minDuration), parseFloat(maxDuration)]; + + const updatedUrl = async (min: number, max: number) => { + const preSelectedFilter = new Map(selectedFilter); + const preUserSelected = new Map(userSelectedFilter); + + preSelectedFilter.set('duration', [String(max), String(min)]); + + console.log('on the update Url'); + const response = await getFilters({ + end: String(globalTime.maxTime), + getFilters: filterToFetchData, + other: Object.fromEntries(preSelectedFilter), + start: String(globalTime.minTime), + isFilterExclude, + }); + + if (response.statusCode === 200) { + const preFilter = getFilter(response.payload); + + preFilter.forEach((value, key) => { + if (key !== 'duration') { + preUserSelected.set(key, Object.keys(value)); + } + }); + + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + current: spansAggregate.currentPage, + filter: preFilter, + 
filterToFetchData, + selectedFilter: preSelectedFilter, + selectedTags, + userSelected: preUserSelected, + isFilterExclude, + }, + }); + + updateURL( + preSelectedFilter, + filterToFetchData, + spansAggregate.currentPage, + selectedTags, + preFilter, + isFilterExclude, + userSelectedFilter, + ); + } + }; + + const onRangeSliderHandler = (number: [number, number]) => { + const [min, max] = number; + + setLocalMin(min.toString()); + setLocalMax(max.toString()); + }; + + const debouncedFunction = useDebouncedFn( + (min, max) => { + console.log('debounce function'); + updatedUrl(min, max); + }, + 500, + undefined, + [], + ); + + const onChangeMaxHandler: React.ChangeEventHandler = ( + event, + ) => { + const value = event.target.value; + const min = parseFloat(localMin); + const max = parseFloat(value) * 1000000; + + console.log('on change in max'); + + onRangeSliderHandler([min, max]); + debouncedFunction(min, max); + }; + + const onChangeMinHandler: React.ChangeEventHandler = ( + event, + ) => { + const value = event.target.value; + const min = parseFloat(value) * 1000000; + const max = parseFloat(localMax); + onRangeSliderHandler([min, max]); + console.log('on change in min'); + debouncedFunction(min, max); + }; + + const onRangeHandler: SliderRangeProps['onChange'] = ([min, max]) => { + updatedUrl(min, max); + }; + + return ( +
+ + + Min + + + + + Max + + + + + + { + if (value === undefined) { + return ''; + } + return
{`${getMs(value.toString())}ms`}
; + }} + onChange={([min, max]) => { + onRangeSliderHandler([min, max]); + }} + onAfterChange={onRangeHandler} + // onAfterChange={([min, max]) => { + // const returnFunction = debounce((min, max) => updatedUrl(min, max)); + + // returnFunction(min, max); + // }} + value={[parseFloat(localMin), parseFloat(localMax)]} + /> +
+
+ ); +}; + +export default Duration; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/styles.ts b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/styles.ts new file mode 100644 index 0000000000..150391fce2 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/Duration/styles.ts @@ -0,0 +1,27 @@ +import styled from 'styled-components'; +import { Typography } from 'antd'; + +export const DurationText = styled.div` + display: flex; + align-items: center; + justify-content: space-around; + min-height: 8vh; + flex-direction: column; +`; + +export const InputContainer = styled.div` + width: 100%; + margin-top: 0.5rem; + margin-bottom: 0.2rem; +`; + +export const Text = styled(Typography)` + &&& { + text-align: left; + } +`; + +export const Container = styled.div` + padding-left: 1rem; + padding-right: 1rem; +`; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/index.tsx b/frontend/src/container/Trace/Filters/Panel/PanelBody/index.tsx new file mode 100644 index 0000000000..98364e14d4 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/index.tsx @@ -0,0 +1,37 @@ +import React from 'react'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { Card } from 'antd'; + +import Duration from './Duration'; +import CommonCheckBox from './CommonCheckBox'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import Spinner from 'components/Spinner'; + +const PanelBody = (props: PanelBodyProps): JSX.Element => { + const { type } = props; + + const { filterLoading } = useSelector( + (state) => state.traces, + ); + + if (filterLoading) { + return ( + + + + ); + } + + return ( + + {type === 'duration' ? 
: } + + ); +}; + +interface PanelBodyProps { + type: TraceFilterEnum; +} + +export default PanelBody; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelBody/styles.ts b/frontend/src/container/Trace/Filters/Panel/PanelBody/styles.ts new file mode 100644 index 0000000000..c33898348f --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelBody/styles.ts @@ -0,0 +1,3 @@ +import styled from 'styled-components'; + +export const Container = styled.div``; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelHeading/index.tsx b/frontend/src/container/Trace/Filters/Panel/PanelHeading/index.tsx new file mode 100644 index 0000000000..96fc5d5dc7 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelHeading/index.tsx @@ -0,0 +1,317 @@ +import React, { useState } from 'react'; +import { DownOutlined, RightOutlined } from '@ant-design/icons'; +import { Card, Typography, Divider, notification } from 'antd'; + +import { + ButtonComponent, + ButtonContainer, + Container, + IconContainer, + TextCotainer, +} from './styles'; +import { useDispatch, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +const { Text } = Typography; + +import { AllPanelHeading } from 'types/reducer/trace'; +import getFilters from 'api/trace/getFilters'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import { getFilter, updateURL } from 'store/actions/trace/util'; +import AppActions from 'types/actions'; +import { Dispatch } from 'redux'; +import { UPDATE_ALL_FILTERS } from 'types/actions/trace'; +import { AxiosError } from 'axios'; + +const PanelHeading = (props: PanelHeadingProps): JSX.Element => { + const { + filterLoading, + filterToFetchData, + selectedFilter, + spansAggregate, + selectedTags, + filter, + isFilterExclude, + userSelectedFilter, + } = useSelector((state) => state.traces); + + const isDefaultOpen = + filterToFetchData.find((e) => e === 
props.name) !== undefined; + + const [isLoading, setIsLoading] = useState(false); + + const global = useSelector( + (state) => state.globalTime, + ); + + const dispatch = useDispatch>(); + + const onExpandHandler: React.MouseEventHandler = async (e) => { + try { + e.preventDefault(); + e.stopPropagation(); + + setIsLoading(true); + let updatedFilterData: TraceReducer['filterToFetchData'] = []; + const getprepdatedSelectedFilter = new Map(selectedFilter); + const getPreUserSelected = new Map(userSelectedFilter); + + if (!isDefaultOpen) { + updatedFilterData = [props.name]; + } else { + // removing the selected filter + updatedFilterData = [ + ...filterToFetchData.filter((name) => name !== props.name), + ]; + getprepdatedSelectedFilter.delete(props.name); + getPreUserSelected.delete(props.name); + } + + const response = await getFilters({ + end: String(global.maxTime), + start: String(global.minTime), + getFilters: updatedFilterData, + other: Object.fromEntries(getprepdatedSelectedFilter), + isFilterExclude, + }); + + if (response.statusCode === 200) { + const updatedFilter = getFilter(response.payload); + + // is closed + if (!isDefaultOpen) { + // getprepdatedSelectedFilter.set( + // props.name, + // Object.keys(updatedFilter.get(props.name) || {}), + // ); + + getPreUserSelected.set( + props.name, + Object.keys(updatedFilter.get(props.name) || {}), + ); + + updatedFilterData = [...filterToFetchData, props.name]; + } + + // now append the non prop.name trace filter enum over the list + // selectedFilter.forEach((value, key) => { + // if (key !== props.name) { + // getprepdatedSelectedFilter.set(key, value); + // } + // }); + + getPreUserSelected.forEach((value, key) => { + if (key !== props.name) { + getPreUserSelected.set(key, value); + } + }); + filter.forEach((value, key) => { + if (key !== props.name) { + updatedFilter.set(key, value); + } + }); + + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + current: spansAggregate.currentPage, + filter: 
updatedFilter, + filterToFetchData: updatedFilterData, + selectedFilter: getprepdatedSelectedFilter, + selectedTags, + userSelected: getPreUserSelected, + isFilterExclude, + }, + }); + + updateURL( + getprepdatedSelectedFilter, + updatedFilterData, + spansAggregate.currentPage, + selectedTags, + updatedFilter, + isFilterExclude, + getPreUserSelected, + ); + } else { + notification.error({ + message: response.error || 'Something went wrong', + }); + } + + setIsLoading(false); + } catch (error) { + notification.error({ + message: (error as AxiosError).toString() || 'Something went wrong', + }); + } + }; + + const onClearAllHandler = async () => { + try { + setIsLoading(true); + const updatedFilter = new Map(selectedFilter); + const preUserSelected = new Map(userSelectedFilter); + + updatedFilter.delete(props.name); + preUserSelected.delete(props.name); + + const postIsFilterExclude = new Map(isFilterExclude); + + postIsFilterExclude.set(props.name, false); + + const response = await getFilters({ + end: String(global.maxTime), + start: String(global.minTime), + getFilters: filterToFetchData, + other: Object.fromEntries(preUserSelected), + isFilterExclude: postIsFilterExclude, + }); + + if (response.statusCode === 200 && response.payload) { + const getUpatedFilter = getFilter(response.payload); + + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + current: spansAggregate.currentPage, + filter: getUpatedFilter, + filterToFetchData, + selectedFilter: updatedFilter, + selectedTags, + userSelected: preUserSelected, + isFilterExclude: postIsFilterExclude, + }, + }); + + updateURL( + updatedFilter, + filterToFetchData, + spansAggregate.currentPage, + selectedTags, + getUpatedFilter, + postIsFilterExclude, + preUserSelected, + ); + } else { + notification.error({ + message: response.error || 'Something went wrong', + }); + } + setIsLoading(false); + } catch (error) { + notification.error({ + message: (error as AxiosError).toString(), + }); + setIsLoading(false); + } + }; 
+ + // const onSelectAllHandler = async () => { + // try { + // setIsLoading(true); + // const preFilter = new Map(filter); + // const preSelectedFilter = new Map(selectedFilter); + + // preSelectedFilter.set( + // props.name, + // Object.keys(preFilter.get(props.name) || {}), + // ); + + // const response = await getFilters({ + // end: String(global.maxTime), + // start: String(global.minTime), + // getFilters: filterToFetchData, + // other: Object.fromEntries(preSelectedFilter), + // }); + + // if (response.statusCode === 200 && response.payload) { + // const getUpatedFilter = getFilter(response.payload); + + // preSelectedFilter.set( + // props.name, + // Object.keys(getUpatedFilter.get(props.name) || {}), + // ); + + // dispatch({ + // type: UPDATE_ALL_FILTERS, + // payload: { + // current: spansAggregate.currentPage, + // filter: preFilter, + // filterToFetchData, + // selectedFilter: preSelectedFilter, + // selectedTags, + // }, + // }); + + // updateURL( + // preSelectedFilter, + // filterToFetchData, + // spansAggregate.currentPage, + // selectedTags, + // preFilter, + // ); + // } + // setIsLoading(false); + // } catch (error) { + // setIsLoading(false); + + // notification.error({ + // message: (error as AxiosError).toString(), + // }); + // } + // }; + + return ( + <> + {props.name !== 'duration' && } + + + + + + {!props.isOpen ? 
: } + + + + {AllPanelHeading.find((e) => e.key === props.name)?.displayValue || ''} + + + + {props.name !== 'duration' && ( + + {/* + Select All + */} + + + Clear All + + + )} + + + + ); +}; + +interface PanelHeadingProps { + name: TraceFilterEnum; + isOpen: boolean; +} + +export default PanelHeading; diff --git a/frontend/src/container/Trace/Filters/Panel/PanelHeading/styles.ts b/frontend/src/container/Trace/Filters/Panel/PanelHeading/styles.ts new file mode 100644 index 0000000000..aae92d18d6 --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/PanelHeading/styles.ts @@ -0,0 +1,49 @@ +import { Button } from 'antd'; +import styled, { css } from 'styled-components'; + +interface Props { + disabled: boolean; +} + +export const Container = styled.div` + &&& { + display: flex; + justify-content: space-between; + align-items: center; + padding-left: 0.5rem; + min-height: 5vh; + + cursor: ${({ disabled }) => disabled && 'not-allowed'}; + + ${({ disabled }) => + disabled && + css` + opacity: 0.5; + `} + } +`; + +export const IconContainer = styled.div` + &&& { + margin-right: 0.5rem; + } +`; + +export const TextCotainer = styled.div` + &&& { + display: flex; + cursor: pointer; + } +`; + +export const ButtonComponent = styled(Button)` + &&& { + font-size: 0.75rem; + } +`; + +export const ButtonContainer = styled.div` + &&& { + display: flex; + } +`; diff --git a/frontend/src/container/Trace/Filters/Panel/index.tsx b/frontend/src/container/Trace/Filters/Panel/index.tsx new file mode 100644 index 0000000000..a6aee59c2d --- /dev/null +++ b/frontend/src/container/Trace/Filters/Panel/index.tsx @@ -0,0 +1,28 @@ +import React from 'react'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; + +import PanelBody from './PanelBody'; +import PanelHeading from './PanelHeading'; + +const Panel = (props: PanelProps): JSX.Element => { + const traces = useSelector((state) 
=> state.traces); + + const isDefaultOpen = + traces.filterToFetchData.find((e) => e === props.name) !== undefined; + + return ( + <> + + + {isDefaultOpen && } + + ); +}; + +interface PanelProps { + name: TraceFilterEnum; +} + +export default Panel; diff --git a/frontend/src/container/Trace/Filters/index.tsx b/frontend/src/container/Trace/Filters/index.tsx new file mode 100644 index 0000000000..0090e29726 --- /dev/null +++ b/frontend/src/container/Trace/Filters/index.tsx @@ -0,0 +1,27 @@ +import React from 'react'; +import { TraceFilterEnum } from 'types/reducer/trace'; + +import Panel from './Panel'; + +export const AllTraceFilterEnum: TraceFilterEnum[] = [ + 'duration', + 'status', + 'serviceName', + 'operation', + 'component', + 'httpCode', + 'httpHost', + 'httpMethod', + 'httpRoute', + 'httpUrl', +]; + +const Filters = (): JSX.Element => ( + + {AllTraceFilterEnum.map((panelName) => ( + + ))} + +); + +export default Filters; diff --git a/frontend/src/container/Trace/Filters/styles.ts b/frontend/src/container/Trace/Filters/styles.ts new file mode 100644 index 0000000000..a5a63ab931 --- /dev/null +++ b/frontend/src/container/Trace/Filters/styles.ts @@ -0,0 +1,20 @@ +import { Button, Input } from 'antd'; +import styled from 'styled-components'; + +export const DurationContainer = styled.div` + display: flex; + justify-content: space-between; + align-items: center; +`; + +export const InputComponent = styled(Input)` + &&& { + margin-left: 0.5rem; + margin-right: 0.5rem; + } +`; + +export const CheckBoxContainer = styled.div` + display: flex; + flex-direction: column; +`; diff --git a/frontend/src/container/Trace/Graph/config.ts b/frontend/src/container/Trace/Graph/config.ts new file mode 100644 index 0000000000..17344b52ef --- /dev/null +++ b/frontend/src/container/Trace/Graph/config.ts @@ -0,0 +1,123 @@ +import { ChartData, ChartDataset, ChartDatasetProperties } from 'chart.js'; +import { TraceReducer } from 'types/reducer/trace'; +import dayjs from 'dayjs'; 
+import { colors } from 'lib/getRandomColor'; + +function transposeArray(array: number[][], arrayLength: number) { + let newArray: number[][] = []; + for (let i = 0; i < array.length; i++) { + newArray.push([]); + } + + for (let i = 0; i < array.length; i++) { + for (let j = 0; j < arrayLength; j++) { + newArray[j]?.push(array[i][j]); + } + } + + return newArray; +} + +export const getChartData = ( + data: TraceReducer['spansGraph']['payload'], +): ChartData<'line'> => { + const allDataPoints = data.items; + + const chartDataset: ChartDatasetProperties<'line', number[]> = { + data: [], + type: 'line', + }; + + const chartLabels: ChartData<'line'>['labels'] = []; + + Object.keys(allDataPoints).forEach((timestamp) => { + const key = allDataPoints[timestamp]; + if (key.value) { + chartDataset.data.push(key.value); + const date = dayjs(key.timestamp / 1000000); + chartLabels.push(date.toDate().getTime()); + } + }); + + return { + datasets: [ + { + ...chartDataset, + borderWidth: 1.5, + spanGaps: true, + borderColor: colors[0] || 'red', + showLine: true, + pointRadius: 0, + }, + ], + labels: chartLabels, + }; +}; + +export const getChartDataforGroupBy = ( + props: TraceReducer['spansGraph']['payload'], +): ChartData => { + const items = props.items; + + const chartData: ChartData = { + datasets: [], + labels: [], + }; + + let max = 0; + + const allGroupBy = Object.keys(items).map((e) => items[e].groupBy); + + Object.keys(allGroupBy).map((e) => { + const length = Object.keys(allGroupBy[e]).length; + + if (length >= max) { + max = length; + } + }); + + const numberOfGraphs = max; + + const spansGraph: number[][] = []; + + const names: string[] = []; + + // number of data points + Object.keys(items).forEach((item) => { + const spanData = items[item]; + const date = dayjs(Number(item) / 1000000) + .toDate() + .getTime(); + + chartData.labels?.push(date); + + const groupBy = spanData.groupBy; + const preData: number[] = []; + + if (groupBy) { + 
Object.keys(groupBy).forEach((key) => { + const value = groupBy[key]; + preData.push(value); + names.push(key); + }); + + spansGraph.push(preData); + } + }); + + const updatedName = [...new Set(names)]; + + transposeArray(spansGraph, numberOfGraphs).forEach((values, index) => { + chartData.datasets.push({ + data: values.map((e) => e || 0), + borderWidth: 1.5, + spanGaps: true, + borderColor: colors[index] || 'red', + showLine: true, + pointRadius: 0, + label: updatedName[index], + }); + }); + + return chartData; +}; diff --git a/frontend/src/container/Trace/Graph/index.tsx b/frontend/src/container/Trace/Graph/index.tsx new file mode 100644 index 0000000000..a91fe78930 --- /dev/null +++ b/frontend/src/container/Trace/Graph/index.tsx @@ -0,0 +1,48 @@ +import React, { useMemo } from 'react'; + +import Graph from 'components/Graph'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import Spinner from 'components/Spinner'; +import { Container } from './styles'; +import { Typography } from 'antd'; +import { getChartData, getChartDataforGroupBy } from './config'; + +const TraceGraph = () => { + const { spansGraph, selectedGroupBy } = useSelector( + (state) => state.traces, + ); + + const { loading, error, errorMessage, payload } = spansGraph; + + const ChartData = useMemo(() => { + return selectedGroupBy.length === 0 + ? 
getChartData(payload) + : getChartDataforGroupBy(payload); + }, [payload]); + + if (error) { + return ( + + {errorMessage || 'Something went wrong'} + + ); + } + + if (loading || payload === undefined) { + return ( + + + + ); + } + + return ( + + + + ); +}; + +export default TraceGraph; diff --git a/frontend/src/container/Trace/Graph/styles.ts b/frontend/src/container/Trace/Graph/styles.ts new file mode 100644 index 0000000000..51a3e72ffc --- /dev/null +++ b/frontend/src/container/Trace/Graph/styles.ts @@ -0,0 +1,19 @@ +import styled, { css } from 'styled-components'; + +interface Props { + center?: boolean; +} + +export const Container = styled.div` + height: 25vh; + margin-top: 1rem; + margin-bottom: 1rem; + + ${({ center }) => + center && + css` + display: flex; + justify-content: center; + align-items: center; + `} +`; diff --git a/frontend/src/container/Trace/Search/AllTags/Tag/TagKey.tsx b/frontend/src/container/Trace/Search/AllTags/Tag/TagKey.tsx new file mode 100644 index 0000000000..f82a39a291 --- /dev/null +++ b/frontend/src/container/Trace/Search/AllTags/Tag/TagKey.tsx @@ -0,0 +1,102 @@ +import { AutoComplete, AutoCompleteProps, Input, notification } from 'antd'; +import getTagFilters from 'api/trace/getTagFilter'; +import React, { useEffect, useState } from 'react'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import { TraceReducer } from 'types/reducer/trace'; + +const TagsKey = (props: TagsKeysProps): JSX.Element => { + const [selectLoading, setSelectLoading] = useState(false); + const globalTime = useSelector( + (state) => state.globalTime, + ); + + const [selectedKey, setSelectedKey] = useState(props.tag.Key[0] || ''); + + const traces = useSelector((state) => state.traces); + + const [options, setOptions] = useState([]); + + const onSearchHandler = async () => { + try { + setSelectLoading(true); + const response = await getTagFilters({ + start: 
globalTime.minTime, + end: globalTime.maxTime, + other: Object.fromEntries(traces.selectedFilter), + }); + + if (response.statusCode === 200) { + if (response.payload === null) { + setOptions([ + { + value: '', + label: 'No tags available', + }, + ]); + } else { + setOptions( + response.payload.map((e) => ({ + value: e.tagKeys, + label: e.tagKeys, + })), + ); + } + } else { + notification.error({ + message: response.error || 'Something went wrong', + }); + } + setSelectLoading(false); + } catch (error) { + notification.error({ + message: 'Something went wrong', + }); + setSelectLoading(false); + } + }; + + useEffect(() => { + onSearchHandler(); + }, []); + + return ( + { + if (options && options.find((option) => option.value === value)) { + setSelectedKey(value); + + props.setLocalSelectedTags((tags) => [ + ...tags.slice(0, props.index), + { + Key: [value], + Operator: props.tag.Operator, + Values: props.tag.Values, + }, + ...tags.slice(props.index + 1, tags.length), + ]); + } else { + setSelectedKey(''); + } + }} + > + + + ); +}; + +interface TagsKeysProps { + index: number; + tag: FlatArray; + setLocalSelectedTags: React.Dispatch< + React.SetStateAction + >; +} + +export default TagsKey; diff --git a/frontend/src/container/Trace/Search/AllTags/Tag/index.tsx b/frontend/src/container/Trace/Search/AllTags/Tag/index.tsx new file mode 100644 index 0000000000..a377de487f --- /dev/null +++ b/frontend/src/container/Trace/Search/AllTags/Tag/index.tsx @@ -0,0 +1,130 @@ +import React from 'react'; + +import { Select } from 'antd'; +import { + Container, + IconContainer, + SelectComponent, + ValueSelect, +} from './styles'; +import { connect, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import { CloseOutlined } from '@ant-design/icons'; +import { SelectValue } from 'antd/lib/select'; +import { ThunkDispatch } from 'redux-thunk'; +import AppActions from 'types/actions'; +import { 
bindActionCreators } from 'redux'; +import { UpdateSelectedTags } from 'store/actions/trace/updateTagsSelected'; +import TagsKey from './TagKey'; +const { Option } = Select; + +type Tags = FlatArray['Operator']; + +const AllMenu: AllMenu[] = [ + { + key: 'in', + value: 'IN', + }, + { + key: 'not in', + value: 'NOT IN', + }, +]; + +interface AllMenu { + key: Tags | ''; + value: string; +} + +const SingleTags = (props: AllTagsProps): JSX.Element => { + const traces = useSelector((state) => state.traces); + const { + Key: selectedKey, + Operator: selectedOperator, + Values: selectedValues, + } = props.tag; + + const onDeleteTagHandler = (index: number) => { + props.onCloseHandler(index); + }; + + const onChangeOperatorHandler = (key: SelectValue) => { + props.setLocalSelectedTags([ + ...traces.selectedTags.slice(0, props.index), + { + Key: selectedKey, + Values: selectedValues, + Operator: key as Tags, + }, + ...traces.selectedTags.slice(props.index + 1, traces.selectedTags.length), + ]); + }; + + return ( + <> + + + + e.key === selectedOperator)?.value || ''} + > + {AllMenu.map((e) => ( + + ))} + + + { + props.setLocalSelectedTags((tags) => [ + ...tags.slice(0, props.index), + { + Key: selectedKey, + Operator: selectedOperator, + Values: value as string[], + }, + ...tags.slice(props.index + 1, tags.length), + ]); + }} + mode="tags" + /> + + onDeleteTagHandler(props.index)} + > + + + + + ); +}; + +interface DispatchProps { + updateSelectedTags: (props: TraceReducer['selectedTags']) => void; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + updateSelectedTags: bindActionCreators(UpdateSelectedTags, dispatch), +}); + +interface AllTagsProps extends DispatchProps { + onCloseHandler: (index: number) => void; + index: number; + tag: FlatArray; + setLocalSelectedTags: React.Dispatch< + React.SetStateAction + >; +} + +export default connect(null, mapDispatchToProps)(SingleTags); diff --git 
a/frontend/src/container/Trace/Search/AllTags/Tag/styles.ts b/frontend/src/container/Trace/Search/AllTags/Tag/styles.ts new file mode 100644 index 0000000000..91da16b124 --- /dev/null +++ b/frontend/src/container/Trace/Search/AllTags/Tag/styles.ts @@ -0,0 +1,39 @@ +import styled from 'styled-components'; +import { Button, Select, Space } from 'antd'; + +export const SpaceComponent = styled(Space)` + &&& { + width: 100%; + } +`; + +export const SelectComponent = styled(Select)` + &&& { + min-width: 170px; + margin-right: 21.91px; + margin-left: 21.92px; + } +`; + +export const ValueSelect = styled(Select)` + &&& { + width: 100%; + } +`; + +export const Container = styled.div` + &&& { + display: flex; + margin-top: 1rem; + margin-bottom: 1rem; + } +`; + +export const IconContainer = styled.div` + display: flex; + justify-content: center; + align-items: center; + cursor: pointer; + + margin-left: 1.125rem; +`; diff --git a/frontend/src/container/Trace/Search/AllTags/index.tsx b/frontend/src/container/Trace/Search/AllTags/index.tsx new file mode 100644 index 0000000000..e2a449ff6d --- /dev/null +++ b/frontend/src/container/Trace/Search/AllTags/index.tsx @@ -0,0 +1,158 @@ +import React, { useEffect, useState } from 'react'; + +import { Button, Space, Typography } from 'antd'; +import { CaretRightFilled } from '@ant-design/icons'; +import { + Container, + ButtonContainer, + CurrentTagsContainer, + Wrapper, + ErrorContainer, +} from './styles'; +import Tags from './Tag'; +const { Text } = Typography; +import { PlusOutlined } from '@ant-design/icons'; +import { connect, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import { bindActionCreators } from 'redux'; +import { ThunkDispatch } from 'redux-thunk'; +import AppActions from 'types/actions'; +import { UpdateTagIsError } from 'store/actions/trace/updateIsTagsError'; +import { parseTagsToQuery } from '../util'; +import { isEqual } from 
'lodash-es'; +import { UpdateTagVisiblity } from 'store/actions/trace/updateTagPanelVisiblity'; + +const { Paragraph } = Typography; + +const AllTags = ({ + updateTagIsError, + onChangeHandler, + updateTagVisiblity, + updateFilters, +}: AllTagsProps): JSX.Element => { + const traces = useSelector((state) => state.traces); + + const [localSelectedTags, setLocalSelectedTags] = useState< + TraceReducer['selectedTags'] + >(traces.selectedTags); + + const onTagAddHandler = () => { + setLocalSelectedTags((tags) => [ + ...tags, + { + Key: [], + Operator: 'in', + Values: [], + }, + ]); + }; + + useEffect(() => { + if (!isEqual(traces.selectedTags, localSelectedTags)) { + setLocalSelectedTags(traces.selectedTags); + } + }, [traces.selectedTags]); + + const onCloseHandler = (index: number) => { + setLocalSelectedTags([ + ...localSelectedTags.slice(0, index), + ...localSelectedTags.slice(index + 1, localSelectedTags.length), + ]); + }; + + const onRunQueryHandler = () => { + const parsedQuery = parseTagsToQuery(localSelectedTags); + + if (parsedQuery.isError) { + updateTagIsError(true); + } else { + onChangeHandler(parsedQuery.payload); + updateFilters(localSelectedTags); + updateTagIsError(false); + updateTagVisiblity(false); + } + }; + + const onResetHandler = () => { + setLocalSelectedTags([]); + }; + + if (traces.isTagModalError) { + return ( + + + Unrecognised query format. Please reset your query by clicking `X` in the + search bar above. 
+ + + + Please click on the search bar to get a drop down to select relevant tags + + + ); + } + + return ( + <> + + + Tags + + + {localSelectedTags.map((tags, index) => ( + onCloseHandler(index)} + setLocalSelectedTags={setLocalSelectedTags} + /> + ))} + + + + + + + Results will include spans with ALL the specified tags ( Rows are `anded` + ) + + + + + + + + + + + ); +}; + +interface DispatchProps { + updateTagIsError: (value: boolean) => void; + updateTagVisiblity: (value: boolean) => void; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + updateTagIsError: bindActionCreators(UpdateTagIsError, dispatch), + updateTagVisiblity: bindActionCreators(UpdateTagVisiblity, dispatch), +}); + +interface AllTagsProps extends DispatchProps { + updateFilters: (tags: TraceReducer['selectedTags']) => void; + onChangeHandler: (search: string) => void; +} + +export default connect(null, mapDispatchToProps)(AllTags); diff --git a/frontend/src/container/Trace/Search/AllTags/styles.ts b/frontend/src/container/Trace/Search/AllTags/styles.ts new file mode 100644 index 0000000000..ef875287a1 --- /dev/null +++ b/frontend/src/container/Trace/Search/AllTags/styles.ts @@ -0,0 +1,56 @@ +import styled from 'styled-components'; +import { Card } from 'antd'; + +export const Container = styled(Card)` + top: 120%; + min-height: 20vh; + width: 100%; + z-index: 2; + position: absolute; + + .ant-card-body { + padding-bottom: 0; + padding-right: 0; + padding-left: 0; + } +`; + +export const ErrorContainer = styled(Card)` + top: 120%; + min-height: 20vh; + width: 100%; + z-index: 2; + position: absolute; + + display: flex; + justify-content: center; + align-items: center; + flex-direction: column; +`; + +export const Wrapper = styled.div` + &&& { + padding-right: 2rem; + padding-left: 2rem; + } +`; + +export const ButtonContainer = styled.div` + display: flex; + justify-content: flex-end; + align-items: center; + background-color: #303030; + padding-top: 11px; + 
padding-bottom: 11px; + padding-right: 38.01px; + + margin-top: 1rem; + + > button:nth-child(1) { + margin-right: 1rem; + } +`; + +export const CurrentTagsContainer = styled.div` + margin-bottom: 1rem; +`; diff --git a/frontend/src/container/Trace/Search/index.tsx b/frontend/src/container/Trace/Search/index.tsx new file mode 100644 index 0000000000..1b6e59eba6 --- /dev/null +++ b/frontend/src/container/Trace/Search/index.tsx @@ -0,0 +1,163 @@ +import React, { useEffect, useRef, useState } from 'react'; +import { Space } from 'antd'; +import { Container, SearchComponent } from './styles'; +import useClickOutside from 'hooks/useClickOutside'; +import Tags from './AllTags'; +import { connect, useDispatch, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import { ThunkDispatch } from 'redux-thunk'; +import AppActions from 'types/actions'; +import { bindActionCreators, Dispatch } from 'redux'; +import { UpdateTagVisiblity } from 'store/actions/trace/updateTagPanelVisiblity'; +import { parseQueryToTags, parseTagsToQuery } from './util'; +import { UpdateTagIsError } from 'store/actions/trace/updateIsTagsError'; +import { CaretRightFilled } from '@ant-design/icons'; +import { updateURL } from 'store/actions/trace/util'; +import { UPDATE_ALL_FILTERS } from 'types/actions/trace'; + +const Search = ({ + updateTagVisiblity, + updateTagIsError, +}: SearchProps): JSX.Element => { + const traces = useSelector((state) => state.traces); + + const [value, setValue] = useState(''); + const dispatch = useDispatch>(); + + useEffect(() => { + if (traces.filterLoading) { + const initialTags = parseTagsToQuery(traces.selectedTags); + if (!initialTags.isError) { + setValue(initialTags.payload); + } + } + }, [traces.selectedTags, traces.filterLoading]); + + useEffect(() => { + if (value.length === 0 && traces.isTagModalError) { + updateTagIsError(false); + } + }, [traces.isTagModalError, value]); + + const 
tagRef = useRef(null); + + useClickOutside(tagRef, (e: HTMLElement) => { + // using this hack as overlay span is voilating this condition + if ( + e.nodeName === 'svg' || + e.nodeName === 'path' || + e.nodeName === 'span' || + e.nodeName === 'button' + ) { + return; + } + + if ( + e.nodeName === 'DIV' && + ![ + 'ant-select-item-option-content', + 'ant-empty-image', + 'ant-select-item', + 'ant-col', + 'ant-select-item-option-active', + ].find((p) => p.indexOf(e.className) !== -1) && + !(e.ariaSelected === 'true') && + traces.isTagModalOpen + ) { + updateTagVisiblity(false); + } + }); + + const onChangeHandler = (search: string) => { + setValue(search); + }; + + const setIsTagsModalHandler = (value: boolean) => { + updateTagVisiblity(value); + }; + + const onFocusHandler: React.FocusEventHandler = (e) => { + e.preventDefault(); + setIsTagsModalHandler(true); + }; + + const updateFilters = async (selectedTags: TraceReducer['selectedTags']) => { + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + selectedTags, + current: traces.spansAggregate.currentPage, + filter: traces.filter, + filterToFetchData: traces.filterToFetchData, + selectedFilter: traces.selectedFilter, + userSelected: traces.userSelectedFilter, + isFilterExclude: traces.isFilterExclude, + }, + }); + + updateURL( + traces.selectedFilter, + traces.filterToFetchData, + traces.spansAggregate.currentPage, + selectedTags, + traces.filter, + traces.isFilterExclude, + traces.userSelectedFilter, + ); + }; + + return ( + + + onChangeHandler(event.target.value)} + value={value} + allowClear + disabled={traces.filterLoading} + onFocus={onFocusHandler} + placeholder="Click to filter by tags" + type={'search'} + enterButton={} + onSearch={(string) => { + if (string.length === 0) { + updateTagVisiblity(false); + updateFilters([]); + return; + } + + const { isError, payload } = parseQueryToTags(string); + + if (isError) { + updateTagIsError(true); + } else { + updateTagIsError(false); + updateTagVisiblity(false); + 
updateFilters(payload); + } + }} + /> + + {traces.isTagModalOpen && ( + + )} + + + ); +}; + +interface DispatchProps { + updateTagVisiblity: (value: boolean) => void; + updateTagIsError: (value: boolean) => void; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + updateTagVisiblity: bindActionCreators(UpdateTagVisiblity, dispatch), + updateTagIsError: bindActionCreators(UpdateTagIsError, dispatch), +}); + +type SearchProps = DispatchProps; + +export default connect(null, mapDispatchToProps)(Search); diff --git a/frontend/src/container/Trace/Search/styles.ts b/frontend/src/container/Trace/Search/styles.ts new file mode 100644 index 0000000000..f6f342aca9 --- /dev/null +++ b/frontend/src/container/Trace/Search/styles.ts @@ -0,0 +1,17 @@ +import styled from 'styled-components'; +import { Input } from 'antd'; + +const { Search } = Input; + +export const Container = styled.div` + display: flex; + position: relative; +`; + +export const SearchComponent = styled(Search)` + .ant-btn-primary { + svg { + transform: scale(1.5); + } + } +`; diff --git a/frontend/src/container/Trace/Search/util.ts b/frontend/src/container/Trace/Search/util.ts new file mode 100644 index 0000000000..d74a133229 --- /dev/null +++ b/frontend/src/container/Trace/Search/util.ts @@ -0,0 +1,84 @@ +import { TraceReducer } from 'types/reducer/trace'; + +type Tags = TraceReducer['selectedTags']; + +interface PayloadProps { + isError: boolean; + payload: T; +} + +export const parseQueryToTags = (query: string): PayloadProps => { + let isError = false; + + const noOfTags = query.split(' AND '); + + const tags: Tags = noOfTags.map((filter) => { + const isInPresent = filter.includes('IN'); + const isNotInPresent = filter.includes('NOT_IN'); + + if (!isNotInPresent || !isInPresent) { + isError = true; + } + + const splitBy = isNotInPresent ? 'NOT_IN' : isInPresent ? 
'IN' : ''; + + if (splitBy.length === 0) { + isError = true; + } + + const filteredtags = filter.split(splitBy).map((e) => e.trim()); + + if (filteredtags.length !== 2) { + isError = true; + } + + const filterForTags = filteredtags[1]; + + if (!filterForTags) { + isError = true; + } + + const removingFirstAndLastBrackets = `${filterForTags?.slice(1, -1)}`; + + const noofFilters = removingFirstAndLastBrackets.split(','); + + noofFilters.forEach((e) => { + const firstChar = e.charAt(0); + const lastChar = e.charAt(e.length - 1); + + if (!(firstChar === '"' && lastChar === '"')) { + isError = true; + } + }); + + return { + Key: [filteredtags[0]], + Values: noofFilters, + Operator: splitBy as FlatArray['Operator'], + }; + }); + + return { + isError, + payload: tags, + }; +}; + +export const parseTagsToQuery = (tags: Tags): PayloadProps => { + let isError = false; + + const payload = tags + .map(({ Values, Key, Operator }) => { + if (Key[0] === undefined) { + isError = true; + } + + return `${Key[0]} ${Operator} (${Values.map((e) => `"${e}"`).join(',')})`; + }) + .join(' AND '); + + return { + isError, + payload, + }; +}; diff --git a/frontend/src/container/Trace/TraceGraphFilter/config.ts b/frontend/src/container/Trace/TraceGraphFilter/config.ts new file mode 100644 index 0000000000..5d7a942b35 --- /dev/null +++ b/frontend/src/container/Trace/TraceGraphFilter/config.ts @@ -0,0 +1,91 @@ +interface Dropdown { + key: string; + displayValue: string; +} + +export const groupBy: Dropdown[] = [ + { + key: '', + displayValue: 'None', + }, + { + key: 'serviceName', + displayValue: 'Service Name', + }, + + { + displayValue: 'Operation', + key: 'operation', + }, + { + displayValue: 'HTTP url', + key: 'httpUrl', + }, + { + displayValue: 'HTTP method', + key: 'httpMethod', + }, + { + displayValue: 'HTTP host', + key: 'httpHost', + }, + { + displayValue: 'HTTP route', + key: 'httpRoute', + }, + { + displayValue: 'HTTP status code', + key: 'httpCode', + }, + { + displayValue: 
'Database name', + key: 'dbName', + }, + { + displayValue: 'Database operation', + key: 'dbSystem', + }, + { + displayValue: 'Messaging System', + key: 'msgSystem', + }, + { + displayValue: 'Messaging Operation', + key: 'msgOperation', + }, + { + displayValue: 'Component', + key: 'component', + }, +]; + +export const functions: Dropdown[] = [ + { displayValue: 'Count', key: 'count' }, + { displayValue: 'Rate per sec', key: 'ratePerSec' }, + { displayValue: 'Sum(duration in ns)', key: 'sum' }, + { displayValue: 'Avg(duration in ns)', key: 'avg' }, + { + displayValue: 'Max(duration in ns)', + key: 'max', + }, + { + displayValue: 'Min(duration in ns)', + key: 'min', + }, + { + displayValue: '50th percentile(duration in ns)', + key: 'p50', + }, + { + displayValue: '90th percentile(duration in ns', + key: 'p90', + }, + { + displayValue: '95th percentile(duration in ns)', + key: 'p95', + }, + { + displayValue: '99th percentile(duration in ns)', + key: 'p99', + }, +]; diff --git a/frontend/src/container/Trace/TraceGraphFilter/index.tsx b/frontend/src/container/Trace/TraceGraphFilter/index.tsx new file mode 100644 index 0000000000..bd06764aad --- /dev/null +++ b/frontend/src/container/Trace/TraceGraphFilter/index.tsx @@ -0,0 +1,85 @@ +import React from 'react'; +import { Space, SelectProps } from 'antd'; +import { functions, groupBy } from './config'; +import { useDispatch, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import AppActions from 'types/actions'; +import { + UPDATE_SELECTED_FUNCTION, + UPDATE_SELECTED_GROUP_BY, +} from 'types/actions/trace'; +import { Dispatch } from 'redux'; +import { SelectComponent } from './styles'; +import { SelectValue } from 'antd/lib/select'; + +const { Option } = SelectComponent; + +const TraceGraphFilter = () => { + const { selectedFunction, selectedGroupBy } = useSelector< + AppState, + TraceReducer + >((state) => state.traces); + const dispatch = 
useDispatch>(); + + const onClickSelectedFunctionHandler: SelectProps['onChange'] = ( + ev, + ) => { + const selected = functions.find((e) => e.key === ev); + if (selected) { + dispatch({ + type: UPDATE_SELECTED_FUNCTION, + payload: { + selectedFunction: selected.key, + }, + }); + } + }; + + const onClickSelectedGroupByHandler: SelectProps['onChange'] = ( + ev, + ) => { + const selected = groupBy.find((e) => e.key === ev); + if (selected) { + dispatch({ + type: UPDATE_SELECTED_GROUP_BY, + payload: { + selectedGroupBy: selected.key, + }, + }); + } + }; + + return ( + + + + selectedFunction === e.key)?.displayValue} + onChange={onClickSelectedFunctionHandler} + > + {functions.map((value) => ( + + ))} + + + + selectedGroupBy === e.key)?.displayValue} + onChange={onClickSelectedGroupByHandler} + > + {groupBy.map((value) => ( + + ))} + + + ); +}; + +export default TraceGraphFilter; diff --git a/frontend/src/container/Trace/TraceGraphFilter/styles.ts b/frontend/src/container/Trace/TraceGraphFilter/styles.ts new file mode 100644 index 0000000000..be712d4cba --- /dev/null +++ b/frontend/src/container/Trace/TraceGraphFilter/styles.ts @@ -0,0 +1,9 @@ +import { Select } from 'antd'; + +import styled from 'styled-components'; + +export const SelectComponent = styled(Select)` + &&& { + min-width: 10rem; + } +`; diff --git a/frontend/src/container/Trace/TraceTable/index.tsx b/frontend/src/container/Trace/TraceTable/index.tsx new file mode 100644 index 0000000000..ce6133a089 --- /dev/null +++ b/frontend/src/container/Trace/TraceTable/index.tsx @@ -0,0 +1,155 @@ +import React from 'react'; + +import Table, { ColumnsType } from 'antd/lib/table'; +import { TableProps, Tag } from 'antd'; + +import { connect, useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; +import { TraceReducer } from 'types/reducer/trace'; +import { bindActionCreators } from 'redux'; +import { ThunkDispatch } from 'redux-thunk'; +import AppActions from 'types/actions'; +import { + 
GetSpansAggregate, + GetSpansAggregateProps, +} from 'store/actions/trace/getInitialSpansAggregate'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import dayjs from 'dayjs'; +import duration from 'dayjs/plugin/duration'; +import history from 'lib/history'; +import ROUTES from 'constants/routes'; +dayjs.extend(duration); + +const TraceTable = ({ getSpansAggregate }: TraceProps) => { + const { + spansAggregate, + selectedFilter, + selectedTags, + filterLoading, + } = useSelector((state) => state.traces); + + const globalTime = useSelector( + (state) => state.globalTime, + ); + + const { loading, total } = spansAggregate; + + type TableType = FlatArray; + + const columns: ColumnsType = [ + { + title: 'Date', + dataIndex: 'timestamp', + key: 'timestamp', + render: (value: TableType['timestamp']) => { + const day = dayjs(value); + return
{day.format('DD/MM/YYYY HH:MM:ss A')}
; + }, + sorter: (a, b) => dayjs(a.timestamp).diff(dayjs(b.timestamp)), + }, + { + title: 'Service', + dataIndex: 'serviceName', + key: 'serviceName', + sorter: (a, b) => a.serviceName.length - b.serviceName.length, + }, + { + title: 'Operation', + dataIndex: 'operation', + key: 'operation', + }, + { + title: 'Duration', + dataIndex: 'durationNano', + key: 'durationNano', + sorter: (a, b) => a.durationNano - b.durationNano, + render: (value: TableType['durationNano']) => { + return ( +
+ {`${dayjs + .duration({ milliseconds: value / 1000000 }) + .asMilliseconds()} ms`} +
+ ); + }, + }, + { + title: 'Method', + dataIndex: 'httpMethod', + key: 'httpMethod', + render: (value: TableType['httpMethod']) => { + if (value.length === 0) { + return
-
; + } + return {value}; + }, + }, + { + title: 'Status Code', + dataIndex: 'httpCode', + key: 'httpCode', + sorter: (a, b) => a.httpCode.length - b.httpCode.length, + render: (value: TableType['httpCode']) => { + if (value.length === 0) { + return
-
; + } + return {value}; + }, + }, + ]; + + const onChangeHandler: TableProps['onChange'] = (props) => { + if (props.current && props.pageSize) { + getSpansAggregate({ + maxTime: globalTime.maxTime, + minTime: globalTime.minTime, + selectedFilter, + current: props.current, + pageSize: props.pageSize, + selectedTags, + }); + } + }; + + return ( + ({ + onClick: () => { + history.push({ + pathname: ROUTES.TRACE + '/' + record.traceID, + state: { + spanId: record.spanID, + }, + }); + }, + })} + size="middle" + rowKey={'timestamp'} + pagination={{ + current: spansAggregate.currentPage, + pageSize: spansAggregate.pageSize, + responsive: true, + position: ['bottomLeft'], + total: total, + }} + /> + ); +}; + +interface DispatchProps { + getSpansAggregate: (props: GetSpansAggregateProps) => void; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + getSpansAggregate: bindActionCreators(GetSpansAggregate, dispatch), +}); + +type TraceProps = DispatchProps; + +export default connect(null, mapDispatchToProps)(TraceTable); diff --git a/frontend/src/container/TraceCustomVisualization/TraceCustomGraph.tsx b/frontend/src/container/TraceCustomVisualization/TraceCustomGraph.tsx deleted file mode 100644 index 9f9fd79746..0000000000 --- a/frontend/src/container/TraceCustomVisualization/TraceCustomGraph.tsx +++ /dev/null @@ -1,44 +0,0 @@ -import Graph from 'components/Graph'; -import convertToNanoSecondsToSecond from 'lib/convertToNanoSecondsToSecond'; -import { colors } from 'lib/getRandomColor'; -import React, { memo } from 'react'; -import { useSelector } from 'react-redux'; -import { AppState } from 'store/reducers'; -import { TraceReducer } from 'types/reducer/trace'; - -import { CustomGraphContainer } from './styles'; - -const TraceCustomGraph = ({ - spansAggregate, -}: TraceCustomGraphProps): JSX.Element => { - const { selectedEntity } = useSelector( - (state) => state.trace, - ); - - return ( - - new Date(s.timestamp / 1000000)), - datasets: [ 
- { - data: spansAggregate.map((e) => - selectedEntity === 'duration' - ? parseFloat(convertToNanoSecondsToSecond(e.value)) - : e.value, - ), - borderColor: colors[0], - }, - ], - }} - /> - - ); -}; - -interface TraceCustomGraphProps { - spansAggregate: TraceReducer['spansAggregate']; -} - -export default memo(TraceCustomGraph); diff --git a/frontend/src/container/TraceCustomVisualization/config.ts b/frontend/src/container/TraceCustomVisualization/config.ts deleted file mode 100644 index 0a3d5fbf9b..0000000000 --- a/frontend/src/container/TraceCustomVisualization/config.ts +++ /dev/null @@ -1,56 +0,0 @@ -export const entity = [ - { - title: 'Calls', - key: 'calls', - dataindex: 'calls', - }, - { - title: 'Duration', - key: 'duration', - dataindex: 'duration', - }, - { - title: 'Error', - key: 'error', - dataindex: 'error', - }, - { - title: 'Status Code', - key: 'status_code', - dataindex: 'status_code', - }, -]; - -export const aggregation_options = [ - { - linked_entity: 'calls', - default_selected: { title: 'count', dataindex: 'count' }, - options_available: [ - { title: 'Count', dataindex: 'count' }, - { title: 'Rate (per sec)', dataindex: 'rate_per_sec' }, - ], - }, - { - linked_entity: 'duration', - default_selected: { title: 'p99', dataindex: 'p99' }, - // options_available: [ {title:'Avg', dataindex:'avg'}, {title:'Max', dataindex:'max'},{title:'Min', dataindex:'min'}, {title:'p50', dataindex:'p50'},{title:'p95', dataindex:'p95'}, {title:'p95', dataindex:'p95'}] - options_available: [ - { title: 'p50', dataindex: 'p50' }, - { title: 'p95', dataindex: 'p95' }, - { title: 'p99', dataindex: 'p99' }, - ], - }, - { - linked_entity: 'error', - default_selected: { title: 'count', dataindex: 'count' }, - options_available: [ - { title: 'count', dataindex: 'count' }, - { title: 'Rate (per sec)', dataindex: 'rate_per_sec' }, - ], - }, - { - linked_entity: 'status_code', - default_selected: { title: 'count', dataindex: 'count' }, - options_available: [{ title: 
'count', dataindex: 'count' }], - }, -]; diff --git a/frontend/src/container/TraceCustomVisualization/index.tsx b/frontend/src/container/TraceCustomVisualization/index.tsx deleted file mode 100644 index dbc4c46ed0..0000000000 --- a/frontend/src/container/TraceCustomVisualization/index.tsx +++ /dev/null @@ -1,127 +0,0 @@ -import { Form, Select } from 'antd'; -import Spinner from 'components/Spinner'; -import React from 'react'; -import { connect, useSelector } from 'react-redux'; -import { AppState } from 'store/reducers'; -const { Option } = Select; -import { Store } from 'rc-field-form/lib/interface'; -import { bindActionCreators } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { - GetTraceVisualAggregates, - GetTraceVisualAggregatesProps, -} from 'store/actions/trace/getTraceVisualAgrregates'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -import { aggregation_options, entity } from './config'; -import { Card, CustomVisualizationsTitle, FormItem, Space } from './styles'; -import TraceCustomGraph from './TraceCustomGraph'; - -const TraceCustomVisualisation = ({ - getTraceVisualAggregates, -}: TraceCustomVisualisationProps): JSX.Element => { - const { - selectedEntity, - spansLoading, - selectedAggOption, - spansAggregate, - } = useSelector((state) => state.trace); - - const [form] = Form.useForm(); - - if (spansLoading) { - return ; - } - - const handleFormValuesChange = (changedValues: Store): void => { - const formFieldName = Object.keys(changedValues)[0]; - if (formFieldName === 'entity') { - const temp_entity = aggregation_options.filter( - (item) => item.linked_entity === changedValues[formFieldName], - )[0]; - - form.setFieldsValue({ - agg_options: temp_entity.default_selected.title, - }); - - const values = form.getFieldsValue(['agg_options', 'entity']); - - getTraceVisualAggregates({ - selectedAggOption: values.agg_options, - selectedEntity: values.entity, - }); - } - - if 
(formFieldName === 'agg_options') { - getTraceVisualAggregates({ - selectedAggOption: changedValues[formFieldName], - selectedEntity, - }); - } - }; - - return ( - - Custom Visualizations -
- - - - - - - - - - - - -
- ); -}; - -interface DispatchProps { - getTraceVisualAggregates: (props: GetTraceVisualAggregatesProps) => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - getTraceVisualAggregates: bindActionCreators( - GetTraceVisualAggregates, - dispatch, - ), -}); - -type TraceCustomVisualisationProps = DispatchProps; - -export default connect(null, mapDispatchToProps)(TraceCustomVisualisation); diff --git a/frontend/src/container/TraceCustomVisualization/styles.ts b/frontend/src/container/TraceCustomVisualization/styles.ts deleted file mode 100644 index 069feaad73..0000000000 --- a/frontend/src/container/TraceCustomVisualization/styles.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { - Card as CardComponent, - Form, - Space as SpaceComponent, - Typography, -} from 'antd'; -import styled from 'styled-components'; - -export const CustomGraphContainer = styled.div` - height: 30vh; -`; - -export const Card = styled(CardComponent)` - .ant-card-body { - padding-bottom: 0; - } -`; - -export const CustomVisualizationsTitle = styled(Typography)` - margin-bottom: 1rem; -`; - -export const FormItem = styled(Form.Item)` - &&& { - margin: 0; - } -`; - -export const Space = styled(SpaceComponent)` - &&& { - display: flex; - flex-wrap: wrap; - } -`; diff --git a/frontend/src/container/TraceFilter/Filter.tsx b/frontend/src/container/TraceFilter/Filter.tsx deleted file mode 100644 index ca4a2754a1..0000000000 --- a/frontend/src/container/TraceFilter/Filter.tsx +++ /dev/null @@ -1,182 +0,0 @@ -import { Tag } from 'antd'; -import { METRICS_PAGE_QUERY_PARAM } from 'constants/query'; -import React from 'react'; -import { connect, useSelector } from 'react-redux'; -import { bindActionCreators, Dispatch } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { TagItem } from 'store/actions'; -import { UpdateSelectedTags } from 'store/actions/trace'; -import { - UpdateSelectedData, - UpdateSelectedDataProps, -} from 
'store/actions/trace/updateSelectedData'; -import { AppState } from 'store/reducers'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -import { Card } from './styles'; - -const Filter = ({ - updatedQueryParams, - updateSelectedData, - updateSelectedTags, -}: FilterProps): JSX.Element => { - const { - selectedService, - selectedOperation, - selectedLatency, - selectedTags, - selectedKind, - selectedEntity, - selectedAggOption, - } = useSelector((state) => state.trace); - - function handleCloseTag(value: string): void { - if (value === 'service') { - updatedQueryParams([''], [METRICS_PAGE_QUERY_PARAM.service]); - updateSelectedData({ - selectedAggOption, - selectedEntity, - selectedKind, - selectedLatency, - selectedOperation, - selectedService: '', - }); - } - if (value === 'operation') { - updatedQueryParams([''], [METRICS_PAGE_QUERY_PARAM.operation]); - updateSelectedData({ - selectedAggOption, - selectedEntity, - selectedKind, - selectedLatency, - selectedOperation: '', - selectedService, - }); - } - if (value === 'maxLatency') { - updatedQueryParams([''], [METRICS_PAGE_QUERY_PARAM.latencyMax]); - updateSelectedData({ - selectedAggOption, - selectedEntity, - selectedKind, - selectedLatency: { - min: selectedLatency.min, - max: '', - }, - selectedOperation, - selectedService, - }); - } - if (value === 'minLatency') { - updatedQueryParams([''], [METRICS_PAGE_QUERY_PARAM.latencyMin]); - updateSelectedData({ - selectedAggOption, - selectedEntity, - selectedKind, - selectedLatency: { - min: '', - max: selectedLatency.max, - }, - selectedOperation, - selectedService, - }); - } - } - - function handleCloseTagElement(item: TagItem): void { - const updatedSelectedtags = selectedTags.filter((e) => e.key !== item.key); - - updatedQueryParams( - [updatedSelectedtags], - [METRICS_PAGE_QUERY_PARAM.selectedTags], - ); - updateSelectedTags(updatedSelectedtags); - } - - return ( - - {selectedService.length !== 0 && ( - { - 
e.preventDefault(); - handleCloseTag('service'); - }} - > - service:{selectedService} - - )} - - {selectedOperation.length !== 0 && ( - { - e.preventDefault(); - handleCloseTag('operation'); - }} - > - operation:{selectedOperation} - - )} - - {selectedLatency?.min.length !== 0 && ( - { - e.preventDefault(); - handleCloseTag('minLatency'); - }} - > - minLatency: - {(parseInt(selectedLatency?.min || '0') / 1000000).toString()}ms - - )} - {selectedLatency?.max.length !== 0 && ( - { - e.preventDefault(); - handleCloseTag('maxLatency'); - }} - > - maxLatency: - {(parseInt(selectedLatency?.max || '0') / 1000000).toString()}ms - - )} - - {selectedTags.map((item) => ( - { - e.preventDefault(); - handleCloseTagElement(item); - }} - > - {item.key} {item.operator} {item.value} - - ))} - - ); -}; - -interface DispatchProps { - updateSelectedTags: ( - selectedTags: TraceReducer['selectedTags'], - ) => (dispatch: Dispatch) => void; - updateSelectedData: (props: UpdateSelectedDataProps) => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - updateSelectedTags: bindActionCreators(UpdateSelectedTags, dispatch), - updateSelectedData: bindActionCreators(UpdateSelectedData, dispatch), -}); - -interface FilterProps extends DispatchProps { - updatedQueryParams: (updatedValue: string[], key: string[]) => void; -} - -export default connect(null, mapDispatchToProps)(Filter); diff --git a/frontend/src/container/TraceFilter/LatencyForm.tsx b/frontend/src/container/TraceFilter/LatencyForm.tsx deleted file mode 100644 index 593e1fd877..0000000000 --- a/frontend/src/container/TraceFilter/LatencyForm.tsx +++ /dev/null @@ -1,160 +0,0 @@ -import { Col, Form, InputNumber, Modal, notification, Row } from 'antd'; -import { METRICS_PAGE_QUERY_PARAM } from 'constants/query'; -import { FormInstance, RuleObject } from 'rc-field-form/lib/interface'; -import React from 'react'; -import { connect, useSelector } from 'react-redux'; -import { bindActionCreators, 
Dispatch } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { UpdateSelectedLatency } from 'store/actions/trace'; -import { - UpdateSelectedData, - UpdateSelectedDataProps, -} from 'store/actions/trace/updateSelectedData'; -import { AppState } from 'store/reducers'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -const LatencyForm = ({ - onCancel, - visible, - updateSelectedLatency, - onLatencyButtonClick, - updatedQueryParams, - updateSelectedData, -}: LatencyModalFormProps): JSX.Element => { - const [form] = Form.useForm(); - const [notifications, Element] = notification.useNotification(); - const { - selectedLatency, - selectedKind, - selectedOperation, - selectedService, - selectedAggOption, - selectedEntity, - } = useSelector((state) => state.trace); - - const validateMinValue = (form: FormInstance): RuleObject => ({ - validator(_: RuleObject, value): Promise { - const { getFieldValue } = form; - const minValue = getFieldValue('min'); - const maxValue = getFieldValue('max'); - - if (value <= maxValue && value >= minValue) { - return Promise.resolve(); - } - return Promise.reject(new Error('Min value should be less than Max value')); - }, - }); - - const validateMaxValue = (form: FormInstance): RuleObject => ({ - validator(_, value): Promise { - const { getFieldValue } = form; - - const minValue = getFieldValue('min'); - const maxValue = getFieldValue('max'); - - if (value >= minValue && value <= maxValue) { - return Promise.resolve(); - } - return Promise.reject( - new Error('Max value should be greater than Min value'), - ); - }, - }); - - const onOkHandler = (): void => { - form - .validateFields() - .then((values) => { - const maxValue = (values.max * 1000000).toString(); - const minValue = (values.min * 1000000).toString(); - - onLatencyButtonClick(); - updatedQueryParams( - [maxValue, minValue], - [METRICS_PAGE_QUERY_PARAM.latencyMax, METRICS_PAGE_QUERY_PARAM.latencyMin], - ); - 
updateSelectedLatency({ - max: maxValue, - min: minValue, - }); - updateSelectedData({ - selectedKind, - selectedLatency: { - max: maxValue, - min: minValue, - }, - selectedOperation, - selectedService, - selectedAggOption, - selectedEntity, - }); - }) - .catch((info) => { - notifications.error({ - message: info.toString(), - }); - }); - }; - - return ( - <> - {Element} - - -
- -
- - - - - - - - - - - - - - ); -}; - -interface DispatchProps { - updateSelectedLatency: ( - selectedLatency: TraceReducer['selectedLatency'], - ) => (dispatch: Dispatch) => void; - updateSelectedData: (props: UpdateSelectedDataProps) => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - updateSelectedLatency: bindActionCreators(UpdateSelectedLatency, dispatch), - updateSelectedData: bindActionCreators(UpdateSelectedData, dispatch), -}); - -interface LatencyModalFormProps extends DispatchProps { - onCancel: () => void; - visible: boolean; - onLatencyButtonClick: () => void; - updatedQueryParams: (updatedValue: string[], value: string[]) => void; -} - -export default connect(null, mapDispatchToProps)(LatencyForm); diff --git a/frontend/src/container/TraceFilter/config.ts b/frontend/src/container/TraceFilter/config.ts deleted file mode 100644 index 8410b1cf54..0000000000 --- a/frontend/src/container/TraceFilter/config.ts +++ /dev/null @@ -1,15 +0,0 @@ -interface SpanKindList { - label: 'SERVER' | 'CLIENT'; - value: string; -} - -export const spanKindList: SpanKindList[] = [ - { - label: 'SERVER', - value: '2', - }, - { - label: 'CLIENT', - value: '3', - }, -]; diff --git a/frontend/src/container/TraceFilter/index.tsx b/frontend/src/container/TraceFilter/index.tsx deleted file mode 100644 index 3d4f7d2f43..0000000000 --- a/frontend/src/container/TraceFilter/index.tsx +++ /dev/null @@ -1,390 +0,0 @@ -import { Button, Input, notification, Typography } from 'antd'; -import { SelectValue } from 'antd/lib/select'; -import React, { useCallback, useEffect, useState } from 'react'; -import { connect, useSelector } from 'react-redux'; -import { AppState } from 'store/reducers'; -import { TagItem, TraceReducer } from 'types/reducer/trace'; - -import { spanKindList } from './config'; -import Filter from './Filter'; -import LatencyForm from './LatencyForm'; -import { AutoComplete, Form, InfoWrapper, Select } from './styles'; -const { 
Option } = Select; -import { METRICS_PAGE_QUERY_PARAM } from 'constants/query'; -import ROUTES from 'constants/routes'; -import createQueryParams from 'lib/createQueryParams'; -import history from 'lib/history'; -import { useLocation } from 'react-router'; -import { bindActionCreators, Dispatch } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { UpdateSelectedTags } from 'store/actions/trace'; -import { - UpdateSelectedData, - UpdateSelectedDataProps, -} from 'store/actions/trace/updateSelectedData'; -import AppActions from 'types/actions'; - -const FormItem = Form.Item; - -const TraceList = ({ - updateSelectedTags, - updateSelectedData, -}: TraceListProps): JSX.Element => { - const [ - notificationInstance, - NotificationElement, - ] = notification.useNotification(); - - const [visible, setVisible] = useState(false); - const [form] = Form.useForm(); - const [form_basefilter] = Form.useForm(); - - const { search } = useLocation(); - - const params = new URLSearchParams(search); - - const onLatencyButtonClick = useCallback(() => { - setVisible((visible) => !visible); - }, []); - - const { - operationsList, - serviceList, - tagsSuggestions, - selectedTags, - selectedService, - selectedOperation, - selectedLatency, - selectedKind, - selectedAggOption, - selectedEntity, - } = useSelector((state) => state.trace); - - const paramsInObject = (params: URLSearchParams): { [x: string]: string } => { - const updatedParamas: { [x: string]: string } = {}; - params.forEach((value, key) => { - updatedParamas[key] = value; - }); - return updatedParamas; - }; - - const updatedQueryParams = (updatedValue: string[], key: string[]): void => { - const updatedParams = paramsInObject(params); - - updatedValue.forEach((_, index) => { - updatedParams[key[index]] = updatedValue[index]; - }); - - const queryParams = createQueryParams(updatedParams); - history.push(ROUTES.TRACE + `?${queryParams}`); - }; - - const getUpdatedSelectedData = (props: UpdateSelectedDataProps): 
void => { - const { - selectedKind, - selectedLatency, - selectedOperation, - selectedService, - } = props; - - updateSelectedData({ - selectedKind, - selectedLatency, - selectedOperation, - selectedService, - selectedAggOption, - selectedEntity, - }); - }; - - const onTagSubmitTagHandler = (values: Item): void => { - if (values.tag_key.length === 0 || values.tag_value.length === 0) { - return; - } - - // check whether it is pre-existing in the array or not - - const isFound = selectedTags.find((tags) => { - return ( - tags.key === values.tag_key && - tags.value === values.tag_value && - tags.operator === values.operator - ); - }); - - if (!isFound) { - const preSelectedTags = [ - ...selectedTags, - { - operator: values.operator, - key: values.tag_key, - value: values.tag_value, - }, - ]; - - updatedQueryParams( - [JSON.stringify(preSelectedTags)], - [METRICS_PAGE_QUERY_PARAM.selectedTags], - ); - - updateSelectedTags(preSelectedTags); - } else { - notificationInstance.error({ - message: 'Tag Already Present', - }); - } - }; - - const onChangeTagKey = (data: string): void => { - form.setFieldsValue({ tag_key: data }); - }; - - const updateSelectedServiceHandler = (value: string): void => { - updatedQueryParams([value], [METRICS_PAGE_QUERY_PARAM.service]); - getUpdatedSelectedData({ - selectedKind, - selectedLatency, - selectedOperation, - selectedService: value, - selectedAggOption, - selectedEntity, - }); - }; - - const updateSelectedOperationHandler = (value: string): void => { - updatedQueryParams([value], [METRICS_PAGE_QUERY_PARAM.operation]); - getUpdatedSelectedData({ - selectedKind, - selectedLatency, - selectedOperation: value, - selectedService, - selectedAggOption, - selectedEntity, - }); - }; - - const updateSelectedKindHandler = (value: string): void => { - updatedQueryParams([value], [METRICS_PAGE_QUERY_PARAM.kind]); - getUpdatedSelectedData({ - selectedKind: value, - selectedLatency, - selectedOperation, - selectedService, - selectedAggOption, - 
selectedEntity, - }); - }; - - useEffect(() => { - if (selectedService.length !== 0) { - form_basefilter.setFieldsValue({ - service: selectedService, - }); - } else { - form_basefilter.setFieldsValue({ - service: '', - }); - } - - if (selectedOperation.length !== 0) { - form_basefilter.setFieldsValue({ - operation: selectedOperation, - }); - } else { - form_basefilter.setFieldsValue({ - operation: '', - }); - } - - if (selectedKind.length !== 0) { - form_basefilter.setFieldsValue({ - spanKind: selectedKind, - }); - } else { - form_basefilter.setFieldsValue({ - spanKind: '', - }); - } - - if (selectedLatency.max.length === 0 && selectedLatency.min.length === 0) { - form_basefilter.setFieldsValue({ - latency: 'Latency', - }); - } - - if (selectedLatency.max.length !== 0 && selectedLatency.min.length === 0) { - form_basefilter.setFieldsValue({ - latency: `Latency < Max Latency: ${ - parseInt(selectedLatency.max, 10) / 1000000 - } ms`, - }); - } - - if (selectedLatency.max.length === 0 && selectedLatency.min.length !== 0) { - form_basefilter.setFieldsValue({ - latency: `Min Latency: ${ - parseInt(selectedLatency.min, 10) / 1000000 - } ms < Latency`, - }); - } - - if (selectedLatency.max.length !== 0 && selectedLatency.min.length !== 0) { - form_basefilter.setFieldsValue({ - latency: `Min Latency: ${ - parseInt(selectedLatency.min, 10) / 1000000 - } ms < Latency < Max Latency: ${ - parseInt(selectedLatency.min, 10) / 1000000 - } ms`, - }); - } - }, [ - selectedService, - selectedOperation, - selectedKind, - selectedLatency, - form_basefilter, - ]); - - return ( - <> - {NotificationElement} - - Filter Traces -
- - - - - - - - - - - - - - - - - - {(selectedTags.length !== 0 || - selectedService.length !== 0 || - selectedOperation.length !== 0 || - selectedLatency.max.length !== 0 || - selectedLatency.min.length !== 0) && ( - - )} - - Select Service to get Tag suggestions -
- - { - return { value: s.tagKeys }; - })} - onChange={onChangeTagKey} - filterOption={(inputValue, option): boolean => - option?.value.toUpperCase().indexOf(inputValue.toUpperCase()) !== -1 - } - placeholder="Tag Key" - /> - - - - - - - - - - - - - - - { - setVisible(false); - }} - updatedQueryParams={updatedQueryParams} - visible={visible} - onLatencyButtonClick={onLatencyButtonClick} - /> - - ); -}; - -interface Item { - tag_key: string; - tag_value: string; - operator: TagItem['operator']; -} - -interface DispatchProps { - updateSelectedTags: ( - selectedTags: TraceReducer['selectedTags'], - ) => (dispatch: Dispatch) => void; - updateSelectedData: (props: UpdateSelectedDataProps) => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - updateSelectedTags: bindActionCreators(UpdateSelectedTags, dispatch), - updateSelectedData: bindActionCreators(UpdateSelectedData, dispatch), -}); - -type TraceListProps = DispatchProps; - -export default connect(null, mapDispatchToProps)(TraceList); diff --git a/frontend/src/container/TraceFilter/styles.ts b/frontend/src/container/TraceFilter/styles.ts deleted file mode 100644 index 165f48e1b6..0000000000 --- a/frontend/src/container/TraceFilter/styles.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { - AutoComplete as AutoCompleteComponent, - Card as CardComponent, - Form as FormComponent, - Select as SelectComponent, - Typography, -} from 'antd'; -import styled from 'styled-components'; - -export const InfoWrapper = styled(Typography)` - padding-top: 1rem; - font-style: italic; - font-size: 0.75rem; -`; - -export const Select = styled(SelectComponent)` - min-width: 180px; -`; - -export const AutoComplete = styled(AutoCompleteComponent)` - min-width: 180px; -`; - -export const Form = styled(FormComponent)` - margin-top: 1rem; - margin-bottom: 1rem; - gap: 0.5rem; -`; - -export const Card = styled(CardComponent)` - .ant-card-body { - padding: 0.5rem; - } -`; diff --git 
a/frontend/src/container/TraceList/index.tsx b/frontend/src/container/TraceList/index.tsx deleted file mode 100644 index 9f655dd964..0000000000 --- a/frontend/src/container/TraceList/index.tsx +++ /dev/null @@ -1,141 +0,0 @@ -import { Space, Table, Typography } from 'antd'; -import { ColumnsType } from 'antd/lib/table/Table'; -import ROUTES from 'constants/routes'; -import convertDateToAmAndPm from 'lib/convertDateToAmAndPm'; -import getFormattedDate from 'lib/getFormatedDate'; -import history from 'lib/history'; -import React from 'react'; -import { useSelector } from 'react-redux'; -import { AppState } from 'store/reducers'; -import { pushDStree } from 'types/api/trace/getSpans'; -import { TraceReducer } from 'types/reducer/trace'; -import { isOnboardingSkipped } from 'utils/app'; - -import { TitleContainer } from './styles'; - -const TraceDetails = (): JSX.Element => { - const { spanList } = useSelector( - (state) => state.trace, - ); - - const spans: TableDataSourceItem[] = spanList[0]?.events?.map( - (item: (number | string | string[] | pushDStree[])[], index) => { - if ( - typeof item[0] === 'number' && - typeof item[4] === 'string' && - typeof item[6] === 'string' && - typeof item[1] === 'string' && - typeof item[2] === 'string' && - typeof item[3] === 'string' - ) { - return { - startTime: item[0], - operationName: item[4], - duration: parseInt(item[6]), - spanid: item[1], - traceid: item[2], - key: index.toString(), - service: item[3], - }; - } - return { - duration: 0, - key: '', - operationName: '', - service: '', - spanid: '', - startTime: 0, - traceid: '', - }; - }, - ); - - const columns: ColumnsType = [ - { - title: 'Start Time', - dataIndex: 'startTime', - key: 'startTime', - sorter: (a, b): number => a.startTime - b.startTime, - sortDirections: ['descend', 'ascend'], - render: (value: number): string => { - const date = new Date(value); - const result = `${getFormattedDate(date)} ${convertDateToAmAndPm(date)}`; - return result; - }, - }, - { - 
title: 'Service', - dataIndex: 'service', - key: 'service', - }, - { - title: 'Operation', - dataIndex: 'operationName', - key: 'operationName', - }, - { - title: 'Duration (in ms)', - dataIndex: 'duration', - key: 'duration', - sorter: (a, b): number => a.duration - b.duration, - sortDirections: ['descend', 'ascend'], - render: (value: number): string => (value / 1000000).toFixed(2), - }, - ]; - - if (isOnboardingSkipped() && spans?.length === 0) { - return ( - - No spans found. Please add instrumentation (follow this - - guide - - ) - - ); - } - - if (spans?.length === 0) { - return No spans found for given filter!; - } - - return ( - <> - List of filtered spans - -
=> ({ - onClick: (): void => { - history.push({ - pathname: ROUTES.TRACE + '/' + record.traceid, - state: { - spanId: record.spanid, - }, - }); - }, - })} - /> - - ); -}; - -export interface TableDataSourceItem { - key: string; - spanid: string; - traceid: string; - operationName: string; - startTime: number; - duration: number; - service: string; -} - -export default TraceDetails; diff --git a/frontend/src/container/TraceList/styles.ts b/frontend/src/container/TraceList/styles.ts deleted file mode 100644 index dfde5eaad6..0000000000 --- a/frontend/src/container/TraceList/styles.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { Typography } from 'antd'; -import styled from 'styled-components'; - -export const TitleContainer = styled(Typography)` - margin-top: 1rem; - margin-bottom: 1rem; -`; diff --git a/frontend/src/hooks/useClickOutside.ts b/frontend/src/hooks/useClickOutside.ts new file mode 100644 index 0000000000..d3d7ce83c7 --- /dev/null +++ b/frontend/src/hooks/useClickOutside.ts @@ -0,0 +1,26 @@ +import React, { useEffect } from 'react'; + +const useClickOutside = ( + ref: React.RefObject, + callback: (e: HTMLElement) => void | null, +) => { + const listener = (e: Event) => { + const node = e?.target as HTMLElement; + + if (ref.current && !ref.current.contains(node)) { + if (callback) { + callback(node); + } + } + }; + + useEffect(() => { + document.addEventListener('click', listener); + + return () => { + document.removeEventListener('click', listener); + }; + }, [ref, callback]); +}; + +export default useClickOutside; diff --git a/frontend/src/hooks/useDebouncedFunction.ts b/frontend/src/hooks/useDebouncedFunction.ts new file mode 100644 index 0000000000..4a26fbc46f --- /dev/null +++ b/frontend/src/hooks/useDebouncedFunction.ts @@ -0,0 +1,34 @@ +import { useCallback } from 'react'; +import debounce from 'lodash-es/debounce'; + +export interface DebouncedFunc any> { + (...args: Parameters): ReturnType | undefined; + + cancel(): void; + + flush(): ReturnType | 
undefined; +} + +export type DebounceOptions = { + leading?: boolean; + maxWait?: number; + trailing?: boolean; +}; + +const defaultOptions: DebounceOptions = { + leading: false, + trailing: true, +}; + +const useDebouncedFn = any>( + fn: T, + wait: number = 100, + options: DebounceOptions = defaultOptions, + dependencies?: ReadonlyArray, +): DebouncedFunc => { + const debounced = debounce(fn, wait, options); + + return useCallback(debounced, dependencies || []); +}; + +export default useDebouncedFn; diff --git a/frontend/src/lib/query/convertObjectIntoParams.ts b/frontend/src/lib/query/convertObjectIntoParams.ts new file mode 100644 index 0000000000..fa6dc3f6f5 --- /dev/null +++ b/frontend/src/lib/query/convertObjectIntoParams.ts @@ -0,0 +1,15 @@ +const convertObjectIntoParams = ( + props: Record, + stringify = false, +) => { + return Object.keys(props) + .map( + (e) => + `${e}=${ + stringify ? encodeURIComponent(JSON.stringify(props[e])) : props[e] + }`, + ) + .join('&'); +}; + +export default convertObjectIntoParams; diff --git a/frontend/src/pages/Trace/index.tsx b/frontend/src/pages/Trace/index.tsx new file mode 100644 index 0000000000..5fd2894365 --- /dev/null +++ b/frontend/src/pages/Trace/index.tsx @@ -0,0 +1,157 @@ +import { Card } from 'antd'; +import ROUTES from 'constants/routes'; +import Filters from 'container/Trace/Filters'; +import TraceGraph from 'container/Trace/Graph'; +import Search from 'container/Trace/Search'; +import TraceGraphFilter from 'container/Trace/TraceGraphFilter'; +import TraceTable from 'container/Trace/TraceTable'; +import history from 'lib/history'; +import React, { useCallback, useEffect, useState } from 'react'; +import { connect, useDispatch, useSelector } from 'react-redux'; +import { bindActionCreators, Dispatch } from 'redux'; +import { ThunkDispatch } from 'redux-thunk'; +import { GetInitialTraceFilter } from 'store/actions/trace/getInitialFilter'; +import { + GetSpansAggregate, + GetSpansAggregateProps, +} from 
'store/actions/trace/getInitialSpansAggregate'; +import { GetSpans, GetSpansProps } from 'store/actions/trace/getSpans'; +import { AppState } from 'store/reducers'; +import AppActions from 'types/actions'; +import { RESET_TRACE_FILTER } from 'types/actions/trace'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import { TraceReducer } from 'types/reducer/trace'; + +import { + Container, + LeftContainer, + RightContainer, + ClearAllFilter, +} from './styles'; + +const Trace = ({ + getSpansAggregate, + getSpans, + getInitialFilter, +}: Props): JSX.Element => { + const { maxTime, minTime } = useSelector( + (state) => state.globalTime, + ); + + const dispatch = useDispatch>(); + + const [isChanged, setIsChanged] = useState(true); + + const { + selectedFilter, + spansAggregate, + selectedTags, + selectedFunction, + selectedGroupBy, + isFilterExclude, + } = useSelector((state) => state.traces); + + useEffect(() => { + getInitialFilter(minTime, maxTime); + }, [maxTime, minTime, getInitialFilter, isChanged]); + + useEffect(() => { + getSpansAggregate({ + maxTime: maxTime, + minTime: minTime, + selectedFilter, + current: spansAggregate.currentPage, + pageSize: spansAggregate.pageSize, + selectedTags, + }); + }, [selectedTags, selectedFilter, maxTime, minTime]); + + useEffect(() => { + getSpans({ + end: maxTime, + function: selectedFunction, + groupBy: selectedGroupBy, + selectedFilter, + selectedTags, + start: minTime, + step: 60, + isFilterExclude, + }); + }, [ + selectedFunction, + selectedGroupBy, + selectedFilter, + selectedTags, + maxTime, + minTime, + ]); + + useEffect(() => { + return () => { + dispatch({ + type: RESET_TRACE_FILTER, + }); + }; + }, []); + + const onClickHandler = useCallback((e) => { + e.preventDefault(); + e.stopPropagation(); + + history.replace(ROUTES.TRACE); + + dispatch({ + type: RESET_TRACE_FILTER, + }); + + setIsChanged((state) => !state); + }, []); + + return ( + <> + + +
+ + Clear all filters + + + + +
+ + + + + + + + + + + +
+ + ); +}; + +interface DispatchProps { + getSpansAggregate: (props: GetSpansAggregateProps) => void; + getSpans: (props: GetSpansProps) => void; + getInitialFilter: ( + minTime: GlobalReducer['minTime'], + maxTime: GlobalReducer['maxTime'], + ) => void; +} + +const mapDispatchToProps = ( + dispatch: ThunkDispatch, +): DispatchProps => ({ + getInitialFilter: bindActionCreators(GetInitialTraceFilter, dispatch), + getSpansAggregate: bindActionCreators(GetSpansAggregate, dispatch), + getSpans: bindActionCreators(GetSpans, dispatch), +}); + +type Props = DispatchProps; + +export default connect(null, mapDispatchToProps)(Trace); diff --git a/frontend/src/pages/Trace/styles.ts b/frontend/src/pages/Trace/styles.ts new file mode 100644 index 0000000000..f124268258 --- /dev/null +++ b/frontend/src/pages/Trace/styles.ts @@ -0,0 +1,37 @@ +import styled from 'styled-components'; +import { Button, Card } from 'antd'; + +export const Container = styled.div` + display: flex; + flex: 1; + min-height: 80vh; + + margin-top: 1rem; +`; + +export const LeftContainer = styled(Card)` + flex: 0.5; + width: 95%; + padding-right: 0.5rem; + + .ant-card-body { + padding: 0; + } +`; + +export const RightContainer = styled(Card)` + &&& { + flex: 2; + } + + .ant-card-body { + padding: 0.5rem; + } +`; + +export const ClearAllFilter = styled(Button)` + &&& { + width: 95%; + margin-bottom: 0.5rem; + } +`; diff --git a/frontend/src/pages/TraceDetails/index.tsx b/frontend/src/pages/TraceDetails/index.tsx deleted file mode 100644 index 4f58d3c554..0000000000 --- a/frontend/src/pages/TraceDetails/index.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import { Typography } from 'antd'; -import Spinner from 'components/Spinner'; -import TraceCustomVisualisation from 'container/TraceCustomVisualization'; -import TraceFilter from 'container/TraceFilter'; -import TraceList from 'container/TraceList'; -import React, { useEffect } from 'react'; -import { connect, useSelector } from 'react-redux'; -import { 
bindActionCreators } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { - GetInitialTraceData, - GetInitialTraceDataProps, - ResetRaceData, -} from 'store/actions/trace'; -import { AppState } from 'store/reducers'; -import AppActions from 'types/actions'; -import { GlobalReducer } from 'types/reducer/globalTime'; -import { TraceReducer } from 'types/reducer/trace'; - -const TraceDetail = ({ - getInitialTraceData, - resetTraceData, -}: TraceDetailProps): JSX.Element => { - const { loading, selectedTime } = useSelector( - (state) => state.globalTime, - ); - - const { loading: TraceLoading, error, errorMessage } = useSelector< - AppState, - TraceReducer - >((state) => state.trace); - - useEffect(() => { - if (!loading) { - getInitialTraceData({ - selectedTime, - }); - } - - return (): void => { - resetTraceData(); - }; - }, [getInitialTraceData, loading, selectedTime, resetTraceData]); - - if (error) { - return {errorMessage}; - } - - if (loading || TraceLoading) { - return ; - } - - return ( - <> - - - - - ); -}; - -interface DispatchProps { - getInitialTraceData: (props: GetInitialTraceDataProps) => void; - resetTraceData: () => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - getInitialTraceData: bindActionCreators(GetInitialTraceData, dispatch), - resetTraceData: bindActionCreators(ResetRaceData, dispatch), -}); - -type TraceDetailProps = DispatchProps; - -export default connect(null, mapDispatchToProps)(TraceDetail); diff --git a/frontend/src/store/actions/trace/getInitialData.ts b/frontend/src/store/actions/trace/getInitialData.ts deleted file mode 100644 index 13851614a8..0000000000 --- a/frontend/src/store/actions/trace/getInitialData.ts +++ /dev/null @@ -1,201 +0,0 @@ -import getServiceList from 'api/trace/getServiceList'; -import getServiceOperation from 'api/trace/getServiceOperation'; -import getSpan from 'api/trace/getSpan'; -import getSpansAggregate from 'api/trace/getSpanAggregate'; -import 
getTags from 'api/trace/getTags'; -import { AxiosError } from 'axios'; -import { METRICS_PAGE_QUERY_PARAM } from 'constants/query'; -import history from 'lib/history'; -import { Dispatch } from 'redux'; -import store from 'store'; -import AppActions from 'types/actions'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps as ServiceOperationPayloadProps } from 'types/api/trace/getServiceOperation'; -import { PayloadProps as TagPayloadProps } from 'types/api/trace/getTags'; -import { GlobalReducer } from 'types/reducer/globalTime'; -import { TraceReducer } from 'types/reducer/trace'; - -export const GetInitialTraceData = ({ - selectedTime, -}: GetInitialTraceDataProps): ((dispatch: Dispatch) => void) => { - return async (dispatch: Dispatch): Promise => { - try { - const { globalTime, trace } = store.getState(); - const { minTime, maxTime, selectedTime: globalSelectedTime } = globalTime; - const { selectedAggOption, selectedEntity } = trace; - - // keeping the redux as source of truth - if (selectedTime !== globalSelectedTime) { - return; - } - - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: true, - }, - }); - - const urlParams = new URLSearchParams(history.location.search.split('?')[1]); - const operationName = urlParams.get(METRICS_PAGE_QUERY_PARAM.operation); - const serviceName = urlParams.get(METRICS_PAGE_QUERY_PARAM.service); - const errorTag = urlParams.get(METRICS_PAGE_QUERY_PARAM.error); - const kindTag = urlParams.get(METRICS_PAGE_QUERY_PARAM.kind); - const latencyMin = urlParams.get(METRICS_PAGE_QUERY_PARAM.latencyMin); - const latencyMax = urlParams.get(METRICS_PAGE_QUERY_PARAM.latencyMax); - const selectedTags = urlParams.get(METRICS_PAGE_QUERY_PARAM.selectedTags); - const aggregationOption = urlParams.get( - METRICS_PAGE_QUERY_PARAM.aggregationOption, - ); - const selectedEntityOption = urlParams.get(METRICS_PAGE_QUERY_PARAM.entity); - - const isCustomSelected = selectedTime === 'custom'; - - const 
end = isCustomSelected - ? globalTime.maxTime + 15 * 60 * 1000000000 - : maxTime; - - const start = isCustomSelected - ? globalTime.minTime - 15 * 60 * 1000000000 - : minTime; - - const [ - serviceListResponse, - spanResponse, - spanAggregateResponse, - ] = await Promise.all([ - getServiceList(), - getSpan({ - start, - end, - kind: kindTag || '', - limit: '100', - lookback: '2d', - maxDuration: latencyMax || '', - minDuration: latencyMin || '', - operation: operationName || '', - service: serviceName || '', - tags: selectedTags || '[]', - }), - getSpansAggregate({ - aggregation_option: aggregationOption || selectedAggOption, - dimension: selectedEntityOption || selectedEntity, - end, - start, - kind: kindTag || '', - maxDuration: latencyMax || '', - minDuration: latencyMin || '', - operation: operationName || '', - service: serviceName || '', - step: '60', - tags: selectedTags || '[]', - }), - ]); - - let tagResponse: - | SuccessResponse - | ErrorResponse - | undefined; - - let serviceOperationResponse: - | SuccessResponse - | ErrorResponse - | undefined; - - if (serviceName !== null && serviceName.length !== 0) { - [tagResponse, serviceOperationResponse] = await Promise.all([ - getTags({ - service: serviceName, - }), - getServiceOperation({ - service: serviceName, - }), - ]); - } - - const getSelectedTags = (): TraceReducer['selectedTags'] => { - const selectedTag = JSON.parse(selectedTags || '[]'); - - if (typeof selectedTag !== 'object' && Array.isArray(selectedTag)) { - return []; - } - - if (errorTag) { - return [ - ...selectedTag, - { - key: METRICS_PAGE_QUERY_PARAM.error, - operator: 'equals', - value: errorTag, - }, - ]; - } - - return [...selectedTag]; - }; - - const getCondition = (): boolean => { - const basicCondition = - serviceListResponse.statusCode === 200 && - spanResponse.statusCode === 200 && - (spanAggregateResponse.statusCode === 200 || - spanAggregateResponse.statusCode === 400); - - if (serviceName === null || serviceName.length === 0) { - 
return basicCondition; - } - - return ( - basicCondition && - tagResponse?.statusCode === 200 && - serviceOperationResponse?.statusCode === 200 - ); - }; - - const condition = getCondition(); - - if (condition) { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_SUCCESS', - payload: { - serviceList: serviceListResponse.payload || [], - operationList: serviceOperationResponse?.payload || [], - tagsSuggestions: tagResponse?.payload || [], - spansList: spanResponse?.payload || [], - selectedService: serviceName || '', - selectedOperation: operationName || '', - selectedTags: getSelectedTags(), - selectedKind: kindTag || '', - selectedLatency: { - max: latencyMax || '', - min: latencyMin || '', - }, - spansAggregate: spanAggregateResponse.payload || [], - }, - }); - - dispatch({ - type: 'GET_TRACE_LOADING_END', - }); - } else { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: serviceListResponse?.error || 'Something went wrong', - }, - }); - } - } catch (error) { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: (error as AxiosError).toString() || 'Something went wrong', - }, - }); - } - }; -}; - -export interface GetInitialTraceDataProps { - selectedTime: GlobalReducer['selectedTime']; -} diff --git a/frontend/src/store/actions/trace/getInitialFilter.ts b/frontend/src/store/actions/trace/getInitialFilter.ts new file mode 100644 index 0000000000..99e3fee6c5 --- /dev/null +++ b/frontend/src/store/actions/trace/getInitialFilter.ts @@ -0,0 +1,173 @@ +import { Dispatch, Store } from 'redux'; +import { AppState } from 'store/reducers'; +import AppActions from 'types/actions'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import getFiltersApi from 'api/trace/getFilters'; +import { + parseSelectedFilter, + parseFilterToFetchData, + parseQueryIntoCurrent, + parseQueryIntoSelectedTags, + isTraceFilterEnum, + parseQueryIntoFilter, + parseIsSkippedSelection, + parseFilterExclude, +} from './util'; +import { 
+ UPDATE_ALL_FILTERS, + UPDATE_TRACE_FILTER_LOADING, +} from 'types/actions/trace'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { notification } from 'antd'; +import xor from 'lodash-es/xor'; + +export const GetInitialTraceFilter = ( + minTime: GlobalReducer['minTime'], + maxTime: GlobalReducer['maxTime'], +): (( + dispatch: Dispatch, + getState: Store['getState'], +) => void) => { + return async (dispatch, getState): Promise => { + try { + const query = location.search; + + const { traces, globalTime } = getState(); + + if (globalTime.maxTime !== maxTime && globalTime.minTime !== minTime) { + return; + } + + const getSelectedFilter = parseSelectedFilter( + query, + traces.selectedFilter, + true, + ); + + const getFilterToFetchData = parseFilterToFetchData( + query, + traces.filterToFetchData, + ); + + const getUserSelected = parseSelectedFilter( + query, + traces.userSelectedFilter, + ); + + const getIsFilterExcluded = parseFilterExclude( + query, + traces.isFilterExclude, + ); + + const parsedQueryCurrent = parseQueryIntoCurrent( + query, + traces.spansAggregate.currentPage, + ); + + const isSelectionSkipped = parseIsSkippedSelection(query); + + const parsedSelectedTags = parseQueryIntoSelectedTags( + query, + traces.selectedTags, + ); + + const parsedFilter = parseQueryIntoFilter(query, traces.filter); + + // now filter are not matching we need to fetch the data and make in sync + dispatch({ + type: UPDATE_TRACE_FILTER_LOADING, + payload: { + filterLoading: true, + }, + }); + + const response = await getFiltersApi({ + end: String(maxTime), + getFilters: getFilterToFetchData.currentValue, + start: String(minTime), + other: Object.fromEntries(getSelectedFilter.currentValue), + isFilterExclude: getIsFilterExcluded.currentValue, + }); + + let preSelectedFilter: Map = new Map( + getSelectedFilter.currentValue, + ); + + if (response.payload && !isSelectionSkipped.currentValue) { + const diff = + query.length === 0 + ? 
traces.filterToFetchData + : xor(traces.filterToFetchData, getFilterToFetchData.currentValue); + + Object.keys(response.payload).map((key) => { + const value = response.payload[key]; + Object.keys(value) + // remove maxDuration and minDuration filter from initial selection logic + .filter((e) => !['maxDuration', 'minDuration'].includes(e)) + .map((preKey) => { + if (isTraceFilterEnum(key) && diff.find((v) => v === key)) { + // const preValue = preSelectedFilter?.get(key) || []; + const preValue = getUserSelected.currentValue?.get(key) || []; + // preSelectedFilter?.set(key, [...new Set([...preValue, preKey])]); + getUserSelected.currentValue.set(key, [ + ...new Set([...preValue, preKey]), + ]); + } + }); + }); + } + + if (response.statusCode === 200) { + const preResponseSelected: TraceReducer['filterResponseSelected'] = new Set(); + + const initialFilter = new Map>( + parsedFilter.currentValue, + ); + + Object.keys(response.payload).forEach((key) => { + const value = response.payload[key]; + if (isTraceFilterEnum(key)) { + Object.keys(value).forEach((e) => preResponseSelected.add(e)); + + initialFilter.set(key, { + ...initialFilter.get(key), + ...value, + }); + } + }); + + dispatch({ + type: UPDATE_ALL_FILTERS, + payload: { + filter: initialFilter, + selectedFilter: preSelectedFilter, + filterToFetchData: getFilterToFetchData.currentValue, + current: parsedQueryCurrent.currentValue, + selectedTags: parsedSelectedTags.currentValue, + userSelected: getUserSelected.currentValue, + isFilterExclude: getIsFilterExcluded.currentValue, + }, + }); + } else { + notification.error({ + message: response.error || 'Something went wrong', + }); + } + + dispatch({ + type: UPDATE_TRACE_FILTER_LOADING, + payload: { + filterLoading: false, + }, + }); + } catch (error) { + console.log(error); + dispatch({ + type: UPDATE_TRACE_FILTER_LOADING, + payload: { + filterLoading: false, + }, + }); + } + }; +}; diff --git a/frontend/src/store/actions/trace/getInitialSpansAggregate.ts 
b/frontend/src/store/actions/trace/getInitialSpansAggregate.ts new file mode 100644 index 0000000000..0ae6362eea --- /dev/null +++ b/frontend/src/store/actions/trace/getInitialSpansAggregate.ts @@ -0,0 +1,115 @@ +import { Dispatch, Store } from 'redux'; +import { AppState } from 'store/reducers'; +import AppActions from 'types/actions'; +import { UPDATE_SPANS_AGGREEGATE } from 'types/actions/trace'; +import getSpansAggregate from 'api/trace/getSpansAggregate'; +import { GlobalReducer } from 'types/reducer/globalTime'; +import { TraceReducer } from 'types/reducer/trace'; +import { notification } from 'antd'; + +export const GetSpansAggregate = ( + props: GetSpansAggregateProps, +): (( + dispatch: Dispatch, + getState: Store['getState'], +) => void) => { + return async (dispatch, getState): Promise => { + const { traces, globalTime } = getState(); + const { spansAggregate } = traces; + + if ( + globalTime.maxTime !== props.maxTime && + globalTime.minTime !== props.minTime + ) { + return; + } + + if (traces.filterLoading) { + return; + } + + try { + // triggering loading + dispatch({ + type: UPDATE_SPANS_AGGREEGATE, + payload: { + spansAggregate: { + currentPage: props.current, + loading: true, + data: spansAggregate.data, + error: false, + total: spansAggregate.total, + pageSize: props.pageSize, + }, + }, + }); + + const response = await getSpansAggregate({ + end: props.maxTime, + start: props.minTime, + selectedFilter: props.selectedFilter, + limit: props.pageSize, + offset: props.current * props.pageSize - props.pageSize, + selectedTags: props.selectedTags, + isFilterExclude: traces.isFilterExclude, + }); + + if (response.statusCode === 200) { + dispatch({ + type: UPDATE_SPANS_AGGREEGATE, + payload: { + spansAggregate: { + currentPage: props.current, + loading: false, + data: response.payload.spans, + error: false, + total: response.payload.totalSpans, + pageSize: props.pageSize, + }, + }, + }); + } else { + notification.error({ + message: response.error || 
'Something went wrong', + }); + + dispatch({ + type: UPDATE_SPANS_AGGREEGATE, + payload: { + spansAggregate: { + currentPage: props.current, + loading: false, + data: spansAggregate.data, + error: true, + total: spansAggregate.total, + pageSize: props.pageSize, + }, + }, + }); + } + } catch (error) { + dispatch({ + type: UPDATE_SPANS_AGGREEGATE, + payload: { + spansAggregate: { + currentPage: props.current, + loading: false, + data: spansAggregate.data, + error: true, + total: spansAggregate.total, + pageSize: props.pageSize, + }, + }, + }); + } + }; +}; + +export interface GetSpansAggregateProps { + maxTime: GlobalReducer['maxTime']; + minTime: GlobalReducer['minTime']; + selectedFilter: TraceReducer['selectedFilter']; + current: TraceReducer['spansAggregate']['currentPage']; + pageSize: TraceReducer['spansAggregate']['pageSize']; + selectedTags: TraceReducer['selectedTags']; +} diff --git a/frontend/src/store/actions/trace/getSpans.ts b/frontend/src/store/actions/trace/getSpans.ts new file mode 100644 index 0000000000..6cee9a95c8 --- /dev/null +++ b/frontend/src/store/actions/trace/getSpans.ts @@ -0,0 +1,96 @@ +import { Dispatch, Store } from 'redux'; +import { AppState } from 'store/reducers'; +import AppActions from 'types/actions'; +import { + UPDATE_TRACE_GRAPH_ERROR, + UPDATE_TRACE_GRAPH_LOADING, + UPDATE_TRACE_GRAPH_SUCCESS, +} from 'types/actions/trace'; +import getSpans from 'api/trace/getSpans'; +import { Props } from 'types/api/trace/getSpans'; +import { notification } from 'antd'; + +export const GetSpans = ( + props: GetSpansProps, +): (( + dispatch: Dispatch, + getState: Store['getState'], +) => void) => { + return async (dispatch, getState): Promise => { + try { + const { traces, globalTime } = getState(); + const { spansGraph } = traces; + + if (globalTime.maxTime !== props.end && globalTime.minTime !== props.start) { + return; + } + + const { selectedTime } = globalTime; + + if (traces.filterLoading) { + return; + } + + // @TODO refactor this 
logic when share url functionlity is updated + const isCustomSelected = selectedTime === 'custom'; + + const end = isCustomSelected + ? globalTime.maxTime + 15 * 60 * 1000000000 + : props.end; + + const start = isCustomSelected + ? globalTime.minTime - 15 * 60 * 1000000000 + : props.start; + + if (!spansGraph.loading) { + dispatch({ + type: UPDATE_TRACE_GRAPH_LOADING, + payload: { + loading: true, + }, + }); + } + + const response = await getSpans({ + end: end, + function: props.function, + groupBy: props.groupBy, + selectedFilter: props.selectedFilter, + selectedTags: props.selectedTags, + start: start, + step: props.step, + isFilterExclude: props.isFilterExclude, + }); + + if (response.statusCode === 200) { + dispatch({ + type: UPDATE_TRACE_GRAPH_SUCCESS, + payload: { + data: response.payload, + }, + }); + } else { + notification.error({ + message: response.error || 'Something went wrong', + }); + dispatch({ + type: UPDATE_TRACE_GRAPH_ERROR, + payload: { + error: true, + errorMessage: response.error || 'Something went wrong', + }, + }); + } + } catch (error) { + dispatch({ + type: UPDATE_TRACE_GRAPH_ERROR, + payload: { + error: true, + errorMessage: (error as Error)?.toString() || 'Something went wrong', + }, + }); + } + }; +}; + +export type GetSpansProps = Props; diff --git a/frontend/src/store/actions/trace/getTraceVisualAgrregates.ts b/frontend/src/store/actions/trace/getTraceVisualAgrregates.ts deleted file mode 100644 index fa12fc4e47..0000000000 --- a/frontend/src/store/actions/trace/getTraceVisualAgrregates.ts +++ /dev/null @@ -1,92 +0,0 @@ -import getSpansAggregate from 'api/trace/getSpanAggregate'; -import { AxiosError } from 'axios'; -import { Dispatch } from 'redux'; -import store from 'store'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const GetTraceVisualAggregates = ({ - selectedEntity, - selectedAggOption, -}: GetTraceVisualAggregatesProps): (( - dispatch: Dispatch, -) => void) => { - 
return async (dispatch: Dispatch): Promise => { - try { - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: true, - }, - }); - - const { trace, globalTime } = store.getState(); - - const { - selectedKind, - selectedLatency, - selectedOperation, - selectedService, - selectedTags, - } = trace; - - const { selectedTime, maxTime, minTime } = globalTime; - - const isCustomSelected = selectedTime === 'custom'; - - const end = isCustomSelected - ? globalTime.maxTime + 15 * 60 * 1000000000 - : maxTime; - - const start = isCustomSelected - ? globalTime.minTime - 15 * 60 * 1000000000 - : minTime; - - const [spanAggregateResponse] = await Promise.all([ - getSpansAggregate({ - aggregation_option: selectedAggOption, - dimension: selectedEntity, - end, - start, - kind: selectedKind || '', - maxDuration: selectedLatency.max || '', - minDuration: selectedLatency.min || '', - operation: selectedOperation || '', - service: selectedService || '', - step: '60', - tags: JSON.stringify(selectedTags) || '[]', - }), - ]); - - if (spanAggregateResponse.statusCode === 200) { - dispatch({ - type: 'UPDATE_AGGREGATES', - payload: { - spansAggregate: spanAggregateResponse.payload, - selectedAggOption, - selectedEntity, - }, - }); - } - - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: false, - }, - }); - } catch (error) { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: (error as AxiosError).toString() || 'Something went wrong', - }, - }); - } - }; -}; - -export interface GetTraceVisualAggregatesProps { - selectedAggOption: TraceReducer['selectedAggOption']; - selectedEntity: TraceReducer['selectedEntity']; -} diff --git a/frontend/src/store/actions/trace/index.ts b/frontend/src/store/actions/trace/index.ts deleted file mode 100644 index f8d65f3eff..0000000000 --- a/frontend/src/store/actions/trace/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -export * from './getInitialData'; -export * from './resetTraceDetails'; -export * from 
'./updateSelectedAggOption'; -export * from './updateSelectedEntity'; -export * from './updateSelectedKind'; -export * from './updateSelectedLatency'; -export * from './updateSelectedOperation'; -export * from './updateSelectedService'; -export * from './updateSelectedTags'; diff --git a/frontend/src/store/actions/trace/loadingCompleted.ts b/frontend/src/store/actions/trace/loadingCompleted.ts deleted file mode 100644 index 3a64b2449d..0000000000 --- a/frontend/src/store/actions/trace/loadingCompleted.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; - -export const LoadingCompleted = (): (( - dispatch: Dispatch, -) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'GET_TRACE_LOADING_END', - }); - }; -}; diff --git a/frontend/src/store/actions/trace/parseFilter/current.ts b/frontend/src/store/actions/trace/parseFilter/current.ts new file mode 100644 index 0000000000..e8faae05c1 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/current.ts @@ -0,0 +1,36 @@ +import { TraceReducer } from 'types/reducer/trace'; +import { ParsedUrl } from '../util'; + +export const parseQueryIntoCurrent = ( + query: string, + stateCurrent: TraceReducer['spansAggregate']['currentPage'], +): ParsedUrl => { + const url = new URLSearchParams(query); + + let current = 1; + + const selected = url.get('current'); + + if (selected) { + try { + const parsedValue = JSON.parse(decodeURIComponent(selected)); + if (Number.isInteger(parsedValue)) { + current = parseInt(parsedValue, 10); + } + } catch (error) { + console.log('error while parsing json'); + } + } + + if (selected) { + return { + currentValue: parseInt(selected, 10), + urlValue: current, + }; + } + + return { + currentValue: stateCurrent, + urlValue: current, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/filter.ts b/frontend/src/store/actions/trace/parseFilter/filter.ts new file mode 100644 index 0000000000..0d91dfc9e6 
--- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/filter.ts @@ -0,0 +1,43 @@ +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { isTraceFilterEnum, ParsedUrl } from '../util'; + +export const parseQueryIntoFilter = ( + query: string, + stateFilter: TraceReducer['filter'], +): ParsedUrl => { + const urlFilter = new Map>(); + const url = new URLSearchParams(query); + + const selected = url.get('filter'); + + if (selected) { + try { + const parsedValue = JSON.parse(selected); + + if (typeof parsedValue === 'object') { + Object.keys(parsedValue).forEach((key) => { + if (isTraceFilterEnum(key)) { + const value = parsedValue[key]; + if (typeof value === 'object') { + urlFilter.set(key, value); + } + } + }); + } + } catch (error) { + console.log(error); + } + } + + if (selected) { + return { + currentValue: urlFilter, + urlValue: urlFilter, + }; + } + + return { + currentValue: stateFilter, + urlValue: urlFilter, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/filterToFetchData.ts b/frontend/src/store/actions/trace/parseFilter/filterToFetchData.ts new file mode 100644 index 0000000000..5016bc7702 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/filterToFetchData.ts @@ -0,0 +1,37 @@ +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { ParsedUrl } from '../util'; + +export const parseFilterToFetchData = ( + query: string, + stateTraceFilterData: TraceReducer['filterToFetchData'], +): ParsedUrl => { + const url = new URLSearchParams(query); + + let filterToFetchData: TraceFilterEnum[] = []; + + const selected = url.get('filterToFetchData'); + + if (selected) { + try { + const parsedValue = JSON.parse(decodeURIComponent(selected)); + + if (Array.isArray(parsedValue)) { + filterToFetchData.push(...parsedValue); + } + } catch (error) { + //error while parsing json + } + } + + if (selected) { + return { + currentValue: filterToFetchData, + urlValue: filterToFetchData, + }; + 
} + + return { + currentValue: stateTraceFilterData, + urlValue: filterToFetchData, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/index.ts b/frontend/src/store/actions/trace/parseFilter/index.ts new file mode 100644 index 0000000000..e9fb1f3b19 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/index.ts @@ -0,0 +1,8 @@ +export * from './minMaxTime'; +export * from './selectedFilter'; +export * from './filterToFetchData'; +export * from './selectedTags'; +export * from './filter'; +export * from './skippedSelected'; +export * from './current'; +export * from './isFilterExclude'; diff --git a/frontend/src/store/actions/trace/parseFilter/isFilterExclude.ts b/frontend/src/store/actions/trace/parseFilter/isFilterExclude.ts new file mode 100644 index 0000000000..a07bc4b3b0 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/isFilterExclude.ts @@ -0,0 +1,44 @@ +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { isTraceFilterEnum, ParsedUrl } from '../util'; + +export const parseFilterExclude = ( + query: string, + stateFilterExclude: TraceReducer['isFilterExclude'], +): ParsedUrl => { + const currentFilter = new Map(); + + const url = new URLSearchParams(query); + + const isPresent = url.get('isFilterExclude'); + + if (isPresent) { + try { + const parsedValue = JSON.parse(isPresent); + + if (typeof parsedValue === 'object') { + Object.keys(parsedValue).forEach((key) => { + if (isTraceFilterEnum(key)) { + const keyValue = parsedValue[key]; + if (typeof keyValue === 'boolean') { + currentFilter.set(key, keyValue); + } + } + }); + } + } catch (error) { + // parsing the value + } + } + + if (isPresent) { + return { + currentValue: currentFilter, + urlValue: currentFilter, + }; + } + + return { + currentValue: stateFilterExclude, + urlValue: currentFilter, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/minMaxTime.ts b/frontend/src/store/actions/trace/parseFilter/minMaxTime.ts new 
file mode 100644 index 0000000000..9953fb18c6 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/minMaxTime.ts @@ -0,0 +1,20 @@ +import { GlobalTime } from 'types/actions/globalTime'; + +export const parseMinMaxTime = (query: string): GlobalTime => { + const url = new URLSearchParams(query); + let maxTime = 0; + let minTime = 0; + + const urlMaxTime = url.get('minTime'); + const urlMinTime = url.get('maxTime'); + + if (urlMaxTime && urlMinTime) { + maxTime = parseInt(urlMaxTime); + minTime = parseInt(urlMinTime); + } + + return { + maxTime, + minTime, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/selectedFilter.ts b/frontend/src/store/actions/trace/parseFilter/selectedFilter.ts new file mode 100644 index 0000000000..973e4ba331 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/selectedFilter.ts @@ -0,0 +1,43 @@ +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import { isTraceFilterEnum, ParsedUrl } from '../util'; + +export const parseSelectedFilter = ( + query: string, + selectedFilter: TraceReducer['selectedFilter'], + isUserSelected = false, +): ParsedUrl> => { + const url = new URLSearchParams(query); + + const filters = new Map(); + + const title = isUserSelected ? 
'selected' : 'userSelectedFilter'; + + const selected = url.get(title); + + if (selected) { + try { + const parsedValue = JSON.parse(decodeURIComponent(selected)); + if (typeof parsedValue === 'object') { + Object.keys(parsedValue).forEach((e) => { + if (isTraceFilterEnum(e)) { + filters.set(e, parsedValue[e]); + } + }); + } + } catch (error) { + // if the parsing error happens + } + } + + if (selected) { + return { + urlValue: filters, + currentValue: filters, + }; + } + + return { + urlValue: filters, + currentValue: selectedFilter, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/selectedTags.ts b/frontend/src/store/actions/trace/parseFilter/selectedTags.ts new file mode 100644 index 0000000000..a98cbda921 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/selectedTags.ts @@ -0,0 +1,37 @@ +import { TraceReducer } from 'types/reducer/trace'; +import { ParsedUrl } from '../util'; + +export const parseQueryIntoSelectedTags = ( + query: string, + stateSelectedTags: TraceReducer['selectedTags'], +): ParsedUrl => { + const url = new URLSearchParams(query); + + let selectedTags: TraceReducer['selectedTags'] = []; + + const querySelectedTags = url.get('selectedTags'); + + if (querySelectedTags) { + try { + const parsedQuerySelectedTags = JSON.parse(querySelectedTags); + + if (Array.isArray(parsedQuerySelectedTags)) { + selectedTags = parsedQuerySelectedTags; + } + } catch (error) { + //error while parsing + } + } + + if (querySelectedTags) { + return { + currentValue: selectedTags, + urlValue: selectedTags, + }; + } + + return { + currentValue: stateSelectedTags, + urlValue: selectedTags, + }; +}; diff --git a/frontend/src/store/actions/trace/parseFilter/skippedSelected.ts b/frontend/src/store/actions/trace/parseFilter/skippedSelected.ts new file mode 100644 index 0000000000..fd4b110ba9 --- /dev/null +++ b/frontend/src/store/actions/trace/parseFilter/skippedSelected.ts @@ -0,0 +1,33 @@ +import { ParsedUrl } from '../util'; + +export const 
parseIsSkippedSelection = (query: string): ParsedUrl => { + const url = new URLSearchParams(query); + + let current = false; + + const isSkippedSelected = url.get('isSelectedFilterSkipped'); + + if (isSkippedSelected) { + try { + const parsedValue = JSON.parse(isSkippedSelected); + + if (typeof parsedValue === 'boolean') { + current = parsedValue; + } + } catch (error) { + current = false; + } + } + + if (isSkippedSelected) { + return { + currentValue: current, + urlValue: current, + }; + } + + return { + currentValue: current, + urlValue: current, + }; +}; diff --git a/frontend/src/store/actions/trace/resetTraceDetails.ts b/frontend/src/store/actions/trace/resetTraceDetails.ts deleted file mode 100644 index 80b3e63bc9..0000000000 --- a/frontend/src/store/actions/trace/resetTraceDetails.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; - -export const ResetRaceData = (): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'RESET_TRACE_DATA', - }); - }; -}; diff --git a/frontend/src/store/actions/trace/selectTraceFilter.ts b/frontend/src/store/actions/trace/selectTraceFilter.ts new file mode 100644 index 0000000000..b18443ad3e --- /dev/null +++ b/frontend/src/store/actions/trace/selectTraceFilter.ts @@ -0,0 +1,50 @@ +import { Dispatch, Store } from 'redux'; +import { AppState } from 'store/reducers'; +import AppActions from 'types/actions'; +import { TraceFilterEnum } from 'types/reducer/trace'; +import { updateURL } from './util'; + +export const SelectedTraceFilter = (props: { + topic: TraceFilterEnum; + value: string; +}): (( + dispatch: Dispatch, + getState: Store['getState'], +) => void) => { + return (_, getState): void => { + const { topic, value } = props; + const { traces } = getState(); + + const filter = traces.selectedFilter; + + const isTopicPresent = filter.get(topic); + + // append the value + if (!isTopicPresent) { + filter.set(props.topic, 
[props.value]); + } else { + const isValuePresent = + isTopicPresent.find((e) => e === props.value) !== undefined; + + // check the value if present then remove the value + if (isValuePresent) { + filter.set( + props.topic, + isTopicPresent.filter((e) => e !== value), + ); + } else { + // if not present add into the array of string + filter.set(props.topic, [...isTopicPresent, props.value]); + } + } + + updateURL( + filter, + traces.filterToFetchData, + traces.spansAggregate.currentPage, + traces.selectedTags, + traces.filter, + traces.isFilterExclude, + ); + }; +}; diff --git a/frontend/src/store/actions/trace/updateIsTagsError.ts b/frontend/src/store/actions/trace/updateIsTagsError.ts new file mode 100644 index 0000000000..be7b0b5f89 --- /dev/null +++ b/frontend/src/store/actions/trace/updateIsTagsError.ts @@ -0,0 +1,17 @@ +import { Dispatch } from 'redux'; +import AppActions from 'types/actions'; +import { TraceReducer } from 'types/reducer/trace'; +import { UPDATE_IS_TAG_ERROR } from 'types/actions/trace'; + +export const UpdateTagIsError = ( + isTagModalError: TraceReducer['isTagModalError'], +): ((dispatch: Dispatch) => void) => { + return (dispatch): void => { + dispatch({ + type: UPDATE_IS_TAG_ERROR, + payload: { + isTagModalError, + }, + }); + }; +}; diff --git a/frontend/src/store/actions/trace/updateSelectedAggOption.ts b/frontend/src/store/actions/trace/updateSelectedAggOption.ts deleted file mode 100644 index b5004e6a2b..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedAggOption.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedAggOption = ( - selectedAggOption: TraceReducer['selectedAggOption'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_SELECTED_AGG_OPTION', - payload: { - selectedAggOption, - }, - }); - }; -}; diff --git 
a/frontend/src/store/actions/trace/updateSelectedData.ts b/frontend/src/store/actions/trace/updateSelectedData.ts deleted file mode 100644 index d9371daa4f..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedData.ts +++ /dev/null @@ -1,164 +0,0 @@ -import getServiceOperation from 'api/trace/getServiceOperation'; -import getSpan from 'api/trace/getSpan'; -import getSpansAggregate from 'api/trace/getSpanAggregate'; -import getTags from 'api/trace/getTags'; -import { AxiosError } from 'axios'; -import { Dispatch } from 'redux'; -import store from 'store'; -import AppActions from 'types/actions'; -import { ErrorResponse, SuccessResponse } from 'types/api'; -import { PayloadProps as ServiceOperationPayloadProps } from 'types/api/trace/getServiceOperation'; -import { PayloadProps as TagPayloadProps } from 'types/api/trace/getTags'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedData = ({ - selectedKind, - selectedService, - selectedLatency, - selectedOperation, - selectedAggOption, - selectedEntity, -}: UpdateSelectedDataProps): ((dispatch: Dispatch) => void) => { - return async (dispatch: Dispatch): Promise => { - try { - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: true, - }, - }); - const { trace, globalTime } = store.getState(); - const { minTime, maxTime, selectedTime } = globalTime; - - const { selectedTags } = trace; - - const isCustomSelected = selectedTime === 'custom'; - - const end = isCustomSelected - ? globalTime.maxTime + 15 * 60 * 1000000000 - : maxTime; - - const start = isCustomSelected - ? 
globalTime.minTime - 15 * 60 * 1000000000 - : minTime; - - const [spanResponse, getSpanAggregateResponse] = await Promise.all([ - getSpan({ - start, - end, - kind: selectedKind || '', - limit: '100', - lookback: '2d', - maxDuration: selectedLatency.max || '', - minDuration: selectedLatency.min || '', - operation: selectedOperation || '', - service: selectedService || '', - tags: JSON.stringify(selectedTags), - }), - getSpansAggregate({ - aggregation_option: selectedAggOption || '', - dimension: selectedEntity || '', - end, - kind: selectedKind || '', - maxDuration: selectedLatency.max || '', - minDuration: selectedLatency.min || '', - operation: selectedOperation || '', - service: selectedService || '', - start, - step: '60', - tags: JSON.stringify(selectedTags), - }), - ]); - - let tagResponse: - | SuccessResponse - | ErrorResponse - | undefined; - - let serviceOperationResponse: - | SuccessResponse - | ErrorResponse - | undefined; - - if (selectedService !== null && selectedService.length !== 0) { - [tagResponse, serviceOperationResponse] = await Promise.all([ - getTags({ - service: selectedService, - }), - getServiceOperation({ - service: selectedService, - }), - ]); - } - - const spanAggregateCondition = - getSpanAggregateResponse.statusCode === 200 || - getSpanAggregateResponse.statusCode === 400; - - const getCondition = (): boolean => { - const basicCondition = - spanResponse.statusCode === 200 && spanAggregateCondition; - - if (selectedService === null || selectedService.length === 0) { - return basicCondition; - } - - return ( - basicCondition && - tagResponse?.statusCode === 200 && - serviceOperationResponse?.statusCode === 200 - ); - }; - - const condition = getCondition(); - - if (condition) { - dispatch({ - type: 'UPDATE_SELECTED_TRACE_DATA', - payload: { - operationList: serviceOperationResponse?.payload || [], - spansList: spanResponse.payload || [], - tagsSuggestions: tagResponse?.payload || [], - selectedKind, - selectedService, - selectedLatency, 
- selectedOperation, - spansAggregate: spanAggregateCondition - ? getSpanAggregateResponse.payload || [] - : [], - }, - }); - } else { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: 'Something went wrong', - }, - }); - } - - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: false, - }, - }); - } catch (error) { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: (error as AxiosError).toString() || 'Something went wrong', - }, - }); - } - }; -}; - -export interface UpdateSelectedDataProps { - selectedKind: TraceReducer['selectedKind']; - selectedService: TraceReducer['selectedService']; - selectedLatency: TraceReducer['selectedLatency']; - selectedOperation: TraceReducer['selectedOperation']; - selectedEntity: TraceReducer['selectedEntity']; - selectedAggOption: TraceReducer['selectedAggOption']; -} diff --git a/frontend/src/store/actions/trace/updateSelectedEntity.ts b/frontend/src/store/actions/trace/updateSelectedEntity.ts deleted file mode 100644 index 95b10baa6e..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedEntity.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedEntity = ( - selectedEntity: TraceReducer['selectedEntity'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_SELECTED_ENTITY', - payload: { - selectedEntity, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateSelectedKind.ts b/frontend/src/store/actions/trace/updateSelectedKind.ts deleted file mode 100644 index 5954599926..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedKind.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const 
UpdateSelectedKind = ( - selectedKind: TraceReducer['selectedKind'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_TRACE_SELECTED_KIND', - payload: { - selectedKind, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateSelectedLatency.ts b/frontend/src/store/actions/trace/updateSelectedLatency.ts deleted file mode 100644 index 636b2e84da..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedLatency.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedLatency = ( - selectedLatency: TraceReducer['selectedLatency'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_TRACE_SELECTED_LATENCY_VALUE', - payload: { - selectedLatency, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateSelectedOperation.ts b/frontend/src/store/actions/trace/updateSelectedOperation.ts deleted file mode 100644 index e8e1a89f9c..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedOperation.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedOperation = ( - selectedOperation: TraceReducer['selectedOperation'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_TRACE_SELECTED_OPERATION', - payload: { - selectedOperation, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateSelectedService.ts b/frontend/src/store/actions/trace/updateSelectedService.ts deleted file mode 100644 index c936dfdc58..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedService.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 
'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedService = ( - selectedService: TraceReducer['selectedService'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_TRACE_SELECTED_SERVICE', - payload: { - selectedService, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateSelectedTags.ts b/frontend/src/store/actions/trace/updateSelectedTags.ts deleted file mode 100644 index 1f23c54fac..0000000000 --- a/frontend/src/store/actions/trace/updateSelectedTags.ts +++ /dev/null @@ -1,94 +0,0 @@ -import getSpan from 'api/trace/getSpan'; -import getSpansAggregate from 'api/trace/getSpanAggregate'; -import { AxiosError } from 'axios'; -import { Dispatch } from 'redux'; -import store from 'store'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSelectedTags = ( - selectedTags: TraceReducer['selectedTags'], -): ((dispatch: Dispatch) => void) => { - return async (dispatch: Dispatch): Promise => { - try { - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: true, - }, - }); - - const { trace, globalTime } = store.getState(); - const { - selectedKind, - selectedLatency, - selectedOperation, - selectedService, - selectedAggOption, - selectedEntity, - spansAggregate, - } = trace; - - const { maxTime, minTime } = globalTime; - - const [spanResponse, spansAggregateResponse] = await Promise.all([ - getSpan({ - start: minTime, - end: maxTime, - kind: selectedKind || '', - limit: '100', - lookback: '2d', - maxDuration: selectedLatency.max || '', - minDuration: selectedLatency.min || '', - operation: selectedOperation || '', - service: selectedService || '', - tags: JSON.stringify(selectedTags), - }), - getSpansAggregate({ - aggregation_option: selectedAggOption, - dimension: selectedEntity, - end: maxTime, - kind: selectedKind || '', - maxDuration: selectedLatency.max || '', - 
minDuration: selectedLatency.min || '', - operation: selectedOperation || '', - service: selectedService || '', - start: minTime, - step: '60', - tags: JSON.stringify(selectedTags), - }), - ]); - - const condition = - spansAggregateResponse.statusCode === 200 || - spansAggregateResponse.statusCode === 400; - - if (spanResponse.statusCode === 200 && condition) { - dispatch({ - type: 'UPDATE_TRACE_SELECTED_TAGS', - payload: { - selectedTags, - spansList: spanResponse.payload, - spansAggregate: - spansAggregateResponse.statusCode === 400 - ? spansAggregate - : spansAggregateResponse.payload || [], - }, - }); - } - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: false, - }, - }); - } catch (error) { - dispatch({ - type: 'GET_TRACE_INITIAL_DATA_ERROR', - payload: { - errorMessage: (error as AxiosError).toString() || 'Something went wrong', - }, - }); - } - }; -}; diff --git a/frontend/src/store/actions/trace/updateSpanLoading.ts b/frontend/src/store/actions/trace/updateSpanLoading.ts deleted file mode 100644 index d5f87764ab..0000000000 --- a/frontend/src/store/actions/trace/updateSpanLoading.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import { TraceReducer } from 'types/reducer/trace'; - -export const UpdateSpanLoading = ( - spansLoading: TraceReducer['spansLoading'], -): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - dispatch({ - type: 'UPDATE_SPANS_LOADING', - payload: { - loading: spansLoading, - }, - }); - }; -}; diff --git a/frontend/src/store/actions/trace/updateTagPanelVisiblity.ts b/frontend/src/store/actions/trace/updateTagPanelVisiblity.ts new file mode 100644 index 0000000000..0ff8321670 --- /dev/null +++ b/frontend/src/store/actions/trace/updateTagPanelVisiblity.ts @@ -0,0 +1,17 @@ +import { Dispatch } from 'redux'; +import AppActions from 'types/actions'; +import { TraceReducer } from 'types/reducer/trace'; +import { 
UPDATE_TAG_MODAL_VISIBLITY } from 'types/actions/trace'; + +export const UpdateTagVisiblity = ( + isTagModalOpen: TraceReducer['isTagModalOpen'], +): ((dispatch: Dispatch) => void) => { + return (dispatch): void => { + dispatch({ + type: UPDATE_TAG_MODAL_VISIBLITY, + payload: { + isTagModalOpen: isTagModalOpen, + }, + }); + }; +}; diff --git a/frontend/src/store/actions/trace/updateTagsSelected.ts b/frontend/src/store/actions/trace/updateTagsSelected.ts new file mode 100644 index 0000000000..459aaca1a6 --- /dev/null +++ b/frontend/src/store/actions/trace/updateTagsSelected.ts @@ -0,0 +1,17 @@ +import { Dispatch } from 'redux'; +import AppActions from 'types/actions'; +import { TraceReducer } from 'types/reducer/trace'; +import { UPDATE_SELECTED_TAGS } from 'types/actions/trace'; + +export const UpdateSelectedTags = ( + selectedTags: TraceReducer['selectedTags'], +): ((dispatch: Dispatch) => void) => { + return (dispatch): void => { + dispatch({ + type: UPDATE_SELECTED_TAGS, + payload: { + selectedTags: selectedTags, + }, + }); + }; +}; diff --git a/frontend/src/store/actions/trace/util.ts b/frontend/src/store/actions/trace/util.ts new file mode 100644 index 0000000000..8b3cab27b0 --- /dev/null +++ b/frontend/src/store/actions/trace/util.ts @@ -0,0 +1,79 @@ +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; +import history from 'lib/history'; +import { AllTraceFilterEnum } from 'container/Trace/Filters'; +import { PayloadProps as GetFilterPayload } from 'types/api/trace/getFilters'; +export * from './parseFilter'; +export interface ParsedUrl { + currentValue: T; + urlValue: T; +} + +export function isTraceFilterEnum( + value: TraceFilterEnum | string, +): value is TraceFilterEnum { + if (AllTraceFilterEnum.find((enums) => enums === value)) { + return true; + } + return false; +} + +export const updateURL = ( + selectedFilter: TraceReducer['selectedFilter'], + filterToFetchData: TraceReducer['filterToFetchData'], + current: 
TraceReducer['spansAggregate']['total'], + selectedTags: TraceReducer['selectedTags'], + filter: TraceReducer['filter'], + isFilterExclude: TraceReducer['isFilterExclude'], + userSelectedFilter: TraceReducer['userSelectedFilter'], +) => { + const search = new URLSearchParams(location.search); + const preResult: { key: string; value: string }[] = []; + + const keyToSkip = [ + 'selected', + 'filterToFetchData', + 'current', + 'selectedTags', + 'filter', + 'isFilterExclude', + 'userSelectedFilter', + ]; + + search.forEach((value, key) => { + if (!keyToSkip.includes(key)) { + preResult.push({ + key, + value, + }); + } + }); + + history.replace( + `${history.location.pathname}?selected=${JSON.stringify( + Object.fromEntries(selectedFilter), + )}&filterToFetchData=${JSON.stringify( + filterToFetchData, + )}¤t=${current}&selectedTags=${JSON.stringify( + selectedTags, + )}&filter=${JSON.stringify(Object.fromEntries(filter))}&${preResult + .map((e) => `${e.key}=${e.value}`) + .join('&')}&isFilterExclude=${JSON.stringify( + Object.fromEntries(isFilterExclude), + )}&userSelectedFilter=${JSON.stringify( + Object.fromEntries(userSelectedFilter), + )}`, + ); +}; + +export const getFilter = (data: GetFilterPayload): TraceReducer['filter'] => { + const filter = new Map>(); + + Object.keys(data).forEach((key) => { + const value = data[key]; + if (isTraceFilterEnum(key)) { + filter.set(key, value); + } + }); + + return filter; +}; diff --git a/frontend/src/store/reducers/index.ts b/frontend/src/store/reducers/index.ts index 4eba388f72..9c2bdecb1f 100644 --- a/frontend/src/store/reducers/index.ts +++ b/frontend/src/store/reducers/index.ts @@ -5,16 +5,15 @@ import dashboardReducer from './dashboard'; import globalTimeReducer from './global'; import metricsReducers from './metric'; import { ServiceMapReducer } from './serviceMap'; -import { traceReducer } from './trace'; +import traceReducer from './trace'; import TraceFilterReducer from './traceFilters'; -import { traceItemReducer, 
tracesReducer } from './traces'; +import { traceItemReducer } from './traces'; import { usageDataReducer } from './usage'; const reducers = combineReducers({ traceFilters: TraceFilterReducer, - traces: tracesReducer, + traces: traceReducer, traceItem: traceItemReducer, - trace: traceReducer, usageDate: usageDataReducer, globalTime: globalTimeReducer, serviceMap: ServiceMapReducer, diff --git a/frontend/src/store/reducers/trace.ts b/frontend/src/store/reducers/trace.ts index ae9e27e343..cbccb6e7af 100644 --- a/frontend/src/store/reducers/trace.ts +++ b/frontend/src/store/reducers/trace.ts @@ -1,200 +1,199 @@ import { - GET_TRACE_INITIAL_DATA_ERROR, - GET_TRACE_INITIAL_DATA_SUCCESS, - GET_TRACE_LOADING_END, - GET_TRACE_LOADING_START, - RESET_TRACE_DATA, + SELECT_TRACE_FILTER, TraceActions, - UPDATE_AGGREGATES, - UPDATE_SELECTED_AGG_OPTION, - UPDATE_SELECTED_ENTITY, - UPDATE_SELECTED_TRACE_DATA, - UPDATE_SPANS_LOADING, - UPDATE_TRACE_SELECTED_KIND, - UPDATE_TRACE_SELECTED_LATENCY_VALUE, - UPDATE_TRACE_SELECTED_OPERATION, - UPDATE_TRACE_SELECTED_SERVICE, - UPDATE_TRACE_SELECTED_TAGS, + UPDATE_TRACE_FILTER, + UPDATE_TRACE_FILTER_LOADING, + UPDATE_ALL_FILTERS, + UPDATE_SELECTED_TAGS, + UPDATE_SPANS_AGGREEGATE, + UPDATE_TAG_MODAL_VISIBLITY, + UPDATE_IS_TAG_ERROR, + UPDATE_SELECTED_FUNCTION, + UPDATE_SELECTED_GROUP_BY, + UPDATE_TRACE_GRAPH_LOADING, + UPDATE_TRACE_GRAPH_ERROR, + UPDATE_TRACE_GRAPH_SUCCESS, + RESET_TRACE_FILTER, + UPDATE_FILTER_RESPONSE_SELECTED, + UPDATE_FILTER_EXCLUDE, } from 'types/actions/trace'; -import { TraceReducer } from 'types/reducer/trace'; +import { TraceFilterEnum, TraceReducer } from 'types/reducer/trace'; -const intitalState: TraceReducer = { - error: false, - errorMessage: '', - loading: true, - operationsList: [], - selectedKind: '', - selectedLatency: { - max: '', - min: '', - }, - selectedOperation: '', - selectedService: '', +const initialValue: TraceReducer = { + filter: new Map(), + filterToFetchData: ['duration', 'status', 
'serviceName'], + filterLoading: true, + filterResponseSelected: new Set(), + selectedFilter: new Map(), selectedTags: [], - serviceList: [], - spanList: [], - tagsSuggestions: [], - selectedAggOption: 'count', - selectedEntity: 'calls', - spansAggregate: [], - spansLoading: false, + isTagModalOpen: false, + isTagModalError: false, + isFilterExclude: new Map([]), + userSelectedFilter: new Map(), + spansAggregate: { + currentPage: 1, + loading: false, + data: [], + error: false, + total: 0, + pageSize: 10, + }, + selectedGroupBy: '', + selectedFunction: 'count', + spansGraph: { + error: false, + errorMessage: '', + loading: true, + payload: { items: {} }, + }, }; -export const traceReducer = ( - state = intitalState, +const traceReducer = ( + state = initialValue, action: TraceActions, ): TraceReducer => { switch (action.type) { - case GET_TRACE_INITIAL_DATA_ERROR: { + case UPDATE_TRACE_FILTER: { return { ...state, - errorMessage: action.payload.errorMessage, - loading: false, - error: true, + filter: action.payload.filter, }; } - case GET_TRACE_LOADING_START: { - return { - ...state, - loading: true, - spansLoading: true, - }; - } - - case GET_TRACE_INITIAL_DATA_SUCCESS: { + case UPDATE_ALL_FILTERS: { + const { payload } = action; const { - serviceList, - operationList, - tagsSuggestions, - selectedOperation, - selectedService, + filter, + filterToFetchData, + selectedFilter, + current, selectedTags, - spansList, - selectedKind, - selectedLatency, - spansAggregate, - } = action.payload; + userSelected, + isFilterExclude, + } = payload; return { ...state, - serviceList: serviceList, - tagsSuggestions, - selectedOperation, - selectedService, + filter, + filterToFetchData, + selectedFilter, selectedTags, - spanList: spansList, - operationsList: operationList, - error: false, - selectedKind, - selectedLatency, - spansAggregate, - spansLoading: false, + userSelectedFilter: userSelected, + isFilterExclude, + spansAggregate: { + ...state.spansAggregate, + currentPage: 
current, + }, }; } - case UPDATE_TRACE_SELECTED_KIND: { + case UPDATE_TRACE_FILTER_LOADING: { return { ...state, - selectedKind: action.payload.selectedKind, + filterLoading: action.payload.filterLoading, }; } - case UPDATE_TRACE_SELECTED_LATENCY_VALUE: { + case SELECT_TRACE_FILTER: { return { ...state, - selectedLatency: action.payload.selectedLatency, + selectedFilter: action.payload.selectedFilter, }; } - case UPDATE_TRACE_SELECTED_OPERATION: { + case RESET_TRACE_FILTER: { return { - ...state, - selectedOperation: action.payload.selectedOperation, + ...initialValue, }; } - case UPDATE_TRACE_SELECTED_SERVICE: { - return { - ...state, - selectedService: action.payload.selectedService, - }; - } - - case UPDATE_TRACE_SELECTED_TAGS: { + case UPDATE_SELECTED_TAGS: { return { ...state, selectedTags: action.payload.selectedTags, - spanList: action.payload.spansList, - spansAggregate: action.payload.spansAggregate, }; } - case UPDATE_SELECTED_TRACE_DATA: { - const { - spansList, - tagsSuggestions, - operationList, - selectedOperation, - selectedLatency, - selectedService, - selectedKind, - spansAggregate, - } = action.payload; - - return { - ...state, - spanList: spansList, - tagsSuggestions, - operationsList: operationList, - selectedOperation, - selectedLatency, - selectedService, - selectedKind, - spansAggregate, - }; - } - - case GET_TRACE_LOADING_END: { - return { - ...state, - loading: false, - }; - } - - case UPDATE_SELECTED_AGG_OPTION: { - return { - ...state, - selectedAggOption: action.payload.selectedAggOption, - }; - } - - case UPDATE_SELECTED_ENTITY: { - return { - ...state, - selectedEntity: action.payload.selectedEntity, - }; - } - - case UPDATE_SPANS_LOADING: { - return { - ...state, - spansLoading: action.payload.loading, - }; - } - - case RESET_TRACE_DATA: { - return { - ...intitalState, - }; - } - - case UPDATE_AGGREGATES: { + case UPDATE_SPANS_AGGREEGATE: { return { ...state, spansAggregate: action.payload.spansAggregate, - selectedAggOption: 
action.payload.selectedAggOption, - selectedEntity: action.payload.selectedEntity, + }; + } + + case UPDATE_TAG_MODAL_VISIBLITY: { + return { + ...state, + isTagModalOpen: action.payload.isTagModalOpen, + }; + } + + case UPDATE_IS_TAG_ERROR: { + return { + ...state, + isTagModalError: action.payload.isTagModalError, + }; + } + + case UPDATE_SELECTED_FUNCTION: { + return { + ...state, + selectedFunction: action.payload.selectedFunction, + }; + } + + case UPDATE_SELECTED_GROUP_BY: { + return { + ...state, + selectedGroupBy: action.payload.selectedGroupBy, + }; + } + + case UPDATE_TRACE_GRAPH_LOADING: { + return { + ...state, + spansGraph: { + ...state.spansGraph, + loading: action.payload.loading, + }, + }; + } + + case UPDATE_TRACE_GRAPH_ERROR: { + return { + ...state, + spansGraph: { + ...state.spansGraph, + error: action.payload.error, + errorMessage: action.payload.errorMessage, + loading: false, + }, + }; + } + + case UPDATE_TRACE_GRAPH_SUCCESS: { + return { + ...state, + spansGraph: { + ...state.spansGraph, + payload: action.payload.data, + loading: false, + error: false, + }, + }; + } + + case UPDATE_FILTER_RESPONSE_SELECTED: { + return { + ...state, + filterResponseSelected: action.payload.filterResponseSelected, + }; + } + + case UPDATE_FILTER_EXCLUDE: { + return { + ...state, + isFilterExclude: action.payload.isFilterExclude, }; } @@ -202,3 +201,5 @@ export const traceReducer = ( return state; } }; + +export default traceReducer; diff --git a/frontend/src/types/actions/trace.ts b/frontend/src/types/actions/trace.ts index 18a4115b7c..a31a676a93 100644 --- a/frontend/src/types/actions/trace.ts +++ b/frontend/src/types/actions/trace.ts @@ -1,151 +1,172 @@ -export const GET_TRACE_INITIAL_DATA_SUCCESS = 'GET_TRACE_INITIAL_DATA_SUCCESS'; -export const GET_TRACE_INITIAL_DATA_ERROR = 'GET_TRACE_INITIAL_DATA_ERROR'; -export const GET_TRACE_LOADING_START = 'GET_TRACE_LOADING_START'; -export const GET_TRACE_LOADING_END = 'GET_TRACE_LOADING_END'; - -export const 
UPDATE_TRACE_SELECTED_SERVICE = 'UPDATE_TRACE_SELECTED_SERVICE'; -export const UPDATE_TRACE_SELECTED_OPERATION = - 'UPDATE_TRACE_SELECTED_OPERATION'; -export const UPDATE_TRACE_SELECTED_LATENCY_VALUE = - 'UPDATE_TRACE_SELECTED_LATENCY_VALUE'; -export const UPDATE_TRACE_SELECTED_KIND = 'UPDATE_TRACE_SELECTED_KIND'; -export const UPDATE_TRACE_SELECTED_TAGS = 'UPDATE_TRACE_SELECTED_TAGS'; - -export const UPDATE_SELECTED_AGG_OPTION = 'UPDATE_SELECTED_AGG_OPTION'; -export const UPDATE_SELECTED_ENTITY = 'UPDATE_SELECTED_ENTITY'; -export const UPDATE_SPANS_LOADING = 'UPDATE_SPANS_LOADING'; - -export const UPDATE_SELECTED_TRACE_DATA = 'UPDATE_SELECTED_TRACE_DATA'; -export const UPDATE_AGGREGATES = 'UPDATE_AGGREGATES'; - -export const RESET_TRACE_DATA = 'RESET_TRACE_DATA'; - import { TraceReducer } from 'types/reducer/trace'; -interface GetTraceLoading { - type: typeof GET_TRACE_LOADING_START | typeof GET_TRACE_LOADING_END; -} +export const UPDATE_TRACE_FILTER = 'UPDATE_TRACE_FILTER'; +export const GET_TRACE_FILTER = 'GET_TRACE_FILTER'; +export const UPDATE_TRACE_FILTER_LOADING = 'UPDATE_TRACE_FILTER_LOADING'; -interface UpdateSpansLoading { - type: typeof UPDATE_SPANS_LOADING; +export const SELECT_TRACE_FILTER = 'SELECT_TRACE_FILTER'; +export const UPDATE_ALL_FILTERS = 'UPDATE_ALL_FILTERS'; +export const UPDATE_SELECTED_TAGS = 'UPDATE_SELECTED_TAGS'; +export const UPDATE_TAG_MODAL_VISIBLITY = 'UPDATE_TAG_MODAL_VISIBLITY'; + +export const UPDATE_SPANS_AGGREEGATE = 'UPDATE_SPANS_AGGREEGATE'; + +export const UPDATE_IS_TAG_ERROR = 'UPDATE_IS_TAG_ERROR'; + +export const UPDATE_SELECTED_FUNCTION = 'UPDATE_SELECTED_FUNCTION'; +export const UPDATE_SELECTED_GROUP_BY = 'UPDATE_SELECTED_GROUP_BY'; + +export const UPDATE_TRACE_GRAPH_LOADING = 'UPDATE_TRACE_GRAPH_LOADING'; +export const UPDATE_TRACE_GRAPH_ERROR = 'UPDATE_TRACE_GRAPH_ERROR'; +export const UPDATE_TRACE_GRAPH_SUCCESS = 'UPDATE_TRACE_GRAPH_SUCCESS'; + +export const RESET_TRACE_FILTER = 'RESET_TRACE_FILTER'; +export const 
UPDATE_FILTER_RESPONSE_SELECTED = + 'UPDATE_FILTER_RESPONSE_SELECTED'; +export const UPDATE_FILTER_EXCLUDE = 'UPDATE_FILTER_EXCLUDE'; + +export interface UpdateFilter { + type: typeof UPDATE_TRACE_FILTER; payload: { - loading: boolean; + filter: TraceReducer['filter']; }; } -interface GetTraceInitialData { - type: typeof GET_TRACE_INITIAL_DATA_SUCCESS; +export interface UpdateSpansAggregate { + type: typeof UPDATE_SPANS_AGGREEGATE; payload: { - serviceList: TraceReducer['serviceList']; - selectedTags: TraceReducer['selectedTags']; - operationList: TraceReducer['operationsList']; - tagsSuggestions: TraceReducer['tagsSuggestions']; - spansList: TraceReducer['spanList']; - selectedService: TraceReducer['selectedService']; - selectedOperation: TraceReducer['selectedOperation']; - selectedLatency: TraceReducer['selectedLatency']; - selectedKind: TraceReducer['selectedKind']; spansAggregate: TraceReducer['spansAggregate']; }; } -interface UpdateSelectedDate { - type: typeof UPDATE_SELECTED_TRACE_DATA; +export interface UpdateTagVisiblity { + type: typeof UPDATE_TAG_MODAL_VISIBLITY; payload: { - operationList: TraceReducer['operationsList']; - tagsSuggestions: TraceReducer['tagsSuggestions']; - spansList: TraceReducer['spanList']; - selectedKind: TraceReducer['selectedKind']; - selectedService: TraceReducer['selectedService']; - selectedLatency: TraceReducer['selectedLatency']; - selectedOperation: TraceReducer['selectedOperation']; - spansAggregate: TraceReducer['spansAggregate']; + isTagModalOpen: TraceReducer['isTagModalOpen']; }; } -export interface GetTraceInitialDataError { - type: typeof GET_TRACE_INITIAL_DATA_ERROR; - payload: { - errorMessage: string; - }; -} - -interface UpdateTraceSelectedService { - type: typeof UPDATE_TRACE_SELECTED_SERVICE; - payload: { - selectedService: TraceReducer['selectedService']; - }; -} - -interface UpdateTraceSelectedOperation { - type: typeof UPDATE_TRACE_SELECTED_OPERATION; - payload: { - selectedOperation: 
TraceReducer['selectedOperation']; - }; -} - -interface UpdateTraceSelectedKind { - type: typeof UPDATE_TRACE_SELECTED_KIND; - payload: { - selectedKind: TraceReducer['selectedKind']; - }; -} - -interface UpdateTraceSelectedLatencyValue { - type: typeof UPDATE_TRACE_SELECTED_LATENCY_VALUE; - payload: { - selectedLatency: TraceReducer['selectedLatency']; - }; -} - -interface UpdateTraceSelectedTags { - type: typeof UPDATE_TRACE_SELECTED_TAGS; +export interface UpdateSelectedTags { + type: typeof UPDATE_SELECTED_TAGS; payload: { selectedTags: TraceReducer['selectedTags']; - spansList: TraceReducer['spanList']; - spansAggregate: TraceReducer['spansAggregate']; }; } -interface UpdateSelectedAggOption { - type: typeof UPDATE_SELECTED_AGG_OPTION; +export interface UpdateSelected { + type: typeof UPDATE_FILTER_RESPONSE_SELECTED; payload: { - selectedAggOption: TraceReducer['selectedAggOption']; + filterResponseSelected: TraceReducer['filterResponseSelected']; }; } -interface UpdateSelectedEntity { - type: typeof UPDATE_SELECTED_ENTITY; +export interface UpdateAllFilters { + type: typeof UPDATE_ALL_FILTERS; payload: { - selectedEntity: TraceReducer['selectedEntity']; + filter: TraceReducer['filter']; + selectedFilter: TraceReducer['selectedFilter']; + filterToFetchData: TraceReducer['filterToFetchData']; + current: TraceReducer['spansAggregate']['currentPage']; + selectedTags: TraceReducer['selectedTags']; + userSelected: TraceReducer['userSelectedFilter']; + isFilterExclude: TraceReducer['isFilterExclude']; }; } -interface UpdateAggregates { - type: typeof UPDATE_AGGREGATES; +export interface UpdateFilterLoading { + type: typeof UPDATE_TRACE_FILTER_LOADING; payload: { - spansAggregate: TraceReducer['spansAggregate']; - selectedEntity: TraceReducer['selectedEntity']; - selectedAggOption: TraceReducer['selectedAggOption']; + filterLoading: TraceReducer['filterLoading']; }; } -interface ResetTraceData { - type: typeof RESET_TRACE_DATA; +export interface SelectTraceFilter { + 
type: typeof SELECT_TRACE_FILTER; + payload: { + selectedFilter: TraceReducer['selectedFilter']; + }; +} + +export interface ResetTraceFilter { + type: typeof RESET_TRACE_FILTER; +} + +export interface GetTraceFilter { + type: typeof GET_TRACE_FILTER; + payload: { + filter: TraceReducer['filter']; + }; +} + +export interface UpdateIsTagError { + type: typeof UPDATE_IS_TAG_ERROR; + payload: { + isTagModalError: TraceReducer['isTagModalError']; + }; +} + +export interface UpdateSelectedGroupBy { + type: typeof UPDATE_SELECTED_GROUP_BY; + payload: { + selectedGroupBy: TraceReducer['selectedGroupBy']; + }; +} + +export interface UpdateSelectedFunction { + type: typeof UPDATE_SELECTED_FUNCTION; + payload: { + selectedFunction: TraceReducer['selectedFunction']; + }; +} + +export interface UpdateSpanLoading { + type: typeof UPDATE_TRACE_GRAPH_LOADING; + payload: { + loading: TraceReducer['spansGraph']['loading']; + }; +} + +export interface UpdateSpansError { + type: typeof UPDATE_TRACE_GRAPH_ERROR; + payload: { + error: TraceReducer['spansGraph']['error']; + errorMessage: TraceReducer['spansGraph']['errorMessage']; + }; +} + +export interface UpdateFilterExclude { + type: typeof UPDATE_FILTER_EXCLUDE; + payload: { + isFilterExclude: TraceReducer['isFilterExclude']; + }; +} + +export interface UpdateSpans { + type: typeof UPDATE_TRACE_GRAPH_SUCCESS; + payload: { + data: TraceReducer['spansGraph']['payload']; + }; +} + +export interface ResetTraceFilter { + type: typeof RESET_TRACE_FILTER; } export type TraceActions = - | GetTraceLoading - | GetTraceInitialData - | GetTraceInitialDataError - | UpdateTraceSelectedService - | UpdateTraceSelectedLatencyValue - | UpdateTraceSelectedKind - | UpdateTraceSelectedOperation - | UpdateTraceSelectedTags - | UpdateSelectedDate - | UpdateSelectedAggOption - | UpdateSelectedEntity - | UpdateSpansLoading - | ResetTraceData - | UpdateAggregates; + | UpdateFilter + | GetTraceFilter + | UpdateFilterLoading + | SelectTraceFilter + | 
UpdateAllFilters + | UpdateSelectedTags + | UpdateTagVisiblity + | UpdateSpansAggregate + | UpdateIsTagError + | UpdateSelectedGroupBy + | UpdateSelectedFunction + | UpdateSpanLoading + | UpdateSpansError + | UpdateSpans + | ResetTraceFilter + | UpdateSelected + | UpdateFilterExclude; diff --git a/frontend/src/types/api/trace/getFilters.ts b/frontend/src/types/api/trace/getFilters.ts new file mode 100644 index 0000000000..422e360831 --- /dev/null +++ b/frontend/src/types/api/trace/getFilters.ts @@ -0,0 +1,15 @@ +import { TraceReducer } from 'types/reducer/trace'; + +export interface Props { + start: string; + end: string; + getFilters: string[]; + other: { + [k: string]: string[]; + }; + isFilterExclude: TraceReducer['isFilterExclude']; +} + +export interface PayloadProps { + [key: string]: Record; +} diff --git a/frontend/src/types/api/trace/getServiceList.ts b/frontend/src/types/api/trace/getServiceList.ts deleted file mode 100644 index b01bb5940f..0000000000 --- a/frontend/src/types/api/trace/getServiceList.ts +++ /dev/null @@ -1 +0,0 @@ -export type PayloadProps = string[]; diff --git a/frontend/src/types/api/trace/getServiceOperation.ts b/frontend/src/types/api/trace/getServiceOperation.ts deleted file mode 100644 index 70460a3658..0000000000 --- a/frontend/src/types/api/trace/getServiceOperation.ts +++ /dev/null @@ -1,5 +0,0 @@ -export type PayloadProps = string[]; - -export interface Props { - service: string; -} diff --git a/frontend/src/types/api/trace/getSpanAggregate.ts b/frontend/src/types/api/trace/getSpanAggregate.ts index f81f58b203..0c2232efa9 100644 --- a/frontend/src/types/api/trace/getSpanAggregate.ts +++ b/frontend/src/types/api/trace/getSpanAggregate.ts @@ -1,20 +1,16 @@ +import { TraceReducer } from 'types/reducer/trace'; + export interface Props { start: number; end: number; - service: string; - operation: string; - maxDuration: string; - minDuration: string; - kind: string; - tags: string; - dimension: string; - aggregation_option: string; - 
step: string; + selectedFilter: TraceReducer['selectedFilter']; + limit: number; + offset: number; + selectedTags: TraceReducer['selectedTags']; + isFilterExclude: TraceReducer['isFilterExclude']; } -interface Timestamp { - timestamp: number; - value: number; -} - -export type PayloadProps = Timestamp[]; +export type PayloadProps = { + spans: TraceReducer['spansAggregate']['data']; + totalSpans: number; +}; diff --git a/frontend/src/types/api/trace/getSpans.ts b/frontend/src/types/api/trace/getSpans.ts index 508e7abb9a..9143f2a693 100644 --- a/frontend/src/types/api/trace/getSpans.ts +++ b/frontend/src/types/api/trace/getSpans.ts @@ -1,51 +1,22 @@ -import { GlobalTime } from 'types/actions/globalTime'; - -export interface TraceTagItem { - key: string; - value: string; -} - -export interface pushDStree { - id: string; - name: string; - value: number; - time: number; - startTime: number; - tags: TraceTagItem[]; - children: pushDStree[]; -} - -export type span = [ - number, - string, - string, - string, - string, - string, - string, - string | string[], - string | string[], - string | string[], - pushDStree[], -]; - -export interface SpanList { - events: span[]; - segmentID: string; - columns: string[]; -} - -export type PayloadProps = SpanList[]; +import { TraceReducer } from 'types/reducer/trace'; export interface Props { - start: GlobalTime['minTime']; - end: GlobalTime['maxTime']; - lookback: string; - service: string; - operation: string; - maxDuration: string; - minDuration: string; - kind: string; - limit: string; - tags: string; + start: number; + end: number; + function: TraceReducer['selectedFunction']; + step: number; + groupBy: TraceReducer['selectedGroupBy']; + selectedFilter: TraceReducer['selectedFilter']; + selectedTags: TraceReducer['selectedTags']; + isFilterExclude: TraceReducer['isFilterExclude']; +} + +export interface PayloadProps { + items: Record; +} + +interface SpanData { + timestamp: number; + value?: number; + groupBy?: Record; } diff --git 
a/frontend/src/types/api/trace/getTagFilters.ts b/frontend/src/types/api/trace/getTagFilters.ts new file mode 100644 index 0000000000..31581e98ba --- /dev/null +++ b/frontend/src/types/api/trace/getTagFilters.ts @@ -0,0 +1,13 @@ +export interface Props { + start: number; + end: number; + other: { + [k: string]: string[]; + }; +} + +interface TagsKeys { + tagKeys: string; +} + +export type PayloadProps = TagsKeys[]; diff --git a/frontend/src/types/api/trace/getTags.ts b/frontend/src/types/api/trace/getTags.ts deleted file mode 100644 index 9e6c3a4e2e..0000000000 --- a/frontend/src/types/api/trace/getTags.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { Props as Prop } from './getServiceOperation'; - -interface TagKeys { - tagCount: number; - tagKeys: string; -} - -export type PayloadProps = TagKeys[]; - -export type Props = Prop; diff --git a/frontend/src/types/reducer/trace.ts b/frontend/src/types/reducer/trace.ts index 318a3678ba..a77f4a4b87 100644 --- a/frontend/src/types/reducer/trace.ts +++ b/frontend/src/types/reducer/trace.ts @@ -1,37 +1,107 @@ -import { PayloadProps as ServicePayload } from 'types/api/trace/getServiceList'; -import { PayloadProps as OperationsPayload } from 'types/api/trace/getServiceOperation'; -import { PayloadProps as GetSpansAggregatePayload } from 'types/api/trace/getSpanAggregate'; -import { PayloadProps as GetSpansPayloadProps } from 'types/api/trace/getSpans'; -import { PayloadProps as TagsPayload } from 'types/api/trace/getTags'; - -type TagItemOperator = 'equals' | 'contains' | 'regex'; -export interface TagItem { - key: string; - value: string; - operator: TagItemOperator; -} - -export interface LatencyValue { - min: string; - max: string; -} +import { PayloadProps } from 'types/api/trace/getSpans'; export interface TraceReducer { - selectedService: string; - selectedLatency: LatencyValue; - selectedOperation: string; - selectedKind: '' | '2' | '3' | string; - selectedTags: TagItem[]; - tagsSuggestions: TagsPayload; - errorMessage: 
string; - serviceList: ServicePayload; - spanList: GetSpansPayloadProps; - operationsList: OperationsPayload; - error: boolean; - loading: boolean; - - selectedAggOption: string; - selectedEntity: string; - spansAggregate: GetSpansAggregatePayload; - spansLoading: boolean; + filter: Map>; + filterToFetchData: TraceFilterEnum[]; + filterLoading: boolean; + selectedFilter: Map; + userSelectedFilter: Map; + isFilterExclude: Map; + selectedTags: Tags[]; + isTagModalOpen: boolean; + filterResponseSelected: Set; + isTagModalError: boolean; + spansAggregate: { + loading: boolean; + currentPage: number; + data: SpansAggregateData[]; + error: boolean; + total: number; + pageSize: number; + }; + selectedGroupBy: string; + selectedFunction: string; + spansGraph: { + loading: boolean; + error: boolean; + errorMessage: string; + payload: PayloadProps; + }; } + +interface SpansAggregateData { + timestamp: string; + spanID: string; + traceID: string; + serviceName: string; + operation: string; + durationNano: number; + httpCode: string; + httpMethod: string; +} + +export interface Tags { + Key: string[]; + Operator: OperatorValues; + Values: string[]; +} + +type OperatorValues = 'not in' | 'in'; + +export type TraceFilterEnum = + | 'component' + | 'duration' + | 'httpCode' + | 'httpHost' + | 'httpMethod' + | 'httpRoute' + | 'httpUrl' + | 'operation' + | 'serviceName' + | 'status'; + +export const AllPanelHeading: { + key: TraceFilterEnum; + displayValue: string; +}[] = [ + { + displayValue: 'Component', + key: 'component', + }, + { + key: 'duration', + displayValue: 'Duration', + }, + { + displayValue: 'HTTP Code', + key: 'httpCode', + }, + { + key: 'httpHost', + displayValue: 'HTTP Host', + }, + { + key: 'httpMethod', + displayValue: 'HTTP Method', + }, + { + displayValue: 'HTTP Route', + key: 'httpRoute', + }, + { + key: 'httpUrl', + displayValue: 'HTTP URL', + }, + { + key: 'operation', + displayValue: 'Operation', + }, + { + key: 'serviceName', + displayValue: 'Service Name', 
+ }, + { + key: 'status', + displayValue: 'Status', + }, +]; diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js index 3c2b8488fe..1eb43d9ecf 100644 --- a/frontend/webpack.config.js +++ b/frontend/webpack.config.js @@ -73,6 +73,25 @@ const config = { test: /\.(ttf|eot|woff|woff2)$/, use: ['file-loader'], }, + { + test: /\.less$/i, + use: [ + { + loader: 'style-loader', + }, + { + loader: 'css-loader', + }, + { + loader: 'less-loader', + options: { + lessOptions: { + javascriptEnabled: true, + }, + }, + }, + ], + }, ], }, plugins: plugins, diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index ffc6579085..026781a76b 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -86,6 +86,25 @@ const config = { test: /\.(ttf|eot|woff|woff2)$/, use: ['file-loader'], }, + { + test: /\.less$/i, + use: [ + { + loader: 'style-loader', + }, + { + loader: 'css-loader', + }, + { + loader: 'less-loader', + options: { + lessOptions: { + javascriptEnabled: true, + }, + }, + }, + ], + }, ], }, plugins: plugins, diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 5911c63739..9bdfc03964 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -4208,6 +4208,13 @@ cookie@0.4.0: resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba" integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg== +copy-anything@^2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/copy-anything/-/copy-anything-2.0.3.tgz#842407ba02466b0df844819bbe3baebbe5d45d87" + integrity sha512-GK6QUtisv4fNS+XcI7shX0Gx9ORg7QqIznyfho79JTnX1XhLiyZHfftvGiziqzRiEi/Bjhgpi+D2o7HxJFPnDQ== + dependencies: + is-what "^3.12.0" + copy-descriptor@^0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" @@ -5130,7 +5137,7 @@ date-fns@2.x: resolved 
"https://registry.yarnpkg.com/date-fns/-/date-fns-2.27.0.tgz#e1ff3c3ddbbab8a2eaadbb6106be2929a5a2d92b" integrity sha512-sj+J0Mo2p2X1e306MHq282WS4/A8Pz/95GIFcsPNMPMZVI3EUrAdSv90al1k+p74WGLCruMXk23bfEDZa71X9Q== -dayjs@1.x, dayjs@^1.10.4: +dayjs@1.x, dayjs@^1.10.4, dayjs@^1.10.7: version "1.10.7" resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.10.7.tgz#2cf5f91add28116748440866a0a1d26f3a6ce468" integrity sha512-P6twpd70BcPK34K26uJ1KT3wlhpuOAPoMwJzpsIWUxHZ7wpmbdZL/hQqBDfz7hGurYSa5PhzdhDHtt319hL3ig== @@ -5154,7 +5161,7 @@ debug@4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.2.0, debug@^4.3.1, d dependencies: ms "2.1.2" -debug@^3.1.0, debug@^3.1.1, debug@^3.2.7: +debug@^3.1.0, debug@^3.1.1, debug@^3.2.6, debug@^3.2.7: version "3.2.7" resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== @@ -5540,6 +5547,13 @@ envinfo@^7.7.3: resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.8.1.tgz#06377e3e5f4d379fea7ac592d5ad8927e0c4d475" integrity sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw== +errno@^0.1.1: + version "0.1.8" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.8.tgz#8bb3e9c7d463be4976ff888f76b4809ebc2e811f" + integrity sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A== + dependencies: + prr "~1.0.1" + error-ex@^1.3.1: version "1.3.2" resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" @@ -6830,7 +6844,7 @@ husky@4.3.8: slash "^3.0.0" which-pm-runs "^1.0.0" -iconv-lite@0.4, iconv-lite@0.4.24: +iconv-lite@0.4, iconv-lite@0.4.24, iconv-lite@^0.4.4: version "0.4.24" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" integrity 
sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== @@ -6870,6 +6884,11 @@ iltorb@^2.4.3: prebuild-install "^5.3.3" which-pm-runs "^1.0.0" +image-size@~0.5.0: + version "0.5.5" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c" + integrity sha1-Cd/Uq50g4p6xw+gLiZA3jfnjy5w= + import-fresh@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" @@ -7284,6 +7303,11 @@ is-weakref@^1.0.1: dependencies: call-bind "^1.0.0" +is-what@^3.12.0: + version "3.14.1" + resolved "https://registry.yarnpkg.com/is-what/-/is-what-3.14.1.tgz#e1222f46ddda85dead0fd1c9df131760e77755c1" + integrity sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA== + is-windows@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" @@ -7962,6 +7986,11 @@ kleur@^3.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== +klona@^2.0.4: + version "2.0.5" + resolved "https://registry.yarnpkg.com/klona/-/klona-2.0.5.tgz#d166574d90076395d9963aa7a928fabb8d76afbc" + integrity sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ== + layout-bmfont-text@^1.2.0: version "1.3.4" resolved "https://registry.yarnpkg.com/layout-bmfont-text/-/layout-bmfont-text-1.3.4.tgz#f20f2c5464774f48da6ce8a997fbce6d46945b81" @@ -7976,6 +8005,13 @@ lazy-ass@^1.6.0: resolved "https://registry.yarnpkg.com/lazy-ass/-/lazy-ass-1.6.0.tgz#7999655e8646c17f089fdd187d150d3324d54513" integrity sha1-eZllXoZGwX8In90YfRUNMyTVRRM= +less-loader@^10.2.0: + version "10.2.0" + resolved 
"https://registry.yarnpkg.com/less-loader/-/less-loader-10.2.0.tgz#97286d8797dc3dc05b1d16b0ecec5f968bdd4e32" + integrity sha512-AV5KHWvCezW27GT90WATaDnfXBv99llDbtaj4bshq6DvAihMdNjaPDcUMa6EXKLRF+P2opFenJp89BXg91XLYg== + dependencies: + klona "^2.0.4" + less-plugin-npm-import@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/less-plugin-npm-import/-/less-plugin-npm-import-2.1.0.tgz#823e6986c93318a98171ca858848b6bead55bf3e" @@ -7984,6 +8020,23 @@ less-plugin-npm-import@^2.1.0: promise "~7.0.1" resolve "~1.1.6" +less@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/less/-/less-4.1.2.tgz#6099ee584999750c2624b65f80145f8674e4b4b0" + integrity sha512-EoQp/Et7OSOVu0aJknJOtlXZsnr8XE8KwuzTHOLeVSEx8pVWUICc8Q0VYRHgzyjX78nMEyC/oztWFbgyhtNfDA== + dependencies: + copy-anything "^2.0.1" + parse-node-version "^1.0.1" + tslib "^2.3.0" + optionalDependencies: + errno "^0.1.1" + graceful-fs "^4.1.2" + image-size "~0.5.0" + make-dir "^2.1.0" + mime "^1.4.1" + needle "^2.5.2" + source-map "~0.6.0" + leven@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" @@ -8216,7 +8269,7 @@ lz-string@^1.4.4: resolved "https://registry.yarnpkg.com/lz-string/-/lz-string-1.4.4.tgz#c0d8eaf36059f705796e1e344811cf4c498d3a26" integrity sha1-wNjq82BZ9wV5bh40SBHPTEmNOiY= -make-dir@^2.0.0: +make-dir@^2.0.0, make-dir@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== @@ -8343,7 +8396,7 @@ mime-types@^2.1.12, mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.17, dependencies: mime-db "1.51.0" -mime@1.6.0, mime@^1.3.4: +mime@1.6.0, mime@^1.3.4, mime@^1.4.1: version "1.6.0" resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" integrity 
sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== @@ -8383,7 +8436,7 @@ mini-create-react-context@^0.4.0: "@babel/runtime" "^7.12.1" tiny-warning "^1.0.3" -mini-css-extract-plugin@^2.4.5: +mini-css-extract-plugin@2.4.5: version "2.4.5" resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.4.5.tgz#191d6c170226037212c483af1180b4010b7b9eef" integrity sha512-oEIhRucyn1JbT/1tU2BhnwO6ft1jjH1iCX9Gc59WFMg0n5773rQU0oyQ0zzeYFFuBfONaRbQJyGoPtuNseMxjA== @@ -8517,6 +8570,15 @@ natural-compare@^1.4.0: resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= +needle@^2.5.2: + version "2.9.1" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.9.1.tgz#22d1dffbe3490c2b83e301f7709b6736cd8f2684" + integrity sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ== + dependencies: + debug "^3.2.6" + iconv-lite "^0.4.4" + sax "^1.2.4" + negotiator@0.6.2: version "0.6.2" resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" @@ -9030,6 +9092,14 @@ parse-json@^5.0.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" + +parse-node-version@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parse-node-version/-/parse-node-version-1.0.1.tgz#e2b5dbede00e7fa9bc363607f53327e8b073189b" + integrity sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA== + +parse5@6.0.1: + parse5-htmlparser2-tree-adapter@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" @@ -9038,6 +9108,7 @@ parse5-htmlparser2-tree-adapter@^6.0.1: parse5 "^6.0.1" parse5@6.0.1, parse5@^6.0.1: + version "6.0.1" resolved 
"https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== @@ -9649,6 +9720,11 @@ proxy-from-env@1.0.0: resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.0.0.tgz#33c50398f70ea7eb96d21f7b817630a55791c7ee" integrity sha1-M8UDmPcOp+uW0h97gXYwpVeRx+4= +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY= + psl@^1.1.28, psl@^1.1.33: version "1.8.0" resolved "https://registry.yarnpkg.com/psl/-/psl-1.8.0.tgz#9326f8bcfb013adcc005fdff056acce020e51c24" @@ -10633,7 +10709,7 @@ sane@^4.0.3: minimist "^1.1.1" walker "~1.0.5" -sax@>=0.6.0: +sax@>=0.6.0, sax@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== @@ -11690,7 +11766,7 @@ tslib@^1.8.1: resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.0.3: +tslib@^2.0.3, tslib@^2.3.0: version "2.3.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.3.1.tgz#e8a335add5ceae51aa261d32a490158ef042ef01" integrity sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw== From 1ee2e302e2ad9c34d49f1a329051b61137363e3c Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Wed, 9 Feb 2022 11:44:08 +0530 Subject: [PATCH 63/81] Feature(FE): signup page (#642) * chore: icon is updated * feat: signup page design is updated * chore: set get user pref is added * chore: svg is added * feat: signup page is updated * feat: signup page is updated --- frontend/public/signoz-signup.svg 
| 9 + frontend/public/signoz.svg | 2 +- frontend/src/api/user/getPreference.ts | 24 ++ frontend/src/api/user/getVersion.ts | 24 ++ frontend/src/api/user/setPreference.ts | 26 +++ frontend/src/api/user/signup.ts | 3 +- frontend/src/pages/SignUp/SignUp.tsx | 219 ++++++++++++++++++ frontend/src/pages/SignUp/index.tsx | 171 +++----------- frontend/src/pages/SignUp/styles.ts | 54 +++-- frontend/src/store/actions/app/index.ts | 1 - .../src/store/actions/app/userLoggedIn.ts | 14 -- .../src/types/api/user/getUserPreference.ts | 6 + frontend/src/types/api/user/getVersion.ts | 3 + .../src/types/api/user/setUserPreference.ts | 8 + frontend/src/types/api/user/signup.ts | 1 + 15 files changed, 393 insertions(+), 172 deletions(-) create mode 100644 frontend/public/signoz-signup.svg create mode 100644 frontend/src/api/user/getPreference.ts create mode 100644 frontend/src/api/user/getVersion.ts create mode 100644 frontend/src/api/user/setPreference.ts create mode 100644 frontend/src/pages/SignUp/SignUp.tsx delete mode 100644 frontend/src/store/actions/app/userLoggedIn.ts create mode 100644 frontend/src/types/api/user/getUserPreference.ts create mode 100644 frontend/src/types/api/user/getVersion.ts create mode 100644 frontend/src/types/api/user/setUserPreference.ts diff --git a/frontend/public/signoz-signup.svg b/frontend/public/signoz-signup.svg new file mode 100644 index 0000000000..67c45b1c23 --- /dev/null +++ b/frontend/public/signoz-signup.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/frontend/public/signoz.svg b/frontend/public/signoz.svg index 73df99420e..53a3a23754 100644 --- a/frontend/public/signoz.svg +++ b/frontend/public/signoz.svg @@ -2,4 +2,4 @@ - + \ No newline at end of file diff --git a/frontend/src/api/user/getPreference.ts b/frontend/src/api/user/getPreference.ts new file mode 100644 index 0000000000..b284eab47b --- /dev/null +++ b/frontend/src/api/user/getPreference.ts @@ -0,0 +1,24 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 
'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps } from 'types/api/user/getUserPreference'; + +const getPreference = async (): Promise< + SuccessResponse | ErrorResponse +> => { + try { + const response = await axios.get(`/userPreferences`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getPreference; diff --git a/frontend/src/api/user/getVersion.ts b/frontend/src/api/user/getVersion.ts new file mode 100644 index 0000000000..a65ede2f0d --- /dev/null +++ b/frontend/src/api/user/getVersion.ts @@ -0,0 +1,24 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps } from 'types/api/user/getVersion'; + +const getVersion = async (): Promise< + SuccessResponse | ErrorResponse +> => { + try { + const response = await axios.get(`/version`); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default getVersion; diff --git a/frontend/src/api/user/setPreference.ts b/frontend/src/api/user/setPreference.ts new file mode 100644 index 0000000000..de8e309b65 --- /dev/null +++ b/frontend/src/api/user/setPreference.ts @@ -0,0 +1,26 @@ +import axios from 'api'; +import { ErrorResponseHandler } from 'api/ErrorResponseHandler'; +import { AxiosError } from 'axios'; +import { ErrorResponse, SuccessResponse } from 'types/api'; +import { PayloadProps, Props } from 'types/api/user/setUserPreference'; + +const setPreference = async ( + props: Props, +): Promise | ErrorResponse> => { + try { + const response = await 
axios.post(`/userPreferences`, { + ...props, + }); + + return { + statusCode: 200, + error: null, + message: response.data.status, + payload: response.data, + }; + } catch (error) { + return ErrorResponseHandler(error as AxiosError); + } +}; + +export default setPreference; diff --git a/frontend/src/api/user/signup.ts b/frontend/src/api/user/signup.ts index 9635f70e8f..8778b5c037 100644 --- a/frontend/src/api/user/signup.ts +++ b/frontend/src/api/user/signup.ts @@ -9,8 +9,7 @@ const signup = async ( ): Promise | ErrorResponse> => { try { const response = await axios.post(`/user`, { - email: props.email, - name: props.name, + ...props, }); return { diff --git a/frontend/src/pages/SignUp/SignUp.tsx b/frontend/src/pages/SignUp/SignUp.tsx new file mode 100644 index 0000000000..37253a0aeb --- /dev/null +++ b/frontend/src/pages/SignUp/SignUp.tsx @@ -0,0 +1,219 @@ +import { + Button, + Input, + notification, + Typography, + Switch, + Space, + Card, +} from 'antd'; +import signup from 'api/user/signup'; +import ROUTES from 'constants/routes'; +import history from 'lib/history'; +import React, { useEffect, useState } from 'react'; +import setLocalStorageKey from 'api/browser/localstorage/set'; + +import AppActions from 'types/actions'; +const { Title } = Typography; +import { PayloadProps } from 'types/api/user/getUserPreference'; + +import { + ButtonContainer, + Container, + FormWrapper, + Label, + LeftContainer, + Logo, + MarginTop, +} from './styles'; +import { IS_LOGGED_IN } from 'constants/auth'; +import { useDispatch } from 'react-redux'; +import { Dispatch } from 'redux'; +import setPreference from 'api/user/setPreference'; + +const Signup = ({ version, userpref }: SignupProps): JSX.Element => { + const [loading, setLoading] = useState(false); + + const [firstName, setFirstName] = useState(''); + const [email, setEmail] = useState(''); + const [organizationName, setOrganisationName] = useState(''); + const [hasOptedUpdates, setHasOptedUpdates] = useState( + 
userpref.hasOptedUpdates, + ); + const [isAnonymous, setisAnonymous] = useState(userpref.isAnonymous); + + const dispatch = useDispatch>(); + + useEffect(() => { + setisAnonymous(userpref.isAnonymous); + setHasOptedUpdates(userpref.hasOptedUpdates); + }, []); + + const setState = ( + value: string, + setFunction: React.Dispatch>, + ) => { + setFunction(value); + }; + + const handleSubmit = (e: React.FormEvent): void => { + (async (): Promise => { + try { + e.preventDefault(); + setLoading(true); + + const userPrefernceResponse = await setPreference({ + isAnonymous, + hasOptedUpdates, + }); + + if (userPrefernceResponse.statusCode === 200) { + const response = await signup({ + email: email, + name: firstName, + organizationName, + }); + + if (response.statusCode === 200) { + setLocalStorageKey(IS_LOGGED_IN, 'yes'); + dispatch({ + type: 'LOGGED_IN', + }); + + history.push(ROUTES.APPLICATION); + } else { + notification.error({ + message: 'Something went wrong', + }); + } + } else { + notification.error({ + message: 'Something went wrong', + }); + } + + setLoading(false); + } catch (error) { + notification.error({ + message: 'Something went wrong', + }); + setLoading(false); + } + })(); + }; + + console.log(userpref); + + const onSwitchHandler = ( + value: boolean, + setFunction: React.Dispatch>, + ) => { + setFunction(value); + }; + + return ( + + + + + SigNoz + + + Monitor your applications. Find what is causing issues. + + + SigNoz {version} + + + + +
+ Create your account +
+ + { + setState(e.target.value, setEmail); + }} + required + id="signupEmail" + /> +
+ +
+ + { + setState(e.target.value, setFirstName); + }} + required + id="signupFirstName" + /> +
+
+ + { + setState(e.target.value, setOrganisationName); + }} + required + id="organizationName" + /> +
+ + + + onSwitchHandler(value, setHasOptedUpdates)} + checked={hasOptedUpdates} + /> + Keep me updated on new SigNoz features + + + + + + onSwitchHandler(value, setisAnonymous)} + checked={isAnonymous} + /> + + Anonymise my usage date. We collect data to measure product usage + + + + + + + + +
+
+ ); +}; + +interface SignupProps { + version: string; + userpref: PayloadProps; +} + +export default Signup; diff --git a/frontend/src/pages/SignUp/index.tsx b/frontend/src/pages/SignUp/index.tsx index f5ac704dc8..88131c7143 100644 --- a/frontend/src/pages/SignUp/index.tsx +++ b/frontend/src/pages/SignUp/index.tsx @@ -1,148 +1,43 @@ -import { Button, Input, notification, Typography } from 'antd'; -import signup from 'api/user/signup'; -import ROUTES from 'constants/routes'; -import history from 'lib/history'; -import React, { useState } from 'react'; -import { connect } from 'react-redux'; -import { bindActionCreators } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; -import { UserLoggedIn } from 'store/actions'; -import AppActions from 'types/actions'; +import useFetch from 'hooks/useFetch'; +import React from 'react'; +import SignUpComponent from './SignUp'; +import getVersion from 'api/user/getVersion'; +import { PayloadProps as VersionPayload } from 'types/api/user/getVersion'; +import { PayloadProps as UserPrefPayload } from 'types/api/user/getUserPreference'; -import { - ButtonContainer, - Container, - FormWrapper, - LogoImageContainer, - Title, -} from './styles'; +import Spinner from 'components/Spinner'; +import { Typography } from 'antd'; +import getPreference from 'api/user/getPreference'; -const Signup = ({ loggedIn }: SignupProps): JSX.Element => { - const [notificationsInstance, Element] = notification.useNotification(); +const SignUp = () => { + const versionResponse = useFetch(getVersion); - const [loading, setLoading] = useState(false); + const userPrefResponse = useFetch(getPreference); - const [formState, setFormState] = useState({ - firstName: { value: '' }, - email: { value: '' }, - }); + if (versionResponse.error || userPrefResponse.error) { + return ( + + {versionResponse.errorMessage || + userPrefResponse.errorMessage || + 'Somehthing went wrong'} + + ); + } - const updateForm = ( - name: string, - target: EventTarget & 
HTMLInputElement, - ): void => { - if (name === 'firstName') { - setFormState({ - ...formState, - firstName: { ...formState.firstName, value: target.value }, - }); - } else if (name === 'email') { - setFormState({ - ...formState, - email: { ...formState.email, value: target.value }, - }); - } - }; + if ( + versionResponse.loading || + versionResponse.payload === undefined || + userPrefResponse.loading || + userPrefResponse.payload === undefined + ) { + return ; + } - const handleSubmit = (e: React.FormEvent): void => { - (async (): Promise => { - try { - e.preventDefault(); - setLoading(true); - const payload = { - first_name: formState.firstName, - email: formState.email, - }; + const version = versionResponse.payload.version; - const response = await signup({ - email: payload.email.value, - name: payload.first_name.value, - }); + const userpref = userPrefResponse.payload; - if (response.statusCode === 200) { - loggedIn(); - history.push(ROUTES.APPLICATION); - } else { - notificationsInstance.error({ - message: 'Something went wrong', - }); - } - setLoading(false); - } catch (error) { - notificationsInstance.error({ - message: 'Something went wrong', - }); - setLoading(false); - } - })(); - }; - - return ( -
- {Element} - - - Create your account - - Monitor your applications. Find what is causing issues. - - - - - - -
-
- - updateForm('email', e.target)} - required - id="signupEmail" - /> -
- -
- - updateForm('firstName', e.target)} - required - id="signupFirstName" - /> -
- - - - - -
-
- ); + return ; }; -interface DispatchProps { - loggedIn: () => void; -} - -const mapDispatchToProps = ( - dispatch: ThunkDispatch, -): DispatchProps => ({ - loggedIn: bindActionCreators(UserLoggedIn, dispatch), -}); - -type SignupProps = DispatchProps; - -export default connect(null, mapDispatchToProps)(Signup); +export default SignUp; diff --git a/frontend/src/pages/SignUp/styles.ts b/frontend/src/pages/SignUp/styles.ts index e90f440a92..ca9e899165 100644 --- a/frontend/src/pages/SignUp/styles.ts +++ b/frontend/src/pages/SignUp/styles.ts @@ -1,31 +1,53 @@ -import { Space, Typography } from 'antd'; +import { Card, Space } from 'antd'; +import React from 'react'; import styled from 'styled-components'; -export const Container = styled(Space)` +export const Container = styled.div` &&& { - padding-left: 2rem; - margin-top: 3rem; + display: flex; + justify-content: center; + align-items: center; + min-height: 100vh; + + max-width: 1024px; + margin: 0 auto; } `; -export const Title = styled(Typography)` - &&& { - font-size: 1rem; - font-weight: bold; - } -`; - -export const FormWrapper = styled.div` +export const FormWrapper = styled(Card)` display: flex; justify-content: center; + max-width: 432px; + flex: 1; +`; - margin-top: 2rem; +export const Label = styled.label` + margin-bottom: 11px; + margin-top: 19px; + display: inline-block; + font-size: 1rem; + line-height: 24px; +`; + +export const LeftContainer = styled(Space)` + flex: 1; `; export const ButtonContainer = styled.div` - margin-top: 0.5rem; + margin-top: 1.8125rem; + display: flex; + justify-content: center; + align-items: center; `; -export const LogoImageContainer = styled.img` - width: 320px; +interface Props { + marginTop: React.CSSProperties['marginTop']; +} + +export const MarginTop = styled.div` + margin-top: ${({ marginTop }) => marginTop}; +`; + +export const Logo = styled.img` + width: 60px; `; diff --git a/frontend/src/store/actions/app/index.ts b/frontend/src/store/actions/app/index.ts index 
e6c98da1d9..308027be21 100644 --- a/frontend/src/store/actions/app/index.ts +++ b/frontend/src/store/actions/app/index.ts @@ -1,3 +1,2 @@ export * from './toggleDarkMode'; export * from './toggleSettingsTab'; -export * from './userLoggedIn'; diff --git a/frontend/src/store/actions/app/userLoggedIn.ts b/frontend/src/store/actions/app/userLoggedIn.ts deleted file mode 100644 index 3f03415b72..0000000000 --- a/frontend/src/store/actions/app/userLoggedIn.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { IS_LOGGED_IN } from 'constants/auth'; -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; -import setLocalStorageKey from 'api/browser/localstorage/set'; - -export const UserLoggedIn = (): ((dispatch: Dispatch) => void) => { - return (dispatch: Dispatch): void => { - setLocalStorageKey(IS_LOGGED_IN, 'yes'); - - dispatch({ - type: 'LOGGED_IN', - }); - }; -}; diff --git a/frontend/src/types/api/user/getUserPreference.ts b/frontend/src/types/api/user/getUserPreference.ts new file mode 100644 index 0000000000..fe0a30bf41 --- /dev/null +++ b/frontend/src/types/api/user/getUserPreference.ts @@ -0,0 +1,6 @@ +export interface PayloadProps { + hasOptedUpdates: boolean; + id: number; + isAnonymous: boolean; + uuid: string; +} diff --git a/frontend/src/types/api/user/getVersion.ts b/frontend/src/types/api/user/getVersion.ts new file mode 100644 index 0000000000..78f07363ef --- /dev/null +++ b/frontend/src/types/api/user/getVersion.ts @@ -0,0 +1,3 @@ +export interface PayloadProps { + version: string; +} diff --git a/frontend/src/types/api/user/setUserPreference.ts b/frontend/src/types/api/user/setUserPreference.ts new file mode 100644 index 0000000000..92ebd3d1b1 --- /dev/null +++ b/frontend/src/types/api/user/setUserPreference.ts @@ -0,0 +1,8 @@ +export interface Props { + isAnonymous: boolean; + hasOptedUpdates: boolean; +} + +export interface PayloadProps { + data: string; +} diff --git a/frontend/src/types/api/user/signup.ts 
b/frontend/src/types/api/user/signup.ts index 00edef563f..8e6feca0d6 100644 --- a/frontend/src/types/api/user/signup.ts +++ b/frontend/src/types/api/user/signup.ts @@ -1,4 +1,5 @@ export interface Props { email: string; name: string; + organizationName: string; } From 07f4fcb21646de027771113d595846ec779d2c4b Mon Sep 17 00:00:00 2001 From: Pranshu Chittora Date: Wed, 9 Feb 2022 11:48:54 +0530 Subject: [PATCH 64/81] fix(FE): Sidebar navigation when collapsed (#686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update README.md * ci(k3s): 💚 fix correct raw github URL for hotrod (#661) Signed-off-by: Prashant Shahi (cherry picked from commit d92a3e64f58477af46fc56781489a4be05ec42f0) * chore: 🚚 rename config .yaml to yml for behaviorbot (#673) Signed-off-by: Prashant Shahi (cherry picked from commit cd04a39d3dcc1579dc5807ad9cece4eed4437f0d) * fix(FE): sidebar navigation when collapsed Co-authored-by: Pranay Prateek Co-authored-by: Prashant Shahi Co-authored-by: Ankit Nayan --- frontend/src/container/SideNav/index.tsx | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/frontend/src/container/SideNav/index.tsx b/frontend/src/container/SideNav/index.tsx index c628d939f1..c96e46b18d 100644 --- a/frontend/src/container/SideNav/index.tsx +++ b/frontend/src/container/SideNav/index.tsx @@ -73,10 +73,12 @@ const SideNav = ({ toggleDarkMode }: Props): JSX.Element => { mode="inline" > {menus.map(({ to, Icon, name }) => ( - }> -
onClickHandler(to)}> - {name} -
+ } + onClick={(): void => onClickHandler(to)} + > + {name} ))} From acbe7f91cbd05225d07c65cdaf3524c9fe2a7f1a Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Wed, 9 Feb 2022 11:50:29 +0530 Subject: [PATCH 65/81] bug(UI): optimisation config is updated (#650) --- frontend/webpack.config.prod.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index 026781a76b..1e7f2198f0 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -110,7 +110,7 @@ const config = { plugins: plugins, optimization: { chunkIds: 'named', - concatenateModules: true, + concatenateModules: false, emitOnErrors: true, flagIncludedChunks: true, innerGraph: true, //tells webpack whether to conduct inner graph analysis for unused exports. From 51b11d01196dbab98d6cdaa073c7688d098bbef0 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 9 Feb 2022 16:00:53 +0530 Subject: [PATCH 66/81] =?UTF-8?q?fix(makefile):=20=F0=9F=A9=B9=20buildx=20?= =?UTF-8?q?fix=20for=20pushing=20images?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Makefile | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 21edec847d..b8528257ed 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ build-frontend-amd64: @echo "--> Building frontend docker image for amd64" @echo "------------------" @cd $(FRONTEND_DIRECTORY) && \ - docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" + docker build -f Dockerfile --no-cache -t $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --build-arg TARGETPLATFORM="linux/amd64" . 
# Step to build and push docker image of frontend(used in push pipeline) build-push-frontend: @@ -50,9 +50,7 @@ ifndef DOCKER_SECOND_TAG docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . else @cd $(FRONTEND_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 . \ - --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) \ - --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) + docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) . endif # Steps to build and push docker image of query service @@ -63,9 +61,7 @@ build-query-service-amd64: @echo "--> Building query-service docker image for amd64" @echo "------------------" @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . \ - --build-arg TARGETPLATFORM="linux/amd64" \ - --build-arg LD_FLAGS=$(LD_FLAGS) + docker build -f Dockerfile --no-cache -t $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETPLATFORM="linux/amd64" --build-arg LD_FLAGS=$(LD_FLAGS) # Step to build and push docker image of query in amd64 and arm64 (used in push pipeline) build-push-query-service: @@ -74,17 +70,10 @@ build-push-query-service: @echo "------------------" ifndef DOCKER_SECOND_TAG @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push \ - --platform linux/arm64,linux/amd64 . 
\ - --build-arg LD_FLAGS=$(LD_FLAGS) \ - --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) + docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . else @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache \ - --push --platform linux/arm64,linux/amd64 . \ - --build-arg LD_FLAGS=$(LD_FLAGS) \ - --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) \ - --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) + docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) . endif # Steps to build and push docker image of flattener From ffd2c9b466bfbb53ca5c2c9871735218f71cb157 Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Wed, 9 Feb 2022 16:47:34 +0530 Subject: [PATCH 67/81] bug(UI): frontend build is fixed (#692) --- frontend/src/constants/env.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/frontend/src/constants/env.ts b/frontend/src/constants/env.ts index 6e12260df1..e7fe9610e0 100644 --- a/frontend/src/constants/env.ts +++ b/frontend/src/constants/env.ts @@ -1,3 +1,7 @@ export const ENVIRONMENT = { - baseURL: process?.env?.FRONTEND_API_ENDPOINT || process.env.GITPOD_WORKSPACE_URL.replace('://','://8080-') ||'http://localhost:8080'|| '', + baseURL: + process?.env?.FRONTEND_API_ENDPOINT || + process.env?.GITPOD_WORKSPACE_URL?.replace('://', '://8080-') || + 'http://localhost:8080' || + '', }; From 03eac8963f7ad21579f99cd834220a7d4a0f9fd6 Mon Sep 17 00:00:00 2001 From: palash-signoz Date: Wed, 9 Feb 2022 17:22:21 +0530 Subject: [PATCH 68/81] chore: Env fix (#693) * bug(UI): frontend build is fixed * 
chore; build is fixed * chore: build is fixed --- frontend/src/constants/env.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/frontend/src/constants/env.ts b/frontend/src/constants/env.ts index e7fe9610e0..2c5230dfcc 100644 --- a/frontend/src/constants/env.ts +++ b/frontend/src/constants/env.ts @@ -1,7 +1,6 @@ export const ENVIRONMENT = { baseURL: process?.env?.FRONTEND_API_ENDPOINT || - process.env?.GITPOD_WORKSPACE_URL?.replace('://', '://8080-') || - 'http://localhost:8080' || + process?.env?.GITPOD_WORKSPACE_URL?.replace('://', '://8080-') || '', }; From 420f601a68b33e44fcc59b7d560a222e0f65a9ee Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 9 Feb 2022 20:35:25 +0530 Subject: [PATCH 69/81] =?UTF-8?q?ci(push):=20=F0=9F=91=B7=20add=20develop?= =?UTF-8?q?=20branch=20and=20remove=20second=20tag=20(#694)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Prashant Shahi --- .github/workflows/push.yaml | 25 +++++++++++++++++++------ Makefile | 10 ---------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 76385569e1..7cf2f7e0a3 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -7,6 +7,7 @@ on: - 'frontend/**' branches: - main + - develop tags: - ^v[0-9]+.[0-9]+.[0-9]+$ @@ -33,10 +34,16 @@ jobs: - name: Get branch name id: branch-name uses: tj-actions/branch-names@v5.1 + - name: Set docker tag environment + run: | + if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then + echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV + elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then + echo "DOCKER_TAG=latest" >> $GITHUB_ENV + else + echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV + fi - name: Build and push docker image - env: - DOCKER_TAG: ${{ steps.short-sha.outputs.sha }} - DOCKER_SECOND_TAG: ${{ 
steps.branch-name.outputs.current_branch }} run: make build-push-query-service image-build-and-push-frontend: @@ -69,8 +76,14 @@ jobs: - name: Get branch name id: branch-name uses: tj-actions/branch-names@v5.1 + - name: Set docker tag environment + run: | + if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then + echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV + elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then + echo "DOCKER_TAG=latest" >> $GITHUB_ENV + else + echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV + fi - name: Build and push docker image - env: - DOCKER_TAG: ${{ steps.short-sha.outputs.sha }} - DOCKER_SECOND_TAG: ${{ steps.branch-name.outputs.current_branch }} run: make build-push-frontend diff --git a/Makefile b/Makefile index b8528257ed..625a0c6953 100644 --- a/Makefile +++ b/Makefile @@ -45,13 +45,8 @@ build-push-frontend: @echo "------------------" @echo "--> Building and pushing frontend docker image" @echo "------------------" -ifndef DOCKER_SECOND_TAG @cd $(FRONTEND_DIRECTORY) && \ docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) . -else - @cd $(FRONTEND_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/amd64 --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_TAG) --tag $(REPONAME)/$(FRONTEND_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) . 
-endif # Steps to build and push docker image of query service .PHONY: build-query-service-amd64 build-push-query-service @@ -68,13 +63,8 @@ build-push-query-service: @echo "------------------" @echo "--> Building and pushing query-service docker image" @echo "------------------" -ifndef DOCKER_SECOND_TAG @cd $(QUERY_SERVICE_DIRECTORY) && \ docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) . -else - @cd $(QUERY_SERVICE_DIRECTORY) && \ - docker buildx build --file Dockerfile --progress plane --no-cache --push --platform linux/arm64,linux/amd64 --build-arg LD_FLAGS=$(LD_FLAGS) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_TAG) --tag $(REPONAME)/$(QUERY_SERVICE_DOCKER_IMAGE):$(DOCKER_SECOND_TAG) . -endif # Steps to build and push docker image of flattener .PHONY: build-flattener-amd64 build-push-flattener From a5bf4c1a61aadbb01f9af5277369a3001f80894f Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Wed, 9 Feb 2022 21:46:07 +0530 Subject: [PATCH 70/81] =?UTF-8?q?ci(push):=20=F0=9F=91=B7=20push=20workflo?= =?UTF-8?q?w=20update=20(#695)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci(push): 👷 remove prefix v for docker images Signed-off-by: Prashant Shahi * ci(push): 👷 remove path trigger and update tags regex Signed-off-by: Prashant Shahi --- .github/workflows/push.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 7cf2f7e0a3..2f39f28af1 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -2,14 +2,11 @@ name: push on: push: - paths: - - 'pkg/query-service/**' - - 'frontend/**' branches: - main - develop tags: - - ^v[0-9]+.[0-9]+.[0-9]+$ + - v* jobs: @@ -37,7 +34,9 @@ jobs: - name: Set docker tag environment run: | if [ '${{ 
steps.branch-name.outputs.is_tag }}' == 'true' ]; then - echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV + tag="${{ steps.branch-name.outputs.tag }}" + tag="${tag:1}" + echo "DOCKER_TAG=$tag" >> $GITHUB_ENV elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then echo "DOCKER_TAG=latest" >> $GITHUB_ENV else @@ -79,7 +78,9 @@ jobs: - name: Set docker tag environment run: | if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then - echo "DOCKER_TAG=${{ steps.branch-name.outputs.tag }}" >> $GITHUB_ENV + tag="${{ steps.branch-name.outputs.tag }}" + tag="${tag:1}" + echo "DOCKER_TAG=$tag" >> $GITHUB_ENV elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then echo "DOCKER_TAG=latest" >> $GITHUB_ENV else From 9bf37b391e91866851ae2f5a26826f3b1579eaab Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Wed, 9 Feb 2022 21:52:59 +0530 Subject: [PATCH 71/81] release: v0.6.0 --- deploy/docker/clickhouse-setup/docker-compose.arm.yaml | 8 ++++---- deploy/docker/clickhouse-setup/docker-compose.yaml | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml index cd68ead27f..0a09efbca3 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -23,7 +23,7 @@ services: - '--storage.path=/data' query-service: - image: signoz/query-service:0.5.4 + image: signoz/query-service:0.6.0 container_name: query-service command: ["-config=/root/config/prometheus.yml"] volumes: @@ -40,7 +40,7 @@ services: condition: service_healthy frontend: - image: signoz/frontend:0.5.4 + image: signoz/frontend:0.6.0 container_name: frontend depends_on: - query-service @@ -50,7 +50,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/otelcontribcol:0.4.3 + image: signoz/otelcontribcol:0.5.0 
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml @@ -63,7 +63,7 @@ services: condition: service_healthy otel-collector-metrics: - image: signoz/otelcontribcol:0.4.3 + image: signoz/otelcontribcol:0.5.0 command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 71b678459f..0f5109b635 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -26,7 +26,7 @@ services: query-service: - image: signoz/query-service:0.5.4 + image: signoz/query-service:0.6.0 container_name: query-service command: ["-config=/root/config/prometheus.yml"] volumes: @@ -43,7 +43,7 @@ services: condition: service_healthy frontend: - image: signoz/frontend:0.5.4 + image: signoz/frontend:0.6.0 container_name: frontend depends_on: - query-service @@ -53,7 +53,7 @@ services: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/otelcontribcol:0.4.3 + image: signoz/otelcontribcol:0.5.0 command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml @@ -66,7 +66,7 @@ services: condition: service_healthy otel-collector-metrics: - image: signoz/otelcontribcol:0.4.3 + image: signoz/otelcontribcol:0.5.0 command: ["--config=/etc/otel-collector-metrics-config.yaml", "--mem-ballast-size-mib=683"] volumes: - ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml From d7fa503f049ecdafd3ff7ae6d16b2dc2d615df20 Mon Sep 17 00:00:00 2001 From: Pranay Prateek Date: Thu, 10 Feb 2022 00:28:00 +0530 Subject: [PATCH 72/81] Update README.md --- README.md | 4 +++- 
1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e7af0d5bef..6bc8fec353 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,9 @@ SigNoz helps developers monitor applications and troubleshoot problems in their 👉 Run aggregates on trace data to get business relevant metrics -![SigNoz Feature](https://signoz-public.s3.us-east-2.amazonaws.com/signoz_hero_github.png) +![screenzy-1644432902955](https://user-images.githubusercontent.com/504541/153270713-1b2156e6-ec03-42de-975b-3c02b8ec1836.png) + +![screenzy-1644432986784](https://user-images.githubusercontent.com/504541/153270725-0efb73b3-06ed-4207-bf13-9b7e2e17c4b8.png)

From fdc8670fab202288689b690b1445186c969ceeb2 Mon Sep 17 00:00:00 2001 From: Patrik Date: Wed, 9 Feb 2022 22:05:27 +0100 Subject: [PATCH 73/81] fix: added support for custom alertmanager url --- .../app/clickhouseReader/reader.go | 8 ++++---- pkg/query-service/constants/constants.go | 8 +++++++- pkg/query-service/constants/constants_test.go | 20 +++++++++++++++++++ 3 files changed, 31 insertions(+), 5 deletions(-) create mode 100644 pkg/query-service/constants/constants_test.go diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index ab124de5e9..876c66fa18 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -660,7 +660,7 @@ func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiErr return &model.ApiError{Typ: model.ErrorBadData, Err: err} } - response, err := http.Post(constants.ALERTMANAGER_API_PREFIX+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data))) + response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data))) if err != nil { zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err) @@ -730,7 +730,7 @@ func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError { values := map[string]string{"name": channelToDelete.Name} jsonValue, _ := json.Marshal(values) - req, err := http.NewRequest(http.MethodDelete, constants.ALERTMANAGER_API_PREFIX+"v1/receivers", bytes.NewBuffer(jsonValue)) + req, err := http.NewRequest(http.MethodDelete, constants.GetAlertManagerApiPrefix()+"v1/receivers", bytes.NewBuffer(jsonValue)) if err != nil { zap.S().Errorf("Error in creating new delete request to alertmanager/v1/receivers\n", err) @@ -855,7 +855,7 @@ func (r *ClickHouseReader) EditChannel(receiver *model.Receiver, id string) (*mo } } - req, err := http.NewRequest(http.MethodPut, 
constants.ALERTMANAGER_API_PREFIX+"v1/receivers", bytes.NewBuffer(receiverString)) + req, err := http.NewRequest(http.MethodPut, constants.GetAlertManagerApiPrefix()+"v1/receivers", bytes.NewBuffer(receiverString)) if err != nil { zap.S().Errorf("Error in creating new update request to alertmanager/v1/receivers\n", err) @@ -917,7 +917,7 @@ func (r *ClickHouseReader) CreateChannel(receiver *model.Receiver) (*model.Recei } } - response, err := http.Post(constants.ALERTMANAGER_API_PREFIX+"v1/receivers", "application/json", bytes.NewBuffer(receiverString)) + response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer(receiverString)) if err != nil { zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers\n", err) diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 3339432513..0e19f83f93 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -23,7 +23,13 @@ func IsTelemetryEnabled() bool { const TraceTTL = "traces" const MetricsTTL = "metrics" -const ALERTMANAGER_API_PREFIX = "http://alertmanager:9093/api/" +func GetAlertManagerApiPrefix() string { + if os.Getenv("ALERTMANAGER_API_PREFIX") != "" { + return os.Getenv("ALERTMANAGER_API_PREFIX") + } + return "http://alertmanager:9093/api/" +} + const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db" const ( diff --git a/pkg/query-service/constants/constants_test.go b/pkg/query-service/constants/constants_test.go new file mode 100644 index 0000000000..97bed19271 --- /dev/null +++ b/pkg/query-service/constants/constants_test.go @@ -0,0 +1,20 @@ +package constants + +import ( + . 
"github.com/smartystreets/goconvey/convey" + "os" + "testing" +) + +func TestGetAlertManagerApiPrefix(t *testing.T) { + Convey("TestGetAlertManagerApiPrefix", t, func() { + res := GetAlertManagerApiPrefix() + So(res, ShouldEqual, "http://alertmanager:9093/api/") + + Convey("WithEnvSet", func() { + os.Setenv("ALERTMANAGER_API_PREFIX", "http://test:9093/api/") + res = GetAlertManagerApiPrefix() + So(res, ShouldEqual, "http://test:9093/api/") + }) + }) +} From 828bd3bac6853989c2a1bc992655bd1ac17f6162 Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Thu, 10 Feb 2022 16:37:14 +0530 Subject: [PATCH 74/81] feat: now webpack filename are hashed --- frontend/webpack.config.prod.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index 1e7f2198f0..c90c4616d8 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -34,8 +34,8 @@ const plugins = [ path: resolve(__dirname, './build/css'), // Public path of the CSS resources. 
This prefix is removed from the href publicPath: resolve(__dirname, './public/css'), - fonts: true - }) + fonts: true, + }), ]; if (process.env.BUNDLE_ANALYSER === 'true') { @@ -48,6 +48,7 @@ const config = { output: { path: resolve(__dirname, './build'), publicPath: '/', + filename: '[name].[contenthash].js', }, resolve: { extensions: ['.ts', '.tsx', '.js', '.jsx'], From 0ae5b824d91d0bc3fdcba457a5df2c852a51142b Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Thu, 10 Feb 2022 16:44:38 +0530 Subject: [PATCH 75/81] bug: signup state is now not toggled when component is not toggled --- frontend/src/pages/SignUp/SignUp.tsx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/frontend/src/pages/SignUp/SignUp.tsx b/frontend/src/pages/SignUp/SignUp.tsx index 37253a0aeb..c7f540836a 100644 --- a/frontend/src/pages/SignUp/SignUp.tsx +++ b/frontend/src/pages/SignUp/SignUp.tsx @@ -82,17 +82,19 @@ const Signup = ({ version, userpref }: SignupProps): JSX.Element => { history.push(ROUTES.APPLICATION); } else { + setLoading(false); + notification.error({ message: 'Something went wrong', }); } } else { + setLoading(false); + notification.error({ message: 'Something went wrong', }); } - - setLoading(false); } catch (error) { notification.error({ message: 'Something went wrong', From dc737f385a757c0e357c1023a560a1b435fc2229 Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Thu, 10 Feb 2022 22:20:31 +0530 Subject: [PATCH 76/81] bug: in the error state bar panel is added --- .../src/container/GridGraphLayout/Graph/index.tsx | 14 ++++++++++++-- .../src/container/GridGraphLayout/Graph/styles.ts | 6 ++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/frontend/src/container/GridGraphLayout/Graph/index.tsx b/frontend/src/container/GridGraphLayout/Graph/index.tsx index 60a086befa..74ff7c8511 100644 --- a/frontend/src/container/GridGraphLayout/Graph/index.tsx +++ b/frontend/src/container/GridGraphLayout/Graph/index.tsx @@ -23,7 +23,7 @@ import { Widgets 
} from 'types/api/dashboard/getAll'; import Bar from './Bar'; import FullView from './FullView'; -import { Modal, FullViewContainer } from './styles'; +import { Modal, FullViewContainer, ErrorContainer } from './styles'; const GridCardGraph = ({ widget, @@ -131,7 +131,17 @@ const GridCardGraph = ({ }, [deleteWidget, widget, onToggleModal, isDeleted]); if (state.error) { - return
{state.errorMessage}
; + return ( + <> + onToggleModal(setModal)} + widget={widget} + onDeleteHandler={(): void => onToggleModal(setDeletModal)} + /> + + {state.errorMessage} + + ); } if (state.loading === true || state.payload === undefined) { diff --git a/frontend/src/container/GridGraphLayout/Graph/styles.ts b/frontend/src/container/GridGraphLayout/Graph/styles.ts index 61f4c7c9c9..7a5d9f1a62 100644 --- a/frontend/src/container/GridGraphLayout/Graph/styles.ts +++ b/frontend/src/container/GridGraphLayout/Graph/styles.ts @@ -15,3 +15,9 @@ export const Modal = styled(ModalComponent)` export const FullViewContainer = styled.div` height: 70vh; `; + +export const ErrorContainer = styled.div` + margin-top: 2rem; + padding-left: 2rem; + padding-right: 2rem; +`; From 744dfd010a361a82f29691344cd96393abc8a434 Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Fri, 11 Feb 2022 12:00:46 +0530 Subject: [PATCH 77/81] chore: modal is updated in the error state --- .../container/GridGraphLayout/Graph/index.tsx | 59 +++++++++++-------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/frontend/src/container/GridGraphLayout/Graph/index.tsx b/frontend/src/container/GridGraphLayout/Graph/index.tsx index 74ff7c8511..977b274ac0 100644 --- a/frontend/src/container/GridGraphLayout/Graph/index.tsx +++ b/frontend/src/container/GridGraphLayout/Graph/index.tsx @@ -124,6 +124,38 @@ const GridCardGraph = ({ [], ); + const getModals = () => { + return ( + <> + onToggleModal(setDeletModal)} + visible={deleteModal} + title="Delete" + height="10vh" + onOk={onDeleteHandler} + centered + > + Are you sure you want to delete this widget + + + onToggleModal(setModal)} + width="85%" + destroyOnClose + > + + + + + + ); + }; + const onDeleteHandler = useCallback(() => { deleteWidget({ widgetId: widget.id }); onToggleModal(setDeletModal); @@ -133,6 +165,7 @@ const GridCardGraph = ({ if (state.error) { return ( <> + {getModals()} onToggleModal(setModal)} widget={widget} @@ -156,31 +189,7 @@ const GridCardGraph = ({ 
onDeleteHandler={(): void => onToggleModal(setDeletModal)} /> - onToggleModal(setDeletModal)} - visible={deleteModal} - title="Delete" - height="10vh" - onOk={onDeleteHandler} - centered - > - Are you sure you want to delete this widget - - - onToggleModal(setModal)} - width="85%" - destroyOnClose - > - - - - + {getModals()} Date: Fri, 11 Feb 2022 14:09:05 +0530 Subject: [PATCH 78/81] bug: dashboard graph is now fixed --- frontend/src/components/Graph/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/Graph/index.tsx b/frontend/src/components/Graph/index.tsx index eac5ae2712..b6905495d4 100644 --- a/frontend/src/components/Graph/index.tsx +++ b/frontend/src/components/Graph/index.tsx @@ -148,7 +148,7 @@ const Graph = ({ useEffect(() => { buildChart(); - }, []); + }, [buildChart]); return (
From db9052ea6e06cef3b3bfdf2e01b405188c61dce1 Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Fri, 11 Feb 2022 15:00:00 +0530 Subject: [PATCH 79/81] bug: full view legend is now fixed --- frontend/src/container/GridGraphLayout/Graph/index.tsx | 2 +- frontend/src/container/GridGraphLayout/index.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/src/container/GridGraphLayout/Graph/index.tsx b/frontend/src/container/GridGraphLayout/Graph/index.tsx index 60a086befa..2c163c2b05 100644 --- a/frontend/src/container/GridGraphLayout/Graph/index.tsx +++ b/frontend/src/container/GridGraphLayout/Graph/index.tsx @@ -168,7 +168,7 @@ const GridCardGraph = ({ destroyOnClose > - + diff --git a/frontend/src/container/GridGraphLayout/index.tsx b/frontend/src/container/GridGraphLayout/index.tsx index c72885f9ef..cbbec4a2a7 100644 --- a/frontend/src/container/GridGraphLayout/index.tsx +++ b/frontend/src/container/GridGraphLayout/index.tsx @@ -61,7 +61,7 @@ const GridGraph = (): JSX.Element => { x: (index % 2) * 6, Component: (): JSX.Element => ( From 3db790c3c79f3ffc2c378f981a4b9897e9457f38 Mon Sep 17 00:00:00 2001 From: Palash gupta Date: Fri, 11 Feb 2022 15:52:04 +0530 Subject: [PATCH 80/81] bug: merge conflit is resolved --- frontend/src/container/GridGraphLayout/Graph/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/container/GridGraphLayout/Graph/index.tsx b/frontend/src/container/GridGraphLayout/Graph/index.tsx index 977b274ac0..0cf795b06d 100644 --- a/frontend/src/container/GridGraphLayout/Graph/index.tsx +++ b/frontend/src/container/GridGraphLayout/Graph/index.tsx @@ -149,7 +149,7 @@ const GridCardGraph = ({ destroyOnClose > - + From 5510c67dbf847a6f36ce231a7475ea26d6c63fcb Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Fri, 11 Feb 2022 16:22:51 +0530 Subject: [PATCH 81/81] release: v0.6.1 --- deploy/docker/clickhouse-setup/docker-compose.arm.yaml | 4 ++-- 
deploy/docker/clickhouse-setup/docker-compose.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml index 0a09efbca3..dba80353da 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.arm.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.arm.yaml @@ -23,7 +23,7 @@ services: - '--storage.path=/data' query-service: - image: signoz/query-service:0.6.0 + image: signoz/query-service:0.6.1 container_name: query-service command: ["-config=/root/config/prometheus.yml"] volumes: @@ -40,7 +40,7 @@ services: condition: service_healthy frontend: - image: signoz/frontend:0.6.0 + image: signoz/frontend:0.6.1 container_name: frontend depends_on: - query-service diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 0f5109b635..13eaa5790e 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -26,7 +26,7 @@ services: query-service: - image: signoz/query-service:0.6.0 + image: signoz/query-service:0.6.1 container_name: query-service command: ["-config=/root/config/prometheus.yml"] volumes: @@ -43,7 +43,7 @@ services: condition: service_healthy frontend: - image: signoz/frontend:0.6.0 + image: signoz/frontend:0.6.1 container_name: frontend depends_on: - query-service