feat: helm chart for clickhouse setup (#479)

* minor change for volume permission spec

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added basic files of clickhouse chart

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added a simple deployment yaml

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* added clickhouse support in signoz

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* clickhouse working

Signed-off-by: Yash Sharma <yashrsharma44@gmail.com>

* chore: helm charts wip

* chore: fixing path of otel-path in hotrod

* chore: wip running clickhouse in templates

* chore: clickhouse working in templates

* chore: clickhouse helm chart upgraded to latest query-service and frontend images

* chore: cleanup and upgrading signoz chart version

* chore: adding alertmanager and minor fixes

* chore: persistence enabled for query-service and clickhouse

* chore: scrape interval reduced to 30s

* chore: changed crd api version from v1beta1 to v1

* chore: removed druid parts in values.yaml

* chore: log container removed from clickhouse

* chore: removed *.tgz from gitignore to add charts

* chore: added dependency charts

* chore: added clickhouse-operator templates

Co-authored-by: Yash Sharma <yashrsharma44@gmail.com>
Ankit Nayan 2021-12-11 11:08:25 +05:30 committed by GitHub
parent c79223742f
commit dc9ffcdd45
56 changed files with 3100 additions and 550 deletions

.gitignore
@@ -35,7 +35,6 @@ frontend/cypress.env.json
.idea
**/.vscode
*.tgz
**/build
**/storage
**/locust-scripts/__pycache__/

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: retention-config
data:
retention-spec.json: |
[{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}]

@@ -1,29 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: set-retention
annotations:
"helm.sh/hook": post-install,post-upgrade
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: set-retention
image: theithollow/hollowapp-blog:curl
volumeMounts:
- name: retention-config-volume
mountPath: /app/retention-spec.json
subPath: retention-spec.json
args:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans"
volumes:
- name: retention-config-volume
configMap:
name: retention-config
restartPolicy: Never
backoffLimit: 8

@@ -1,76 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: supervisor-config
data:
supervisor-spec.json: |
{
"type": "kafka",
"dataSchema": {
"dataSource": "flattened_spans",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "StartTimeUnixNano",
"format": "nano"
},
"dimensionsSpec": {
"dimensions": [
"TraceId",
"SpanId",
"ParentSpanId",
"Name",
"ServiceName",
"References",
"Tags",
"ExternalHttpMethod",
"ExternalHttpUrl",
"Component",
"DBSystem",
"DBName",
"DBOperation",
"PeerService",
{
"type": "string",
"name": "TagsKeys",
"multiValueHandling": "ARRAY"
},
{
"type": "string",
"name": "TagsValues",
"multiValueHandling": "ARRAY"
},
{ "name": "DurationNano", "type": "Long" },
{ "name": "Kind", "type": "int" },
{ "name": "StatusCode", "type": "int" }
]
}
}
},
"metricsSpec" : [
{ "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": "NONE",
"rollup": false
}
},
"tuningConfig": {
"type": "kafka",
"reportParseExceptions": true
},
"ioConfig": {
"topic": "flattened_spans",
"replicas": 1,
"taskDuration": "PT20M",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "signoz-kafka:9092"
}
}
}

@@ -1,27 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: create-supervisor
annotations:
"helm.sh/hook": post-install,post-upgrade
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: create-supervisor
image: theithollow/hollowapp-blog:curl
volumeMounts:
- name: supervisor-config-volume
mountPath: /app/supervisor-spec.json
subPath: supervisor-spec.json
args:
- /bin/sh
- -c
- "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor"
volumes:
- name: supervisor-config-volume
configMap:
name: supervisor-config
restartPolicy: Never
backoffLimit: 8

@@ -1,60 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-conf
labels:
app: opentelemetry
component: otel-collector-conf
data:
otel-collector-config: |
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
queued_retry:
num_workers: 4
queue_size: 100
retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
kafka/traces:
brokers:
- signoz-kafka:9092
topic: 'otlp_spans'
protocol_version: 2.0.0
kafka/metrics:
brokers:
- signoz-kafka:9092
topic: 'otlp_metrics'
protocol_version: 2.0.0
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [memory_limiter, batch, queued_retry]
exporters: [kafka/traces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [kafka/metrics]

@@ -2,20 +2,14 @@ dependencies:
- name: zookeeper
repository: https://charts.bitnami.com/bitnami
version: 6.0.0
- name: kafka
repository: https://charts.bitnami.com/bitnami
version: 12.0.0
- name: druid
repository: https://charts.helm.sh/incubator
version: 0.2.18
- name: flattener-processor
repository: file://./signoz-charts/flattener-processor
version: 0.3.6
- name: query-service
repository: file://./signoz-charts/query-service
version: 0.3.6
version: 0.5.1
- name: frontend
repository: file://./signoz-charts/frontend
version: 0.3.6
digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228
generated: "2021-08-23T12:06:37.231066+05:30"
version: 0.5.2
- name: alertmanager
repository: file://./signoz-charts/alertmanager
version: 0.5.0
digest: sha256:f1fac36fa869480261d1bf091b95e18147654e52b891ed743d7e3d985fa6ed18
generated: "2021-12-09T17:07:48.919918+05:30"

@@ -15,29 +15,23 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.2
version: 0.5.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.3.2
appVersion: 0.5.2
dependencies:
- name: zookeeper
repository: "https://charts.bitnami.com/bitnami"
version: 6.0.0
- name: kafka
repository: "https://charts.bitnami.com/bitnami"
version: 12.0.0
- name: druid
repository: "https://charts.helm.sh/incubator"
version: 0.2.18
- name: flattener-processor
repository: "file://./signoz-charts/flattener-processor"
version: 0.3.6
- name: query-service
repository: "file://./signoz-charts/query-service"
version: 0.3.6
version: 0.5.1
- name: frontend
repository: "file://./signoz-charts/frontend"
version: 0.3.6
version: 0.5.2
- name: alertmanager
repository: "file://./signoz-charts/alertmanager"
version: 0.5.0

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large

@@ -0,0 +1,7 @@
apiVersion: v2
name: alertmanager
description: The Alertmanager handles alerts sent by client applications such as the Prometheus server.
type: application
version: 0.5.0
appVersion: 0.5.0

@@ -0,0 +1,2 @@
configmapReload:
enabled: true

@@ -2,20 +2,20 @@
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }})
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alertmanager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ .Values.service.port }}:80
{{- end }}

@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "flattener-processor.name" -}}
{{- define "alertmanager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flattener-processor.fullname" -}}
{{- define "alertmanager.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,16 +27,16 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flattener-processor.chart" -}}
{{- define "alertmanager.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "flattener-processor.labels" -}}
helm.sh/chart: {{ include "flattener-processor.chart" . }}
{{ include "flattener-processor.selectorLabels" . }}
{{- define "alertmanager.labels" -}}
helm.sh/chart: {{ include "alertmanager.chart" . }}
{{ include "alertmanager.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
@@ -46,17 +46,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
{{/*
Selector labels
*/}}
{{- define "flattener-processor.selectorLabels" -}}
app.kubernetes.io/name: {{ include "flattener-processor.name" . }}
{{- define "alertmanager.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alertmanager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "flattener-processor.serviceAccountName" -}}
{{- define "alertmanager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }}
{{ default (include "alertmanager.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}

@@ -0,0 +1,15 @@
{{- if .Values.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
data:
alertmanager.yml: |
{{- toYaml .Values.config | default "{}" | nindent 4 }}
{{- range $key, $value := .Values.templates }}
{{ $key }}: |-
{{- $value | nindent 4 }}
{{- end }}
{{- end }}

@@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "alertmanager.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,13 @@
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "alertmanager.selectorLabels" . | nindent 6 }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}
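
The template above renders only when podDisruptionBudget is non-empty and splices the user-supplied spec in verbatim. A minimal values sketch, using the keys hinted at in the chart's values.yaml comments (the number is an assumption):

podDisruptionBudget:
  minAvailable: 1  # keep at least one alertmanager pod during voluntary disruptions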

@@ -2,9 +2,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "flattener-processor.serviceAccountName" . }}
name: {{ include "alertmanager.serviceAccountName" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
{{- include "alertmanager.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}

@@ -0,0 +1,48 @@
apiVersion: v1
kind: Service
metadata:
name: alertmanager
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "alertmanager.selectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "alertmanager.fullname" . }}-headless
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
spec:
clusterIP: None
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
- port: 9094
targetPort: 9094
protocol: TCP
name: cluster-tcp
- port: 9094
targetPort: 9094
protocol: UDP
name: cluster-udp
{{- end }}
selector:
{{- include "alertmanager.selectorLabels" . | nindent 4 }}

@@ -0,0 +1,152 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "alertmanager.fullname" . }}
labels:
{{- include "alertmanager.labels" . | nindent 4 }}
{{- if .Values.statefulSet.annotations }}
annotations:
{{ toYaml .Values.statefulSet.annotations | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "alertmanager.selectorLabels" . | nindent 6 }}
serviceName: {{ include "alertmanager.fullname" . }}-headless
template:
metadata:
labels:
{{- include "alertmanager.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 8 }}
{{- end }}
annotations:
{{- if not .Values.configmapReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "alertmanager.serviceAccountName" . }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
{{- if and (.Values.configmapReload.enabled) (.Values.config) }}
- name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }}
image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}"
imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}"
args:
- --volume-dir=/etc/alertmanager
- --webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload
resources:
{{- toYaml .Values.configmapReload.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /etc/alertmanager
{{- end }}
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
{{- if .Values.command }}
command:
{{- toYaml .Values.command | nindent 12 }}
{{- end }}
args:
- --storage.path=/alertmanager
- --config.file=/etc/alertmanager/alertmanager.yml
{{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }}
- --cluster.advertise-address=$(POD_IP):9094
- --cluster.listen-address=0.0.0.0:9094
{{- end }}
{{- if gt .Values.replicaCount 1.0}}
{{- $fullName := include "alertmanager.fullname" . }}
{{- range $i := until (int .Values.replicaCount) }}
- --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:9094
{{- end }}
{{- end }}
{{- if .Values.additionalPeers }}
{{- range $item := .Values.additionalPeers }}
- --cluster.peer={{ $item }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
ports:
- name: http
containerPort: 9093
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
{{- if .Values.config }}
- name: config
mountPath: /etc/alertmanager
{{- end }}
- name: storage
mountPath: /alertmanager
{{- if .Values.config }}
volumes:
- name: config
configMap:
name: {{ include "alertmanager.fullname" . }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
{{- toYaml .Values.persistence.accessModes | nindent 10 }}
resources:
requests:
storage: {{ .Values.persistence.size }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass }}
{{- end }}
{{- end }}
{{- else }}
- name: storage
emptyDir: {}
{{- end -}}
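
One detail worth calling out in the statefulset above: persistence.storageClass set to "-" is rendered as an empty storageClassName, which disables dynamic provisioning, while leaving it unset falls back to the cluster default. A values sketch exercising that branch (size and access mode mirror the chart defaults):

persistence:
  enabled: true
  storageClass: "-"  # rendered as storageClassName: "" (no dynamic provisioning)
  accessModes:
    - ReadWriteOnce
  size: 100Mi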

@@ -0,0 +1,48 @@
should match snapshot of default values:
1: |
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 1.0.0
helm.sh/chart: alertmanager-1.0.0
name: RELEASE-NAME-alertmanager
spec:
ingressClassName: nginx-test
rules:
- host: alertmanager.domain.com
http:
paths:
- backend:
service:
name: RELEASE-NAME-alertmanager
port:
number: 9093
path: /
pathType: ImplementationSpecific
should match snapshot of default values with old kubernetes ingress:
1: |
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx-test
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 1.0.0
helm.sh/chart: alertmanager-1.0.0
name: RELEASE-NAME-alertmanager
spec:
rules:
- host: alertmanager.domain.com
http:
paths:
- backend:
serviceName: RELEASE-NAME-alertmanager
servicePort: 9093
path: /

@@ -0,0 +1,81 @@
suite: test ingress
templates:
- ingress.yaml
tests:
- it: should be empty if ingress is not enabled
asserts:
- hasDocuments:
count: 0
- it: should have apiVersion extensions/v1beta1 for k8s < 1.14
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 13
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: extensions/v1beta1
- it: should have apiVersion networking.k8s.io/v1beta1 for k8s < 1.19
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 18
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: networking.k8s.io/v1beta1
- it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19
set:
ingress.enabled: true
capabilities:
majorVersion: 1
minorVersion: 19
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- isAPIVersion:
of: networking.k8s.io/v1
- it: should have an ingressClassName for k8s >= 1.19
set:
ingress.enabled: true
ingress.className: nginx-test
capabilities:
majorVersion: 1
minorVersion: 19
asserts:
- hasDocuments:
count: 1
- equal:
path: spec.ingressClassName
value: nginx-test
- it: should match snapshot of default values
set:
ingress.enabled: true
ingress.className: nginx-test
chart:
version: 1.0.0
appVersion: 1.0.0
asserts:
- matchSnapshot: { }
- it: should match snapshot of default values with old kubernetes ingress
set:
ingress.enabled: true
ingress.className: nginx-test
capabilities:
majorVersion: 1
minorVersion: 17
chart:
version: 1.0.0
appVersion: 1.0.0
asserts:
- matchSnapshot: { }

@@ -0,0 +1,189 @@
# Default values for alertmanager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/alertmanager
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "0.5.0"
extraArgs: {}
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext:
fsGroup: 65534
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
securityContext:
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
runAsUser: 65534
runAsNonRoot: true
runAsGroup: 65534
additionalPeers: []
service:
annotations: {}
type: ClusterIP
port: 9093
# if you want to force a specific nodePort. Must be used with service.type=NodePort
# nodePort:
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: alertmanager.domain.com
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - alertmanager.domain.com
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
tolerations: []
affinity: {}
statefulSet:
annotations: {}
podAnnotations: {}
podLabels: {}
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
command: []
persistence:
enabled: true
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner.
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 100Mi
config:
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
templates:
- '/etc/alertmanager/*.tmpl'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: '{{ template "slack.title" . }}'
text: '{{ template "slack.text" . }}'
route:
receiver: 'slack-notifications'
## Monitors ConfigMap changes and POSTs to a URL
## Ref: https://github.com/jimmidyson/configmap-reload
##
configmapReload:
## If false, the configmap-reload container will not be deployed
##
enabled: false
## configmap-reload container name
##
name: configmap-reload
## configmap-reload container image
##
image:
repository: jimmidyson/configmap-reload
tag: v0.5.0
pullPolicy: IfNotPresent
## configmap-reload resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
templates:
title.tmpl: |-
{{ define "slack.title" }}
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
{{ end }}
text.tmpl: |-
{{ define "slack.text" }}
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}
{{ end }}
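
Because the entire alertmanager.yml is built from .Values.config, an installation can swap in its own routing without forking the chart. A minimal override sketch (the webhook URL and channel are placeholders, not part of this commit):

config:
  global:
    resolve_timeout: 1m
    slack_api_url: 'https://hooks.slack.com/services/T000/B000/XXXXXXXX'
  route:
    receiver: 'slack-notifications'
  receivers:
    - name: 'slack-notifications'
      slack_configs:
        - channel: '#my-alerts'
          send_resolved: true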

@@ -1,21 +0,0 @@
apiVersion: v2
name: flattener-processor
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6

@@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "flattener-processor.fullname" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "flattener-processor.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "flattener-processor.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/root/flattener"
ports:
- name: http
containerPort: 8080
protocol: TCP
env:
- name: KAFKA_BROKER
value: {{ .Values.configVars.KAFKA_BROKER }}
- name: KAFKA_INPUT_TOPIC
value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }}
- name: KAFKA_OUTPUT_TOPIC
value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }}
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "flattener-processor.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "flattener-processor.fullname" . }}
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "flattener-processor.selectorLabels" . | nindent 4 }}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "flattener-processor.fullname" . }}-test-connection"
labels:
{{- include "flattener-processor.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

@@ -1,74 +0,0 @@
# Default values for flattener-processor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signoz/flattener-processor
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configVars:
KAFKA_BROKER: signoz-kafka:9092
KAFKA_INPUT_TOPIC: otlp_spans
KAFKA_OUTPUT_TOPIC: flattened_spans
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
version: 0.5.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6
appVersion: 0.5.2

@@ -24,6 +24,9 @@ data:
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api/alertmanager{
proxy_pass http://{{ .Values.config.alertmanagerUrl }}/api/v2;
}
location /api {
proxy_pass http://{{ .Values.config.queryServiceUrl }}/api;
}

@@ -17,6 +17,7 @@ configVars:
config:
name: signoz-nginx-config
queryServiceUrl: signoz-query-service:8080
alertmanagerUrl: alertmanager:9093
serviceAccount:
# Specifies whether a service account should be created

@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.6
version: 0.5.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.3.6
appVersion: 0.5.1

@@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-config
data:
prometheus.yml: |
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
remote_read:
- url: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
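
The rule_files entry above points at alerts.yml, which this ConfigMap does not define, so the rules are expected to be supplied separately (for example via the dashboards volume the query-service mounts). A hypothetical minimal rule file, only to show the expected shape (the metric and threshold are assumptions):

alerts.yml: |
  groups:
    - name: example
      rules:
        - alert: HighErrorRate
          expr: rate(http_requests_total{status="500"}[5m]) > 1
          for: 5m
          labels:
            severity: warning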

@@ -1,10 +1,11 @@
apiVersion: apps/v1
kind: Deployment
kind: StatefulSet
metadata:
name: {{ include "query-service.fullname" . }}
labels:
{{- include "query-service.labels" . | nindent 4 }}
spec:
serviceName: query-service
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
@@ -14,19 +15,17 @@ spec:
labels:
{{- include "query-service.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "query-service.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["-config=/root/config/prometheus.yml"]
ports:
- name: http
containerPort: 8080
@@ -38,7 +37,10 @@ spec:
value: {{ .Values.configVars.DruidDatasource }}
- name: STORAGE
value: {{ .Values.configVars.STORAGE }}
- name: ClickHouseUrl
value: {{ .Values.configVars.ClickHouseUrl}}
- name: GODEBUG
value: netdns=go
# livenessProbe:
# httpGet:
# path: /
@@ -47,6 +49,13 @@ spec:
# httpGet:
# path: /
# port: http
volumeMounts:
- name: prometheus
mountPath: /root/config
- name: signoz-db
mountPath: /var/lib/signoz/
- name: dashboards
mountPath: /root/config/dashboards
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
@@ -61,3 +70,18 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: prometheus
configMap:
name: prometheus-config
- name: dashboards
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: signoz-db
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

@@ -16,7 +16,8 @@ fullnameOverride: ""
configVars:
DruidClientUrl: http://signoz-druid-router:8888
DruidDatasource: flattened_spans
STORAGE: druid
ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
STORAGE: clickhouse
POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"

@@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: initdb-config
data:
init-db.sql: |-
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))

@@ -0,0 +1,104 @@
{{ if (eq (.Values.cloud | toString) "gcp" )}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gce-resizable
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
fstype: ext4
replication-type: none
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
{{- else if (eq (.Values.cloud | toString) "aws") }}
#
# AWS resizable disk example
#
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gp2-resizable
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
reclaimPolicy: Retain
#volumeBindingMode: Immediate
allowVolumeExpansion: true
{{- end }}
---
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: signoz
spec:
defaults:
templates:
dataVolumeClaimTemplate: default-volume-claim
# logVolumeClaimTemplate: default-volume-claim
serviceTemplate: chi-service-template
configuration:
zookeeper:
nodes:
- host: signoz-zookeeper
port: 2181
session_timeout_ms: 6000
clusters:
- name: cluster
# Templates are specified for this cluster explicitly
templates:
dataVolumeClaimTemplate: default-volume-claim
# logVolumeClaimTemplate: default-volume-claim
podTemplate: pod-template-with-volume
layout:
shardsCount: 1
replicasCount: 1
templates:
hostTemplates:
- name: port-distribution
portDistribution:
- type: ClusterScopeIndex
spec:
tcpPort: 9000
httpPort: 8123
interserverHTTPPort: 9009
podTemplates:
- name: pod-template-with-volume
spec:
containers:
- name: clickhouse
image: yandex/clickhouse-server:21.7
volumeMounts:
- name: default-volume-claim
mountPath: /var/lib/clickhouse
- name: initdb
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: initdb
configMap:
name: initdb-config
serviceTemplates:
- name: chi-service-template
generateName: signoz-clickhouse
spec:
ports:
- name: http
port: 8123
- name: tcp
port: 9000
type: {{ .Values.clickhouseOperator.serviceType }}
volumeClaimTemplates:
- name: default-volume-claim
reclaimPolicy: Retain
spec:
{{- if (eq (.Values.cloud | toString) "gcp" )}}
storageClassName: gce-resizable
{{- else if (eq (.Values.cloud | toString) "aws") }}
storageClassName: gp2-resizable
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.clickhouseOperator.storage | quote }}
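
The installation template above switches storage classes on .Values.cloud and reads the service type and disk size from .Values.clickhouseOperator. A values sketch covering the keys it references (the size is an assumption):

cloud: aws  # selects the gp2-resizable StorageClass branch above
clickhouseOperator:
  enabled: true
  serviceType: ClusterIP
  storage: 20Gi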

@@ -0,0 +1,157 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=#
# ROLE_KIND=ClusterRole
# ROLE_NAME=clickhouse-operator-posthog
# ROLE_BINDING_KIND=ClusterRoleBinding
# ROLE_BINDING_NAME=clickhouse-operator-posthog
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: clickhouse-operator-posthog
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- services
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- persistentvolumes
- pods
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- apps
resources:
- replicasets
verbs:
- delete
- get
- patch
- update
- apiGroups:
- apps
resourceNames:
- clickhouse-operator
resources:
- deployments
verbs:
- get
- patch
- update
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- patch
- update
- list
- watch
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations
verbs:
- delete
- get
- patch
- update
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations
- clickhouseinstallationtemplates
- clickhouseoperatorconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/finalizers
- clickhouseinstallationtemplates/finalizers
- clickhouseoperatorconfigurations/finalizers
verbs:
- update
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/status
- clickhouseinstallationtemplates/status
- clickhouseoperatorconfigurations/status
verbs:
- create
- delete
- get
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
{{- end }}

@@ -0,0 +1,18 @@
{{- if .Values.clickhouseOperator.enabled }}
# Setup ClusterRoleBinding between ClusterRole and ServiceAccount.
# ClusterRoleBinding is namespace-less and must have unique name
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: clickhouse-operator-posthog
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: clickhouse-operator-posthog
subjects:
- kind: ServiceAccount
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
{{- end }}

@@ -0,0 +1,418 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAME=etc-clickhouse-operator-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
config.yaml: |
################################################
##
## Watch Namespaces Section
##
################################################
# List of namespaces where clickhouse-operator watches for events.
# Concurrently running operators should watch on different namespaces
#watchNamespaces: ["dev", "test"]
watchNamespaces: []
################################################
##
## Additional Configuration Files Section
##
################################################
# Path to folder where ClickHouse configuration files common for all instances within CHI are located.
chCommonConfigsPath: config.d
# Path to folder where ClickHouse configuration files unique for each instance (host) within CHI are located.
chHostConfigsPath: conf.d
# Path to folder where ClickHouse configuration files with users settings are located.
# Files are common for all instances within CHI
chUsersConfigsPath: users.d
# Path to folder where ClickHouseInstallation .yaml manifests are located.
# Manifests are applied in sorted alpha-numeric order
chiTemplatesPath: templates.d
################################################
##
## Cluster Create/Update/Delete Objects Section
##
################################################
# How many seconds to wait for created/updated StatefulSet to be Ready
statefulSetUpdateTimeout: 300
# How many seconds to wait between checks for created/updated StatefulSet status
statefulSetUpdatePollPeriod: 5
# What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
# Possible options:
# 1. abort - do nothing, just break the process and wait for admin
# 2. delete - delete newly created problematic StatefulSet
# 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
onStatefulSetCreateFailureAction: ignore
# What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
# Possible options:
# 1. abort - do nothing, just break the process and wait for admin
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
# Pod would be recreated by StatefulSet based on rollback-ed configuration
# 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
onStatefulSetUpdateFailureAction: rollback
################################################
##
## ClickHouse Settings Section
##
################################################
# Default values for ClickHouse user configuration
# 1. user/profile - string
# 2. user/quota - string
# 3. user/networks/ip - multiple strings
# 4. user/password - string
chConfigUserDefaultProfile: default
chConfigUserDefaultQuota: default
chConfigUserDefaultNetworksIP:
- "::1"
- "127.0.0.1"
chConfigUserDefaultPassword: "default"
# Default host_regexp to limit network connectivity from outside
chConfigNetworksHostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
################################################
##
## Access to ClickHouse instances
##
################################################
# ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances
# for:
# 1. Metrics requests
# 2. Schema maintenance
# 3. DROP DNS CACHE
# User with such credentials can be specified in additional ClickHouse .xml config files,
# located in `chUsersConfigsPath` folder
chUsername: "clickhouse_operator"
chPassword: "clickhouse_operator_password"
# Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
# Can be used instead of explicitly specified username and password
chCredentialsSecretNamespace: ""
chCredentialsSecretName: ""
# Port where to connect to ClickHouse instances to
chPort: 8123
################################################
##
## Log parameters
##
################################################
logtostderr: "true"
alsologtostderr: "false"
v: "1"
stderrthreshold: ""
vmodule: ""
log_backtrace_at: ""
################################################
##
## Runtime parameters
##
################################################
# Max number of concurrent reconciles in progress
reconcileThreadsNumber: 10
reconcileWaitExclude: true
reconcileWaitInclude: false
################################################
##
## Labels management parameters
##
################################################
# When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
# exclude labels from the following list:
#excludeFromPropagationLabels:
# - "labelA"
# - "labelB"
# Whether to append *Scope* labels to StatefulSet and Pod.
# Full list of available *scope* labels check in labeler.go
# LabelShardScopeIndex
# LabelReplicaScopeIndex
# LabelCHIScopeIndex
# LabelCHIScopeCycleSize
# LabelCHIScopeCycleIndex
# LabelCHIScopeCycleOffset
# LabelClusterScopeIndex
# LabelClusterScopeCycleSize
# LabelClusterScopeCycleIndex
# LabelClusterScopeCycleOffset
appendScopeLabels: "no"
################################################
##
## Pod management parameters
##
################################################
# Grace period for Pod termination.
# How many seconds to wait between sending
# SIGTERM and SIGKILL during Pod termination process.
# Increase this number in case of slow shutdown.
terminationGracePeriod: 30
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-confd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-confd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-configd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-configd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
<yandex>
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
</yandex>
01-clickhouse-02-logger.xml: |
<yandex>
<logger>
<!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
<level>debug</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
<console>1</console>
</logger>
</yandex>
01-clickhouse-03-query_log.xml: |
<yandex>
<query_log replace="1">
<database>system</database>
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<query_thread_log remove="1"/>
</yandex>
01-clickhouse-04-part_log.xml: |
<yandex>
<part_log replace="1">
<database>system</database>
<table>part_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
</yandex>
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-templatesd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
001-templates.json.example: |
{
"apiVersion": "clickhouse.altinity.com/v1",
"kind": "ClickHouseInstallationTemplate",
"metadata": {
"name": "01-default-volumeclaimtemplate"
},
"spec": {
"templates": {
"volumeClaimTemplates": [
{
"name": "chi-default-volume-claim-template",
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "2Gi"
}
}
}
}
],
"podTemplates": [
{
"name": "chi-default-oneperhost-pod-template",
"distribution": "OnePerHost",
"spec": {
"containers" : [
{
"name": "clickhouse",
"image": "yandex/clickhouse-server:19.3.7",
"ports": [
{
"name": "http",
"containerPort": 8123
},
{
"name": "client",
"containerPort": 9000
},
{
"name": "interserver",
"containerPort": 9009
}
]
}
]
}
}
]
}
}
}
default-pod-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-oneperhost-pod-template"
spec:
templates:
podTemplates:
- name: default-oneperhost-pod-template
distribution: "OnePerHost"
default-storage-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-storage-template-2Gi"
spec:
templates:
volumeClaimTemplates:
- name: default-storage-template-2Gi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
readme: |
Templates in this folder are packaged with an operator and available via 'useTemplate'
---
# Template Parameters:
#
# NAME=etc-clickhouse-operator-usersd-files
# NAMESPACE=posthog
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: etc-clickhouse-operator-usersd-files
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
data:
01-clickhouse-user.xml: |
<yandex>
<users>
<clickhouse_operator>
<networks>
<ip>127.0.0.1</ip>
<ip>0.0.0.0/0</ip>
<ip>::/0</ip>
</networks>
<password_sha256_hex>716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448</password_sha256_hex>
<profile>clickhouse_operator</profile>
<quota>default</quota>
</clickhouse_operator>
</users>
<profiles>
<clickhouse_operator>
<log_queries>0</log_queries>
<skip_unavailable_shards>1</skip_unavailable_shards>
<http_connection_timeout>10</http_connection_timeout>
</clickhouse_operator>
</profiles>
</yandex>
02-clickhouse-default-profile.xml: |
<yandex>
<profiles>
<default>
<log_queries>1</log_queries>
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
<parallel_view_processing>1</parallel_view_processing>
</default>
</profiles>
</yandex>
03-database-ordinary.xml: |
<!-- Remove it for ClickHouse versions before 20.4 -->
<yandex>
<profiles>
<default>
<default_database_engine>Ordinary</default_database_engine>
</default>
</profiles>
</yandex>
{{- end }}

@@ -0,0 +1,129 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
# OPERATOR_IMAGE=altinity/clickhouse-operator:latest
# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:latest
#
# Setup Deployment for clickhouse-operator
# Deployment would be created in kubectl-specified namespace
kind: Deployment
apiVersion: apps/v1
metadata:
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
spec:
replicas: 1
selector:
matchLabels:
app: clickhouse-operator
template:
metadata:
labels:
app: clickhouse-operator
annotations:
prometheus.io/port: '8888'
prometheus.io/scrape: 'true'
spec:
serviceAccountName: clickhouse-operator
volumes:
- name: etc-clickhouse-operator-folder
configMap:
name: etc-clickhouse-operator-files
- name: etc-clickhouse-operator-confd-folder
configMap:
name: etc-clickhouse-operator-confd-files
- name: etc-clickhouse-operator-configd-folder
configMap:
name: etc-clickhouse-operator-configd-files
- name: etc-clickhouse-operator-templatesd-folder
configMap:
name: etc-clickhouse-operator-templatesd-files
- name: etc-clickhouse-operator-usersd-folder
configMap:
name: etc-clickhouse-operator-usersd-files
containers:
- name: clickhouse-operator
image: altinity/clickhouse-operator:latest
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
env:
# Pod-specific
# spec.nodeName: ip-172-20-52-62.ec2.internal
- name: OPERATOR_POD_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
- name: OPERATOR_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# metadata.namespace: kube-system
- name: OPERATOR_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# status.podIP: 100.96.3.2
- name: OPERATOR_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
# spec.serviceAccount: clickhouse-operator
# spec.serviceAccountName: clickhouse-operator
- name: OPERATOR_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Container-specific
- name: OPERATOR_CONTAINER_CPU_REQUEST
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: requests.cpu
- name: OPERATOR_CONTAINER_CPU_LIMIT
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: limits.cpu
- name: OPERATOR_CONTAINER_MEM_REQUEST
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: requests.memory
- name: OPERATOR_CONTAINER_MEM_LIMIT
valueFrom:
resourceFieldRef:
containerName: clickhouse-operator
resource: limits.memory
- name: metrics-exporter
image: altinity/metrics-exporter:latest
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
{{- end }}
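All operator manifests in this chart are gated on the same values. A short values.yaml sketch of the knobs these templates read ('enabled', 'namespace') plus the fields set in the values file at the end of this diff ('storage', 'serviceType'); the 'platform' namespace is a placeholder:

clickhouseOperator:
  enabled: true          # render the operator Deployment, ConfigMaps and Service
  namespace: platform    # optional; templates fall back to .Release.Namespace
  storage: 20Gi
  serviceType: ClusterIP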

@@ -0,0 +1,26 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# NAMESPACE=posthog
# COMMENT=
#
# Setup a ClusterIP Service that exposes monitoring metrics for Prometheus
# The Service is created in the kubectl-specified namespace
# To reach it from outside the cluster, port-forward it, e.g.:
#   kubectl --namespace prometheus port-forward service/prometheus 9090
# and point a browser to localhost:9090
kind: Service
apiVersion: v1
metadata:
name: clickhouse-operator-metrics
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
labels:
app: clickhouse-operator
spec:
ports:
- port: 8888
name: clickhouse-operator-metrics
selector:
app: clickhouse-operator
{{- end }}
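Besides the pod annotations on the Deployment above, the exporter can be scraped through this Service. A hypothetical Prometheus scrape job; the job name and the 'platform' namespace are placeholders:

scrape_configs:
  - job_name: "clickhouse-operator"
    static_configs:
      # <service>.<namespace>.svc DNS name; adjust the namespace to your release
      - targets: ["clickhouse-operator-metrics.platform.svc:8888"]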

@@ -0,0 +1,15 @@
{{- if .Values.clickhouseOperator.enabled }}
# Template Parameters:
#
# COMMENT=
# NAMESPACE=posthog
# NAME=clickhouse-operator
#
# Setup ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
name: clickhouse-operator
namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }}
{{- end }}

@@ -0,0 +1,53 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-metrics-conf
labels:
app: opentelemetry
component: otel-collector-metrics-conf
data:
otel-collector-metrics-config: |
receivers:
otlp:
protocols:
grpc:
http:
# Data sources: metrics
prometheus:
config:
scrape_configs:
- job_name: "otel-collector"
scrape_interval: 30s
static_configs:
- targets: ["otel-collector:8889"]
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhousemetricswrite:
endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
service:
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp, prometheus]
processors: [batch]
exporters: [clickhousemetricswrite]
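Two wiring details in the config above: the prometheus receiver scrapes otel-collector:8889, which is the Prometheus exporter port the trace collector opens later in this diff, and ballast_size_mib must match the collector's CLI flag (note that memory_limiter is declared but not listed in the metrics pipeline's processors). A sketch of the matching container args, taken from the Deployment that follows:

containers:
  - command:
      - "/otelcontribcol"
      - "--config=/conf/otel-collector-metrics-config.yaml"
      # must stay equal to memory_limiter.ballast_size_mib above (683 MiB)
      - "--mem-ballast-size-mib=683"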

@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: otel-collector-metrics
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
selector:
matchLabels:
app: opentelemetry
component: otel-collector-metrics
minReadySeconds: 5
progressDeadlineSeconds: 120
  replicas: 1 # TODO: adjust this to your own requirements
template:
metadata:
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
containers:
- command:
- "/otelcontribcol"
- "--config=/conf/otel-collector-metrics-config.yaml"
        # Memory ballast size should be at most 1/3 to 1/2 of total memory.
- "--mem-ballast-size-mib=683"
image: signoz/otelcontribcol:0.4.2
name: otel-collector
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
ports:
- containerPort: 55679 # Default endpoint for ZPages.
        - containerPort: 55680 # Legacy endpoint for OpenTelemetry gRPC receiver.
        - containerPort: 55681 # Legacy endpoint for OpenTelemetry HTTP receiver.
- containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver.
- containerPort: 14250 # Default endpoint for Jaeger GRPC receiver.
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.
volumeMounts:
- name: otel-collector-metrics-config-vol
mountPath: /conf
# - name: otel-collector-secrets
# mountPath: /secrets
livenessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
readinessProbe:
httpGet:
path: /
port: 13133 # Health Check extension default port.
volumes:
- configMap:
name: otel-collector-metrics-conf
items:
- key: otel-collector-metrics-config
path: otel-collector-metrics-config.yaml
name: otel-collector-metrics-config-vol
# - secret:
# name: otel-collector-secrets
# items:
# - key: cert.pem
# path: cert.pem
# - key: key.pem
# path: key.pem

@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
name: otel-collector-metrics
labels:
app: opentelemetry
component: otel-collector-metrics
spec:
ports:
    - name: otlp # Legacy endpoint for OpenTelemetry gRPC receiver.
port: 55680
protocol: TCP
targetPort: 55680
    - name: otlp-http-legacy # Legacy endpoint for OpenTelemetry HTTP receiver.
port: 55681
protocol: TCP
targetPort: 55681
    - name: otlp-grpc # Default endpoint for OpenTelemetry gRPC receiver.
port: 4317
protocol: TCP
targetPort: 4317
    - name: jaeger-grpc # Default endpoint for Jaeger gRPC receiver.
port: 14250
- name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver.
port: 14268
- name: zipkin # Default endpoint for Zipkin receiver.
port: 9411
- name: metrics # Default endpoint for querying metrics.
port: 8888
selector:
component: otel-collector-metrics
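In-cluster workloads can send OTLP straight to this Service. A hedged client-side sketch using the standard OpenTelemetry SDK environment variable; the 'platform' namespace is a placeholder:

env:
  - name: OTEL_EXPORTER_OTLP_ENDPOINT
    # gRPC port 4317 from the Service above; 55681 serves the legacy HTTP path
    value: "http://otel-collector-metrics.platform.svc.cluster.local:4317"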

@@ -0,0 +1,67 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-conf
labels:
app: opentelemetry
component: otel-collector-conf
data:
otel-collector-config: |
receivers:
otlp/spanmetrics:
protocols:
grpc:
endpoint: "localhost:12345"
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
signozspanmetrics/prometheus:
metrics_exporter: prometheus
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
clickhousemetricswrite:
endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: "0.0.0.0:8889"
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/prometheus, batch]
exporters: [clickhouse]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
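Trace producers target the jaeger or otlp receivers of this pipeline. A minimal pod-spec sketch mirroring the HotROD example at the end of this diff, which uses the 'platform' namespace:

env:
  - name: JAEGER_ENDPOINT
    # the jaeger thrift_http receiver above listens on 14268
    value: "http://otel-collector.platform.svc.cluster.local:14268/api/traces"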

@@ -21,11 +21,11 @@ spec:
spec:
containers:
- command:
- "/otelcol"
- "/otelcontribcol"
- "--config=/conf/otel-collector-config.yaml"
# Memory Ballast size should be max 1/3 to 1/2 of memory.
- "--mem-ballast-size-mib=683"
-        image: otel/opentelemetry-collector:0.18.0
+        image: signoz/otelcontribcol:0.4.2
name: otel-collector
resources:
limits:
@@ -43,6 +43,7 @@ spec:
- containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
- containerPort: 9411 # Default endpoint for Zipkin receiver.
- containerPort: 8888 # Default endpoint for querying metrics.
+        - containerPort: 8889 # Default endpoint for prometheus exported metrics.
volumeMounts:
- name: otel-collector-config-vol
mountPath: /conf

@@ -27,5 +27,7 @@
port: 9411
- name: metrics # Default endpoint for querying metrics.
port: 8888
+    - name: prometheus-metrics # Default endpoint for querying prometheus metrics.
+      port: 8889
selector:
component: otel-collector

@@ -1,51 +1,15 @@
-zookeeper:
-  autopurge:
-    purgeInterval: 1
-kafka:
-  zookeeper:
-    enabled: false
-  externalZookeeper:
-    servers: ["signoz-zookeeper:2181"]
-  zookeeperConnectionTimeoutMs: 6000
-druid:
-  image:
-    tag: 0.21.1-rc2
-  configVars:
-    # To store data on local disks attached
-    druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]'
-    druid_storage_type: local
-    # # To store data in S3
-    # druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]'
-    # druid_storage_type: s3
-    # druid_storage_bucket: signoz-druid
-    # druid_storage_baseKey: baseKey
-    # AWS_ACCESS_KEY_ID: <your secret id>
-    # AWS_SECRET_ACCESS_KEY: <your secret key>
-    # AWS_REGION: <your region>
-  historical:
-    persistence:
-      size: "20Gi"
-  zkHosts: "signoz-zookeeper:2181"
-  zookeeper:
-    enabled: false
-flattener-processor:
-  configVars:
-    KAFKA_BROKER: signoz-kafka:9092
-    KAFKA_INPUT_TOPIC: otlp_spans
-    KAFKA_OUTPUT_TOPIC: flattened_spans
-  autopurge:
-    purgeInterval: 1
query-service:
-  configVars:
-    DruidClientUrl: http://signoz-druid-router:8888
-    DruidDatasource: flattened_spans
-    STORAGE: druid
+  configVars:
+    ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
+    STORAGE: clickhouse
cloud: aws
+clickhouseOperator:
+  enabled: true
+  storage: 20Gi
+  serviceType: ClusterIP
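The configVars naming above suggests these values are surfaced to query-service as environment variables; a hypothetical rendering (the query-service template itself is not part of this hunk):

env:
  - name: ClickHouseUrl
    value: "http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password"
  - name: STORAGE
    value: "clickhouse"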

@@ -16,14 +16,14 @@ spec:
service: hotrod
spec:
containers:
-      - args:
-        - all
-        env:
-        - name: JAEGER_ENDPOINT
-          value: http://otel-collector.platform.svc.cluster.local:14268/api/traces
-        image: jaegertracing/example-hotrod:latest
-        imagePullPolicy: IfNotPresent
-        name: hotrod
-        ports:
-        - containerPort: 8080
+      - args:
+        - all
+        env:
+        - name: JAEGER_ENDPOINT
+          value: http://otel-collector.platform.svc.cluster.local:14268/api/traces
+        image: jaegertracing/example-hotrod:latest
+        imagePullPolicy: IfNotPresent
+        name: hotrod
+        ports:
+        - containerPort: 8080
restartPolicy: Always