From dc9ffcdd45ba0e406d30eca9f0dcc8d4728a9186 Mon Sep 17 00:00:00 2001 From: Ankit Nayan Date: Sat, 11 Dec 2021 11:08:25 +0530 Subject: [PATCH] feat: helm chart for clickhouse setup (#479) * minor change for volume permission spec Signed-off-by: Yash Sharma * added basic files of clickhouse chart Signed-off-by: Yash Sharma * added a simple deployment yaml Signed-off-by: Yash Sharma * added clickhouse support in signoz Signed-off-by: Yash Sharma * clickhouse working Signed-off-by: Yash Sharma * chore: helm charts wip * chore: fixing path of otel-path in hotrod * chore: wip running clickhouse in templates * chore: clickhouse working in templates * chore: clickhouse helm chart upgraded to latest query-service and frontend images * chore: cleanup and upgrading signoz chart version * chore: adding alertmanager and minor fixes * chore: persistence enabled for query-service and clickhouse * chore: scrape interval reduced to 30s * chore: changed crd api version from v1beta1 to v1 * chore: removed druid parts in values.yaml * chore: log container removed from clickhouse * chore: removed *.tgz from gitignore to add charts * chore: added dependency charts * chore: added clickhouse-operator templates Co-authored-by: Yash Sharma --- .gitignore | 1 - .../jobs/retention/retention-config.yaml | 7 - .../kubernetes/jobs/retention/retention.yaml | 29 - .../jobs/supervisor/supervisor-config.yaml | 76 - .../jobs/supervisor/supervisor.yaml | 27 - deploy/kubernetes/otel-collector/config.yaml | 60 - deploy/kubernetes/platform/Chart.lock | 20 +- deploy/kubernetes/platform/Chart.yaml | 20 +- .../platform/charts/alertmanager-0.5.0.tgz | Bin 0 -> 5529 bytes .../platform/charts/frontend-0.5.2.tgz | Bin 0 -> 3847 bytes .../platform/charts/query-service-0.5.1.tgz | Bin 0 -> 4122 bytes .../platform/charts/zookeeper-6.0.0.tgz | Bin 0 -> 22052 bytes .../crds/clickhouse-operator-install.yaml | 1223 +++++++++++++++++ .../.helmignore | 2 + .../signoz-charts/alertmanager/Chart.yaml | 7 + .../alertmanager/ci/config-reload-values.yaml | 2 + .../templates/NOTES.txt | 14 +- .../templates/_helpers.tpl | 20 +- .../alertmanager/templates/configmap.yaml | 15 + .../alertmanager/templates/ingress.yaml | 61 + .../alertmanager/templates/pdb.yaml | 13 + .../templates/serviceaccount.yaml | 4 +- .../alertmanager/templates/services.yaml | 48 + .../alertmanager/templates/statefulset.yaml | 152 ++ .../__snapshot__/ingress_test.yaml.snap | 48 + .../alertmanager/unittests/ingress_test.yaml | 81 ++ .../signoz-charts/alertmanager/values.yaml | 189 +++ .../flattener-processor/Chart.yaml | 21 - .../templates/deployment.yaml | 65 - .../templates/ingress.yaml | 41 - .../templates/service.yaml | 15 - .../templates/tests/test-connection.yaml | 15 - .../flattener-processor/values.yaml | 74 - .../signoz-charts/frontend/Chart.yaml | 4 +- .../frontend/templates/config.yaml | 3 + .../signoz-charts/frontend/values.yaml | 1 + .../signoz-charts/query-service/Chart.yaml | 4 +- .../templates/prometheus-configmap.yaml | 33 + .../{deployment.yaml => statefulset.yaml} | 42 +- .../signoz-charts/query-service/values.yaml | 3 +- .../platform/templates/clickhouse-config.yaml | 33 + .../templates/clickhouse-instance.yaml | 104 ++ .../clickhouse-operator/clusterrole.yaml | 157 +++ .../clusterrolebinding.yaml | 18 + .../clickhouse-operator/configmap.yaml | 418 ++++++ .../clickhouse-operator/deployment.yaml | 129 ++ .../clickhouse-operator/service.yaml | 26 + .../clickhouse-operator/serviceaccount.yaml | 15 + .../otel-collector-metrics-config.yaml | 53 + 
.../otel-collector-metrics-deployment.yaml | 72 + .../otel-collector-metrics-service.yaml | 31 + .../otel-collector/otel-collector-config.yaml | 67 + .../otel-collector-deployment.yaml} | 5 +- .../otel-collector-service.yaml} | 2 + deploy/kubernetes/platform/values.yaml | 60 +- sample-apps/hotrod/deployment.yaml | 20 +- 56 files changed, 3100 insertions(+), 550 deletions(-) delete mode 100644 deploy/kubernetes/jobs/retention/retention-config.yaml delete mode 100644 deploy/kubernetes/jobs/retention/retention.yaml delete mode 100644 deploy/kubernetes/jobs/supervisor/supervisor-config.yaml delete mode 100644 deploy/kubernetes/jobs/supervisor/supervisor.yaml delete mode 100644 deploy/kubernetes/otel-collector/config.yaml create mode 100644 deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz create mode 100644 deploy/kubernetes/platform/charts/frontend-0.5.2.tgz create mode 100644 deploy/kubernetes/platform/charts/query-service-0.5.1.tgz create mode 100644 deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz create mode 100644 deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml rename deploy/kubernetes/platform/signoz-charts/{flattener-processor => alertmanager}/.helmignore (96%) create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml rename deploy/kubernetes/platform/signoz-charts/{flattener-processor => alertmanager}/templates/NOTES.txt (66%) rename deploy/kubernetes/platform/signoz-charts/{flattener-processor => alertmanager}/templates/_helpers.tpl (71%) create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml rename deploy/kubernetes/platform/signoz-charts/{flattener-processor => alertmanager}/templates/serviceaccount.yaml (63%) create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/Chart.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/deployment.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/ingress.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/service.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/tests/test-connection.yaml delete mode 100644 deploy/kubernetes/platform/signoz-charts/flattener-processor/values.yaml create mode 100644 deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml rename deploy/kubernetes/platform/signoz-charts/query-service/templates/{deployment.yaml => statefulset.yaml} (64%) create mode 100644 deploy/kubernetes/platform/templates/clickhouse-config.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-instance.yaml create mode 100644 
deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml create mode 100644 deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml create mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml create mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml create mode 100644 deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml create mode 100644 deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml rename deploy/kubernetes/{otel-collector/deployment.yaml => platform/templates/otel-collector/otel-collector-deployment.yaml} (93%) rename deploy/kubernetes/{otel-collector/service.yaml => platform/templates/otel-collector/otel-collector-service.yaml} (90%) diff --git a/.gitignore b/.gitignore index 710242e1df..4e720c6728 100644 --- a/.gitignore +++ b/.gitignore @@ -35,7 +35,6 @@ frontend/cypress.env.json .idea **/.vscode -*.tgz **/build **/storage **/locust-scripts/__pycache__/ diff --git a/deploy/kubernetes/jobs/retention/retention-config.yaml b/deploy/kubernetes/jobs/retention/retention-config.yaml deleted file mode 100644 index b369e2f212..0000000000 --- a/deploy/kubernetes/jobs/retention/retention-config.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: retention-config -data: - retention-spec.json: | - [{"period":"P3D","includeFuture":true,"tieredReplicants":{"_default_tier":1},"type":"loadByPeriod"},{"type":"dropForever"}] \ No newline at end of file diff --git a/deploy/kubernetes/jobs/retention/retention.yaml b/deploy/kubernetes/jobs/retention/retention.yaml deleted file mode 100644 index 5cee0d8347..0000000000 --- a/deploy/kubernetes/jobs/retention/retention.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: set-retention - annotations: - "helm.sh/hook": post-install,post-upgrade -spec: - ttlSecondsAfterFinished: 100 - template: - spec: - containers: - - name: set-retention - image: theithollow/hollowapp-blog:curl - volumeMounts: - - name: retention-config-volume - mountPath: /app/retention-spec.json - subPath: retention-spec.json - args: - - /bin/sh - - -c - - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://signoz-druid-router:8888/druid/coordinator/v1/rules/flattened_spans" - - volumes: - - name: retention-config-volume - configMap: - name: retention-config - - restartPolicy: Never - backoffLimit: 8 \ No newline at end of file diff --git a/deploy/kubernetes/jobs/supervisor/supervisor-config.yaml b/deploy/kubernetes/jobs/supervisor/supervisor-config.yaml deleted file mode 100644 index 53bc5dd143..0000000000 --- a/deploy/kubernetes/jobs/supervisor/supervisor-config.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: supervisor-config -data: - supervisor-spec.json: | - { - "type": "kafka", - "dataSchema": { - "dataSource": "flattened_spans", - "parser": { - "type": "string", - "parseSpec": { - "format": "json", - "timestampSpec": { - "column": "StartTimeUnixNano", - 
"format": "nano" - }, - "dimensionsSpec": { - "dimensions": [ - "TraceId", - "SpanId", - "ParentSpanId", - "Name", - "ServiceName", - "References", - "Tags", - "ExternalHttpMethod", - "ExternalHttpUrl", - "Component", - "DBSystem", - "DBName", - "DBOperation", - "PeerService", - { - "type": "string", - "name": "TagsKeys", - "multiValueHandling": "ARRAY" - }, - { - "type": "string", - "name": "TagsValues", - "multiValueHandling": "ARRAY" - }, - { "name": "DurationNano", "type": "Long" }, - { "name": "Kind", "type": "int" }, - { "name": "StatusCode", "type": "int" } - ] - } - } - }, - "metricsSpec" : [ - { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" } - ], - "granularitySpec": { - "type": "uniform", - "segmentGranularity": "DAY", - "queryGranularity": "NONE", - "rollup": false - } - }, - "tuningConfig": { - "type": "kafka", - "reportParseExceptions": true - }, - "ioConfig": { - "topic": "flattened_spans", - "replicas": 1, - "taskDuration": "PT20M", - "completionTimeout": "PT30M", - "consumerProperties": { - "bootstrap.servers": "signoz-kafka:9092" - } - } - } - diff --git a/deploy/kubernetes/jobs/supervisor/supervisor.yaml b/deploy/kubernetes/jobs/supervisor/supervisor.yaml deleted file mode 100644 index f01ace6e7a..0000000000 --- a/deploy/kubernetes/jobs/supervisor/supervisor.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: create-supervisor - annotations: - "helm.sh/hook": post-install,post-upgrade -spec: - ttlSecondsAfterFinished: 100 - template: - spec: - containers: - - name: create-supervisor - image: theithollow/hollowapp-blog:curl - volumeMounts: - - name: supervisor-config-volume - mountPath: /app/supervisor-spec.json - subPath: supervisor-spec.json - args: - - /bin/sh - - -c - - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://signoz-druid-router:8888/druid/indexer/v1/supervisor" - volumes: - - name: supervisor-config-volume - configMap: - name: supervisor-config - restartPolicy: Never - backoffLimit: 8 \ No newline at end of file diff --git a/deploy/kubernetes/otel-collector/config.yaml b/deploy/kubernetes/otel-collector/config.yaml deleted file mode 100644 index 8e4f4d8a01..0000000000 --- a/deploy/kubernetes/otel-collector/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: otel-collector-conf - labels: - app: opentelemetry - component: otel-collector-conf -data: - otel-collector-config: | - receivers: - otlp: - protocols: - grpc: - http: - jaeger: - protocols: - grpc: - thrift_http: - processors: - batch: - send_batch_size: 1000 - timeout: 10s - memory_limiter: - # Same as --mem-ballast-size-mib CLI argument - ballast_size_mib: 683 - # 80% of maximum memory up to 2G - limit_mib: 1500 - # 25% of limit up to 2G - spike_limit_mib: 512 - check_interval: 5s - queued_retry: - num_workers: 4 - queue_size: 100 - retry_on_failure: true - extensions: - health_check: {} - zpages: {} - exporters: - kafka/traces: - brokers: - - signoz-kafka:9092 - topic: 'otlp_spans' - protocol_version: 2.0.0 - - kafka/metrics: - brokers: - - signoz-kafka:9092 - topic: 'otlp_metrics' - protocol_version: 2.0.0 - service: - extensions: [health_check, zpages] - pipelines: - traces: - receivers: [jaeger, otlp] - processors: [memory_limiter, batch, queued_retry] - exporters: [kafka/traces] - metrics: - receivers: [otlp] - processors: [batch] - exporters: [kafka/metrics] \ No newline at end of file diff --git a/deploy/kubernetes/platform/Chart.lock 
b/deploy/kubernetes/platform/Chart.lock index 09a4a30ea0..5db85c3154 100644 --- a/deploy/kubernetes/platform/Chart.lock +++ b/deploy/kubernetes/platform/Chart.lock @@ -2,20 +2,14 @@ dependencies: - name: zookeeper repository: https://charts.bitnami.com/bitnami version: 6.0.0 -- name: kafka - repository: https://charts.bitnami.com/bitnami - version: 12.0.0 -- name: druid - repository: https://charts.helm.sh/incubator - version: 0.2.18 -- name: flattener-processor - repository: file://./signoz-charts/flattener-processor - version: 0.3.6 - name: query-service repository: file://./signoz-charts/query-service - version: 0.3.6 + version: 0.5.1 - name: frontend repository: file://./signoz-charts/frontend - version: 0.3.6 -digest: sha256:b160e903c630a90644683c512eb8ba018e18d2c08051e255edd3749cb9cc7228 -generated: "2021-08-23T12:06:37.231066+05:30" + version: 0.5.2 +- name: alertmanager + repository: file://./signoz-charts/alertmanager + version: 0.5.0 +digest: sha256:f1fac36fa869480261d1bf091b95e18147654e52b891ed743d7e3d985fa6ed18 +generated: "2021-12-09T17:07:48.919918+05:30" diff --git a/deploy/kubernetes/platform/Chart.yaml b/deploy/kubernetes/platform/Chart.yaml index 29bee67bbd..b04e0a872d 100644 --- a/deploy/kubernetes/platform/Chart.yaml +++ b/deploy/kubernetes/platform/Chart.yaml @@ -15,29 +15,23 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.2 +version: 0.5.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 0.3.2 +appVersion: 0.5.2 dependencies: - name: zookeeper repository: "https://charts.bitnami.com/bitnami" version: 6.0.0 - - name: kafka - repository: "https://charts.bitnami.com/bitnami" - version: 12.0.0 - - name: druid - repository: "https://charts.helm.sh/incubator" - version: 0.2.18 - - name: flattener-processor - repository: "file://./signoz-charts/flattener-processor" - version: 0.3.6 - name: query-service repository: "file://./signoz-charts/query-service" - version: 0.3.6 + version: 0.5.1 - name: frontend repository: "file://./signoz-charts/frontend" - version: 0.3.6 \ No newline at end of file + version: 0.5.2 + - name: alertmanager + repository: "file://./signoz-charts/alertmanager" + version: 0.5.0 \ No newline at end of file diff --git a/deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz b/deploy/kubernetes/platform/charts/alertmanager-0.5.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9ffd5d40ed35a3892a99e95e9efff89285717253 GIT binary patch literal 5529 zcmV;K6=v!miwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH>OZW}q0^Shs-PWB?U&uU5j%53AjfY+HF7@H3v&SbGzEHYGg zN#eMh?2v5P8e4~ZmV2~&k^{;9{*dg%gHf`kdmM>hcL^?x)P9URs4|7bKke%Ak|Xj||Krz8m#*u{q_7Zn|J zoh|q^@JYmX%k>k*a%+PZH1jbxa#l_MCNoS1zI;fZhfH(=mF%PM~^x$Ip zmaDNqZG0SnEqJ#;A!vZc)_}nZip=18&Lu*S&X?MseZA*FVfS1IbY~BG#=`U*2XB-h zXJRHjxLu`Gj*~FFL|>qi9{lUy#&%`HdeG}R(Fa^<0>__iUSD({hyv=4V?o(ioRx6 z8eSWBO)~Il8=T?7_+L@!^pZFnP||l+JR}HMHGEq-82VA4cwh zd*~jS?-Ggd=UDoc$&o7;zUzmHR4DoZj|gQh-jFB`G4B>F-GE8g!n-n=PU(&Lk;j&M z1wylZv4AU?J_D;$9`p`+wQd+NIdo)ms$1Zbm`rF$6)ngO1dof6&R)MZp9PZO9SfK5 zIahz9A^ zGV@}rRg??m!JmhJJ~YJAskQ)cO{^qMxzM5o(n`M%j0ZU8Lb>oJkxGktA`z~snrDLO z>M>5=rv2=?r(ZnBp=>5l8jc~;HYo65NBl>ejfatob}LeE?#j<|%*$(hocs_WrMDKMw-%o>=~JFq()>Bswny(q z+GHdWg=_aA5=st~X_F&S>X|X${;xeykucy4kS(a-Omj(EAquHdsb1%#?B`DD8_K9& zR##g#EvEs`MKzmA2`piDw#wQw9)|pyvYEjiQl`6*;1?~&YL1Z}fT53460zJj^A{L# zGkXL-5#bUhf*9UR6$*G!u=<6YB8_OVPJWztFd7b{^m~Mn*(*l}-@l>h6h9{@8xVDX z!-F@}akLg)Vu-%t!tQa3huUge1<|@src=tOTH2qbA_}KTc!{db8fYtiR(lrn;5C_G znD*F$_c--nu2d|&!Jw*90r%xV5qTvCB4PcK`#QPxi%EG)5TPaJ!Kf|gS0=N7bfVx%8&k%L+HI!ap?7XZ9u)=K3vbKKZk%y>tY2< zMk4A%Eck+I*CmxegDqhx1+b%qEY-qY;36A{C1JP^j5h`|0uMBui$t}rwUOuBcc#(# zxY|k7eeF(L!shAOXaVuH@W0^!=2r1NlHebLDtyO$&00x+MI+N%AnhvauCtI&Na%rq z(pnKN@ROnu@(ZwjH-Du(Z$w7)hQKg0b z#qDiAW9V77n4fy!!fKTfVmy)d&UmA;zJ?z>QRt;hV_DP14`*u&-f*UUdI{EfdqZNw zjaj-SjNe^;u#PMI^!~L5sF(;lRWrX|XcW<4DRX-ek+?4~s~me!j;I zvdL8)45!det^P(*jV^j{*H2~j_BJ)NHF;jG(hmQ+y#@DD5tT@|1I2%eV-&Dj-QH@4 z6hG%b_NG)&Hq)SHf_;g*yn5SupM5tXBWlL%ZiZia4|TnrqbA9BWo9cW-xtl&SPI>mIpMz@P9`shLozjE8b#tmCzPD~h8Ih|^yo3@m z3oW!J!Zu|A-t5D6s5^sq3hpHr3WG7BLRM%<`A?@Q5Uf`FkTa;(t)F5vqr+--+Pl5Y z9(~k@B#pXPBdLZddsHT3*Xih9CPdq<&5~n`TN&PRB$b5HsC(v#w`Uf(7xZSMyeb7=n`4#2vQ;2OXehnC64$bcq@$MCbG3s1`=9@X zJ&iqG8E?mOV(Qc2gLYSd~e&2*7W8$MR z-S06(BGGQX=BeHdIQ$|x(?p*3Ze4xsay3@@ui?MlEVQ-iW$`a*wp-?hNr2F+glV#f zs`buyYxG@=Yt(7n@{4Gs3L`_FG;WhifkRnHLM&w1t$Zl`2;+d02FSpS-n z;IAYk%tx_~{Q#g%lm{0IBC-TUt`G=J@mino6O~?MXt6G%go|-{Gm!@d+_I@;%gY{O;AKw`XsD zsPQc3!F|O(gdT}ww|bP(fTx}OEO@!!1f@(W&9l?`keXGUS{9)q0a3)@F7dPXkxHsE zwvG;7x!UTPy zO{L@!!ryZ%ZYMpZ7zPrEKpXzT^pc%Q*qJO1LteeT)P1$1pk2CM*SVO2u(s49$u~8d zd|>a&m5n^wi_gH`iD}|fYS;6(*vm7vl-%=rE<)a?v%|gku6O9{%eI+=cY6P&Ezy~? 
z!(yp!k2$TCrw=AD7L=(e^!_CKf0Df#HaqS^gstmyMF>l)s}(0rUDGRTRyY+G+slT+ zHmtJg+UuNCndSC#n$rSzATSQ~4&M9E9`ru-zC^Q3V9^pPHxOz&Sxj>bBUjD`hIp^r z9Ze_aS`VUb;nU{|=@hi#L;SB_og z8j=B+p8AxVT`d`J$ilVaZw4X1>H2r*e->wblvMBy`hPTRIsY9$>;IFq4UzoZzt>9q zHudp0t9cG47yg)wO8CDL9xCnl+C{FDktc4#4&w@TBt{Dq=RAt_0j&3z(~&#+zCYBy zZz*fm)rYn$o%=_s(vQ2lg%(nqzq~Ui^6$88j9bcRgWj6F&|V4U_9s0{eTx2KY~AzU zaEC#s(8iiS-)%VtI9#!J9ZNnWs>TnZOWK{LZ${={7km--I z+SfA0ZdVtUjqRINA<~brDV0+&)-1p2S{x4e4H={F6(YPrWxh^Z?BFTdQYK<4 z(%T%fPH;|Y`vqQ_wCq%qP&)f&>C~lE=@L}wKGC+mp|yOZ>&z}G=Spj!;ydJLf!d_< zFCqzX;~Xl)Wl76x;a&Ho#k_6$c=ZOf3Y|$$mR7z-TDBh`E=4Qy z)iW|alyC347Q(g(@vo2tUgcpx2s4p_)UWZnWvNmQ7xi~D1ZLCgn1qAW~{jw z<0Oqz*WwIq?7M>$=Aa%P{MKv#UAO+HJ4zbS{Yq1?QU6Ev_}|NugVD48KSjHDxV<$V zcvdIZTPK6?3hRUAD^ie*0 z&0BEAeI9!7;e70r&zPl7DN}az@q7nAw^LT+C^v33^UUGWkvxA-zK!(k$LhOD`GK(N zL4BFKWZ2*D_t)+sUw)_5pCbuEG_T)2lHcfO*`^-6v01P&7kl@cgIY_Q|d~~s#{ng^h-(ozGd0+@*mY(n3niS+N}j$ zC2rkV>dL)tHSZ%Y1yFuLEhEC8qkkonC{1~)aCOIISi-jZA!VEUS0Zst)Es)X6e;b8 z!5zpsIXFI%N!030HJNf&7vTb?$(plPd2QoQCbMm~lC|<>#juUSX07DZM{d1UXrAuB zEELc}e_s5n7_F!W_N5dzMk$xXsk+gZ75%=?Ser-7s_T@*JFBD8)VGVOKDta0^bi*Z12 z4Up;L7Nt6}@)B#iE24$FS08CyD%!Dm%%a3c4HpqJS-r08L%+W;#ftaBB`t3Bf zRx50?s9NCqTD9K3x9ekCM$05loUYeKE5h!ShS=2IK|M>hEfeJHqYM>#?8)9G!2*S% z68q_k;iua>cByy0Qiz}l7@7iWIn>Y?mSGd7BYEUDudLHGp|m69-Fy{B<@C-CkgYQ{IPOnp z$K&;x+j$k=Hj0WoZbQ!^t$nk39&&0sRfJ-*<=qx*fgqO@?x(uiy5eTL?4csqJ2ay#@0I^6k7U zHDWtv;%Dz@RN3>;uubbc>j&7PTxRuXZK*vE35`C~Yb@4-4$#WA;smQ(qkv?m43$YL zL6s$HSYEeQ=`2@IEm}%o#p5XtC+V+vFIJUHWHtbH@UwXda}!`?4{56xT3*9if?>0J z*OWRbZHPs^qE3rj+|&zefelmr$F2+SShxhv45TIrVicyFw(GdBEkzuVP^S%^u8W1N~3-RW8_|Kr|u#RT7)p7t+~t z<#p`rxf62bn9{$_p34QDIeY)@Y{5q&C{H9@y!t^pZY=mO=qtyi0g{33F8D8wyO2H) zaNsxzqe|(WWbk{nXchhUX&Ti|>%R6M=MTbc(*NO6UH@Mm44>=&JxOaRmRzlR{;dgo bZqMzxJ-6rfo3#HA00960`FVn`0CE5TPx=O| literal 0 HcmV?d00001 diff --git a/deploy/kubernetes/platform/charts/frontend-0.5.2.tgz b/deploy/kubernetes/platform/charts/frontend-0.5.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..7ae432d3e86d6aa695e0ddb2b335419d7b4ba1f8 GIT binary patch literal 3847 zcmV+i5BTsOiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH+#bK5wQ`OII@Cub{~>*n^vH zK@bG{dwcdX2!iHkFbJMM=2dMtj2zvo&6Cn)DM zLXVRKLnxHX(~A^CAZf~IL<}l#UZ--`V)(ahYx|!QmZAEWIDjkce|Inpn)W}~-@Uj0 z+h{xR8Yd)U8k_=Ht1gtn_jcgpj4GfC2>g8T?rnD>WJ0vY7$%gV0eFoOBNAawBqd{p z3bcSRLQ0gv7%0~Q79x|NF-aNGsC>_pXkEdf$hZ!n?|C#KQw%)-iK$Rj3%LkE(J2?d z_ttI%Ak7#X2}Ywu2uG7+p+^!Ga-Aa>WsIF*BvGpne)-k9q4#r?lExT9r{hgB#x^>= zNbm`rekM|duv``24L8n@IX=34O8G_Cz4QQn%TO+IhxbXcHJI)1D)%CM5xfXI0K!n} zgm5xNX@RR9;opM4?Rs85tg)kvZp~ZB-xzN`6+7%*kY(Df1oA?a3C9IW*8UJyHRvaKEZKDjbOrdXKk0!u{ z289|3k8@^>7HOH`GawETRWL=4(y$DfqI_x*7i+Xum|`6~FBS0#Mwz5~acFenLa*7I zsGp?B(hvqg5ExmugGD4IW6G$esGNiWVky$%xeEtx-`Z!1B!17?;)4+S@06ihD2+*I zr{DlGexQy8HvrAp}OX$LU7cWSX zGW3~<2(#}2Qle+h!T>ZYo_5SoWzFx{+D#2XS2L9hxg@sZ@O4{hc@mY#WQ3&>{u3cn z8DVSz5+ji$Ch0J$rUfW#Dhr5nhqfMVbx;dnNahIII5c7$S|qrNsK^{!(M%Y1HvZ3O zVWv>}&ifIj1W<0YSfMhk^2mKkt;~9Z;DCHirQiv2t>B#M88E6f%XOk)=2`$$sN;9j@N&Y>-#Cqud;Ke(dPw}@5)pbXB zT*P>S3?nV1bGlkE;|iRD|MIKnk;#N|su%Y4UB}5kYg^}k>*(A&e*f{!iLWp8P0_e2 z{|ol}jr?zKICy@a|J_FG`|uMwr&8IT!KV*zVZ1QDjpt!mAs7!tc&tKix$FX+fdAQT z8j84+2Q65wEQBO{itx}ldk9}`K-(Z-U;#>K--Z^~JJUK{E)AiF&9Ryp7`0r22>`2A z*y}Bqj+Dg?IdCfNJ!mTvzv!+-^6Z*4=+sAhU53I z-<*!#fB5+7;bY^;Bh6rn8oFJRt*Del2!@Fd$V}kykJr7c6?6rBQG%Phqk5Rj0_Zl)*%K#IhKnv(C?pRH}cICoe?_thH_;4+&En-fnZPP96FmSJ_Y}gBhJ-9YlQRSY z6Fi56@=WI&7^BWrlyEf4cO_&_C^K5>=c$ZsKZ^_^!r`3gXy&q8J5Q7AR?OhhH!riA zN63-g2E zi8V0aTA8z_&Z=eo%bA3R*T%Bi$!@j8b(VY$Qyjg2eR_QG?oC5psfe$u?hHC4P5pYB 
zYzDq+m1QZ%XPd()SK7$yRg;_%(^kt$P?MNwVsTew+4)Qr)rCm=gXezW2Y%mfEb~p5 z+h14ww@K&=L0oJ8Mv7){e@h-+ zD*qb>&HQh;*T2vIZlx`ky{9mzNoWsiCX}IZuCEd!mS~0{JndNz-Fxc2xk&9sx^WI< zx52)b!*$K}oAOOx*)3n&-MQ~BoUB_c{Co9e4!(kx8INFp*FMtZB%4g=1$4S4GUJoX zvt#hk9W#+N4HNMe@LNV0oluO8hqSEw-apY{xA5A~Fw;;l#)ue)szieDkF{5K6P{3H zu>vB|?T#@&L8A&Dj~7-Lua8d*tVtG3O83O~jwT?T$8|{70nS~bxPvea=aey%pJdAL zsZ36jF_v??UAARRFGNyF%bSOtYnZJ7*OIx3)Ww&FmjCe8^_R86=UNmFOFdMsd9C$+ zGlQw5Tu-3$7uEfX>NK!9y5FK`+n#F*SXo(PF!@!NSyj8bxvDrQM#ji4TVV^<**c%9 zxZQJpe+OSdV#-W>@BFm`ozu?uSd>T-!5x$85+)sh!!kpb_-fX(jEC*1Xg(?5Wq&OI zDnA-}nTi-C6q-SHuSNJUx8XJ4UO)Tz&KJJbeVJ&;g&;6lQ zY{kLW)q-ozv)jsqyM?$S5p5Y)B%8z?38OPiFQyt9DZwb2j zMsK-XJC(E=b%@RHf0ZgHRSvGPnM!eC zvR5E{QF}2ba?zYqB$D)$;T#$4*ItaX$poc>{yw}otG$_Not_p+O5g9lEJRT(fXEMp zd+Rj;kV0qx=w(Xw)Qm{%+1tk6OegH+8p<6dxewpGDj!QAEf=R|m6-ikCStIg2WL^{(Tg8kW5?evCzV&R>wyp2NV591-#)%d` zlZ3UCtL%D<#Hn5})ru)+XtNHhjRe44uq?p}SKVBH8l`kai;BcEtlrLDBlFA$YeG57 z7JB6wz;SKNr4!#Wd9`)s{XreGD{6#Jg>&2Gmqm`ST7|6#6suLIiD^_Re%wG=Rz}u& zu$Rj9ITb=NNlWcTMTR3=mv%|M-58ikp+zKE2pCK0qPm7DfPaoda5Zj{EmZkqpr1CF8QfJVq;XC6lwuoYjKAM)8VI!hvA@op3c?M)a z{kQ_U1B}j*qf#R&#u(OKxmt#wP&d0>DP0J?&3?N8jU{qbiS%DgrK|e$`?~s_*$a0U&>y2F z{rX98bqHQ)mQj!kv$)6#BW=wE?|)2h-}$p;k<8hLqngtLy`i8N(|$uQ63r zyc-&}d~ky`d%#nk{Mh>UnrGiEUo2GOJ#D+!+8()nzVz*jDj7j7Z^^2mTb`n5PVlv8CouOWd;e&}M+Ni+(l zi*x*AoTI!$7qYg0-^i6txsZ6%G_H*QyTgtD|FL@?|8J%3z=&v#(*08ixBhTGLk{DN zvY7HIq$D~cQ&j8!2BNam=H8%Y$QVqS7`rQ1%BRmjA|sm4(Vlu#`U#J{9pE^11t*VF zi4%H(u?yM%@x+JsoGn0bJJPTODN6g}cHi?~pPZg(A<^4`yeRqe;R(c4D$k!%-Lrok z`<_4kUH0tXqGLAgnLou>HRrt&(U?SMS!#d#puDHPI#0c){+OJ3Pko)3XCdj-d-}iL z4tyq(icG=L>o>~tQz^b+q&=U;NO}%jiZ7l&SCNRZ_s7l*?%RF4Z-4goe*pjh|Nm#e J=8^zV000v}kd*)c literal 0 HcmV?d00001 diff --git a/deploy/kubernetes/platform/charts/query-service-0.5.1.tgz b/deploy/kubernetes/platform/charts/query-service-0.5.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..cf5f65d2729337192ff56d37f08b30a1b999005f GIT binary patch literal 4122 zcmV+#5asV5iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH;Na@#nP^P5l6M`zcQ+=!C=6)&rNTVC6jaZO_D;v`d3sVNsk zwj_)}fCGTCHHz=quK4D2Jc2Axe%KmB&efA=Ko})3X#qAZf~IM9dW4v_AW{X3zimR@eV2 zVHv82HUMk%e|LXpN&k2D51#b@G1?ZK;)rBSgWG$n)it3Aye)V)p$e!10>2!;dEFTa znGmfph7n~J;1nZ9B*K(PN`?#-XaPfnlqiKUP_6~cL?%IFk}{%E1)e9->ivnxxDKK3 zc{Cwo3_SpesZdl4ISWD2F&BSy>xKg$%@`X9Mx$8>=c5au2ND%>onskfj9p z{%Wnyi>80mIf{*C<1Ch$maVj$!pD>DCzn3h8iPmY@?K+Y=vFYtd zk!h6S;o;$7G2)bHqC_Sm3}M6!r#QyfDkWTb04Iz_H$RF@VGGX4bWTiX__toK_xDVp zG-MtvI<7^El4v3SYPfqVWZc$yU37W?SMM(09=|#ZVFBj>3@)$U{doE6`gm}D{nObm zA^1ObCi?V*;LDS@yKg`2zZ}2+BN6Q2{P-uG?4Q3mnGU1mdUVTQcWw=@^36DoBD;eg zz!qGk7|{_$1-BE_6O=$8hXdjOK}|%)Vj~`rM557<=$=9t5oU?of@98wwobtc6NzK< zM^CVc&jTO}*bwITcKo{ug5ey9QQIhkEK?X**z*x^p+TVr!sDD6Bk)>g_yZ7!h$U+zT|B7RtWu{l%bj_jY(*i-~cjytS$sM0L|C$70M9$ zIVqM=W(MbDD{afA5U?P;ZGaD9|AEAp%I(SEtlwrU`M40UdNQs^}GXS7j z@w8=zDr+MyY;00P(9ulgJYGz;vrC-S4&j^{y2xBvl7>Oi79$QJ%0+iL1 z8N|8$Ek|1s)B+fiDS|epml)F<39ce4GDlW45k{Jgxi&&*>_p(~G+{~r}04j?nX8JdjQ?n_7(UqCVAQo^-cx@$> zVHtO;)lNeMW8#+bvBl0PHy|YbEmO8XF%xi%P$?p3KiKt4OoUxMLX=51gP|mrHzSP_ zwim444sxUkUF=bmW>%V${C9$h^(FnC!#6Zv;_n%%O-FcK#CU}aBQ2!eT~7;UEP)g7 zkH2~z8I35XdS<)7wOik~)#Ja?J9jTG-<@3r`c6N<9P8r0es8}K|LyPhcb?+E$7uZk zUZK;{N*V_5-@b<7tW59p1Qmi&X~JU_dh>Y)=m>%jE?+1NdF}<^{@y}J!p8{DjP-=@ zXbIX{RRasKfDWv!c5h8&G@qM^o;Am6W?Qc;WtjK8Ij|-GO<{ zN2bu=j|A6*as~c{h;bmK_Dgb$FxdD)hY#z0@AC$M=Ys}$2D$nwBB{p2(x)`hnF>f8o6SJ`F8^`wTLPkqC<5DOIT_)z zl3^=&|5}jvAA}iqB|pc105Ik$gmVoEnSmxZ2n0rW3kl_!&a)UJ(^ZskKFG5YG9{E5 zQ4R6{!M0yS1`*+KOLR1GA){TV2_P%Fu>Hk*t)>xlZa3b+?ZpmWoxS~Ve$uju7Ud>$ 
zZ#pS-I)(8Ee?Ir;^IRdnr#!|xDE6FH7+Po}^qyON3#`&P_44mqBw?|f&=B0%V7ua# zidhDs8))X>^7Q)R_{~{^X{m@e6?_IhNz{U(aw{~Jr?W^0W`0sjxEJdjTo&M=k;M)4%!EPh|JJ@@w|36Nf&$};RN|VqQ zKSq?Hv8G3fVM{c@5MFexN$$Sz&hAp1nKtSwHm?l49Ij*XS;|)!$Ib%A=DdMBaI$u= zunvFdG4O`<2?8T5>sYeyZ`S#_}Bi|=$1$l!5yV) z9ich_jrTOv#7YD9_Qh6ojbv{J^6_Iz#J}wV3P2X}nR-QAO@Mmer zC-^-R8XFnoGM0pJGRC+woQ1VGsyG2{Fw++$A5cI@!w;#HI=;5yBqDf$UXc91%+~)nnnYY zl2__~wS2##=?ar6$`g^KCb;td_c}e})hdTzfeFZRI~}~Dx-i+!EG|A~;izwg zyrFy?+#EWS-yfnT{kn2jhv1z?ZXQ(~)>;1>%Ht5u9fQ0gGY^ZyK4T&lT(V}v+4nFv zAe~iwW2G1S1f231xrV((<lesXb;}<^NMD64VpSRL6Z1laTalgMl^vzyI=}asIc{??2Ul9;em5|Lo^Z?!@l+ z8h59k&ef>k-ZUvzxU8{)kGYukFl6j{It$+l=MT4s6?n*q7TS zLkp>?F|#N(8Cvm+QdM zs{enh)@uLr@>P41v(a(#17trgFDbr3Y&kDp9Q)c2CyI{&(<1&|b-wJB&{;zlESr$N z_*75|Eh51}cy}_$-?^pdKt(xj?ZTFel#K;`>#etKjsEAQygz3K*6M%1`Tf_+mj_Sv zpGRpMS6M%2D01*GvKMV--v_cBEp?N|x*o86^1rsSyIEVQ|C(qVWlZ6pzYn-Z|Mz=4 z{igo!_Fg{e|6{bOT9Y(Y-Gy%F5^#lIrr7_qK%iC%YhDM+?-Hq+=dWHcaHSfNx#9A8j8hmB0{!RLJXPF9?CHTfzWz{Bn3#F(rCouyCZla0~wIC?&XAW16pAy++beLCPx-B?j zq9#K@WV{g(-B$3$Ev0oPZE^Xl-Iy&I>gt801z7ExlGiq0yJ2pa<)zt*yG@G`czLBDJ`URl>JxqTL+|1 z?NQsR{7)MhvHu!r=NK6s{K`ylUH#`^XX*TJXXh#Zdz98%))|PnCFS!s&G|YFvGE_I zrn6L{@r$oYCqmr#A=p_}W-VixG&b|1!dl;sQRfygPZEZinhnKWnNQifjv;*9HlW*u zMa`r6?M->{nTt*qr$i%vy2vUnaEkIV>qvF|4-89<(~ zv^hcLJ>jvp1suoj6!!a6;)vd1?4rPbdmg|g&yQkLM-rqc?f(`Kc){t_^_3P9y)DSU zLjQ1b1u>P%3&vD;?Z1wEFBtwIyY}DW#bn$y{}eyfly?`1h9tVlQhWDBc`pKWn|d#T zA-VBh1UfO#LejDK;=jEu_&_8TnS%4vGvx)T6u)7ly@19@x(-~5-@IU|A`xTvU$Gwi Y)SlW?`~TDaCjbEd|5CT1=>Spy0Q|l5s{jB1 literal 0 HcmV?d00001 diff --git a/deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz b/deploy/kubernetes/platform/charts/zookeeper-6.0.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..453520ca311c92047cd1de95bb0cbd3098233cee GIT binary patch literal 22052 zcmV)-K!?8{iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ(dfc{>FnE6VRn$tEvE-xfmgJ9k`kl0-p3( zKNHwIu%yRDjDK%ILSmNmw;BMc;6oPb;vthtux_R_$Wamu1W(xSwgO2)uV`QJEOqTl zY}l*a03c}^vB1#XY7BTxJ1|fxm7R9G&(t96wgMivyG&_J+JT-MvaJNA?U+bKMgA$D zKn)gX(j6509l;POsgiA}NN_8Q8f_N$i(gIr+}zpO{13fF(hc8Szb1nuIh77?oSO?580p4w*Fd`vw(cMcwdzCIn3skRf3a=|*{p3CMQL zn+w|4(td=i0=_xVqUe$af+|f=W8K2(G}Zyibvh7_+~21prbP)4hN%b8^a}q-N9Jo@ zODZl%pYBIQN)7hX``yswS)mO7aFfsyWTmDqB51EzQ?yt%A{eD}%_7uRq%Yf=M1-W5 zbdeKgt1^ynQ!bgprG9)V15l)ISN3K|yNo2wo$aQ0^`_APSYLfR8{>nP?G5t_;!G;&Qiw^CA&fv!qSY0aAbab@SEeMOtqNVT zo-jq^t!(F0rJ3@unXmJ0O^G3{nF=-}iZ)y`o%P^wbbB1D3MV;W+R@# z9nYc=B6dsl7fC75k4g|=HR>=5QU|_@V^X< z>19!*M+p>-Q_Y{Sfl|)tTsVw^qCCXm+-$7l|7R9fr_!1RIA2SRb#U*>z<$gb1NNg# zE1z({H9~rt2j-1=(E7g94Xzmq7|6gSKQTn(tO{DT_L;YSju5ry}X!akAI zFsLmZP@z~)n~xNygW(5Ik!+MP&}u>xDkfL+mX5S$V08qf$LSNHSvOC3NIS6m=8b(V zbD}4}8E-_1Oe%Q~tr$SIow44WgF@`HbD{g^G}kh_B!IA^&0nD zTBX(?cWH1NRWQR|IA$@^N|~PJS=^;U3yhS9(g{;RP1`i|JtRiu*F6#Py96YQvq+JI z@=Qh}BS-brNfZ$y2$>WaiYS~&R|5*?2j5#;A&Ev>^yCt!=UUvyiNz%J@srR^Ca^P4 z7X~~DXsTrUj*HuflTfxbN$oNsjg*H6`J3;vu&?I>KukWqPjd6313TDJ=UPWV(#s=0 z(jOPYrQo(=jBy-O*M_x7gg;z40-d?oSI~%j1U8Bs7NCIT0j^ zX!JnyK(8_dBF)s8A7-Ra^7n2H&5_aAoaW5`56*uymRG_GeZ;dR)MO5}~@3nCYolaC^nHWZT`|-bS!R@l2YfB+@e!r(#5t zz8c`f)XnNwMR7flT7hB-{FZT%#V{a4)Xf8)Bs4(Hhd@M0NCjGh2~#-xJFsKge#h^i zM+ERp=|+t7vVb8ViJqr8wS6#hlmr&ctH}~Y&jM?>TXc-#h*AV3OZpLoAY!^eOhynB zV^`$gV(LJZ$)<^k! 
zEf4)pYcFws6C#E=x*PB_-Vit)$lqX#ZYWPoG7scBICEFF|A4z3zZo_t&8ID zr&JFSPf%SxI!7ijpU;xLC^|ZqzJX$3>0Su52r2a(jzgA^ObzrbH3C{&y$BN(n!SW4 zqnKy%nD@0IU{*f8tfiSS%IW$#Tu`l)nembdNzHE~d}A&Z2pZ^7(ck2ND3|DC zWy%xu>IZ7wPA zxCl237lEsEX|+Uzc4&I4a82o!TH3HjS#`O0dE7o=eJWM+Lx;3$6YwKCQ>eom>W$)x0I}(SZOBo zx=)kE@zo$-8l)lrd)NM5M}X^GX>}F$yPkk>@;w69y!iwdGC2!D?yDk7fWc*qF+?Ze62@F6w4nF=0efnHG~jsvBoBu?ds_m4G39n&zPc|epd*oTk_8YnJC`Ea4M z)xBeS&OkTC$_vk(V9E%#if~3>-hk>Ao*eW30TUny+PkP@yH5v9w2Z>eavPaINJG2a zAbJ${am_@~I*-|R9ndJ1K!%)!dQDBK)t$Rk-BF52pwCa2)r)~%SX!Qyjka}xUI%LY z+V->VLBHhMn(06@bU8ct!)_qHnLX8?L{-YCXA*TO}OQrdC;!=rN}ZX&Pz(Rq1}XaX+n#{CAjKBp}R zBA$iT@x+47Mg8+`OTN9X5o&4#x(?xdJu0K6vDa2k2X=Sf>@)y+TyrNC=jP>`H*fU9 z6B%X6ynJC^rdc;)a&XEOS|xj7D5#X$SV8p&Kt3QH4DEuKlXnbpd zdw-9CFV*%M@cUz7)W_>pP`3q^M7^$dsR2@PM}xM`!iY-C0M+lfl3DgAL4X>vo!y1Z z%_OR4D>_Jv_jYzPZG&xZ4)2*)9e?|cp>0o5mJ^4qEeLXedp3o}l50fxv!tP2S+}pV zxGJC=rDkWR2MJdwje@1BglJ(}W6ZshCx4`u2yO`RMs`rm^kka~{H_ zb^y^#@wI&Lg zK&u!hJfv2tG*LPHNvmdONuVoAR8Zgiz-ncZr`lkBzw~mwc%S{aNKbm(I@R7pm5GPc zZA+rBgooCAiD{x5=&G15Q$3|3;h~nDRs%q(sb5{<8!mEre#Ijy>@MbVjpqK{lCsv; zf?PF85;j%d1fo&=lpXl_7h{tk)eE)8hgu-hJ9I0vq~FxqX_I$*l68O@2R(%q=Io2-hrMgp7o%OuEK^%c__E4g%e6cS+=|_lMYA{t}tYJ z33hEYQ$cB*no)BvOCs9zX0OK*joQ8Pjsb!yfo9tFI$|UBpBQl&q?!J?9aoSvp)1Z4 zp$9W!!(t~yICEaFgk37{Q^9i$HSN#OL`Ij! z4cUPg+uHV_g7Gkax1--#$7TNVl~$7oi!wp427<}~kHQYT*2`3eOPTj}$HZcHsdbL$ zj>z3cLwFvL1@L_J$Y^T0M3t6l`L@+U6W1B9&BQ=qNxyCXY&w8u^)WB)-qU22{nRfv zS!1s_RSS*AR*ZQ_BXj_!X*6oVPXn4bbr7p;nVnbM=|#sz?$Hly+?(}H<5FVQecVv8 zgjmJKd6Kj_3l}tGQtJR_OT)!p0Z%Lf#vUl-`=oshl8nI2Y6D=SiXntl>knotTDQH~ zDpWP}R2_a)R3s$wHvB#NLGW02cqZZSqpnWHn6IS~QzckX7u{o!nhDNKbwn>WwA@?c zllYIFmpd+c(i)a!G(4xOb=u6)v^!g&Mrldy8u#}s0L)U614|16oF3{0gpIpa>tNd+ zc5V3JD`!x!#TQVCun^zf$X-ymTjItAgzD&PSZmu# zYj&VKLe5gH$?@DNcES^;e@+`iTgDJIwl#t+lzoSu@$KMWxFvNX^tOtO-`RvA<{&ox zPR$*Tokng3xP3OFtjU=VN-la#SBLenUtU;dzy;;9UG!p;+YV}XBi?nVWDam&Q^W{>1ie^_5=-{XoDmn4Idv}gFwE#_&8TaP+B|0 z+B>sQo#V_=4H@g+yDCfiSUa02kFP80bs*NNu=_b>3KZ`yvS@-BOG{-In*tU1_c#SA zkpIL(V>pe+)NS2?$RcXPJ@HSpI4!IukcestnG5O5Q4-Mbt@WkLywP!KY@!1prc#nV z?HF%i9_%3zPVSU1mID(tf^jv#65rK^QNtKL6pxxNm|ANRsACLB0=srgFq78TL5T|> zk-V@h3z!(a14Pi|nZme^EWy~Jq_2B1r9$gPM?GK)LQbVFrEQ65NP#IyBeTgD5UsY1 z;Fe)1mQ{8_2^&12+FIoa-E0+|LO-UW12OryVS3>@?}blx1^8~QAhGEdmoJ;}pRiki z>2cA4S8?&eaAT?WB^^?Mc3&WvVgZRJ5A9i|&{M~f{)M%XmL6VSz$%QovLjrZ!k<3D zHwNpL{`uw0cr4$x9aG)OEHT@PK(qmXX9$u&^MK4~p`o7h5cCRbgWd_;4R}Py`rkwG z+MzhT|KY!kp|M*3f9<1N_wslrhph8XwolKl4li5kqq?UyW}N?i_4@Uz%K5*S-|lWd zo&S4`=hLSqu%5G3WUf6sty$Xg?3P9&Eo!)=VND#S zHkhb2K%)^#FH~5Qkuh$HpgnEB#xxj^gvq#N2zZMMqTSsRlt`Y)Ex5yoW1jS*5y-6H zr&5{K4?|Jzn9^D>hqc_kTLW&+jjXxYfVCFvNgxp-rr3~f$y%c&_S)#izUy!U;`RP+}SZr;4 zgL4g9@a0QuLaAIuwZ-HxXn8hl>2w$N#8oN87tyJP1%t?{w75uDi!w!{9L~=Uu21() z4!`?m<2LKkKt<4}%4j^zW{%n<3M;rnGlDgermeCIO_%y^3ItPmYrG;$^r{|U<(5_S zTez=KTRh-#$`hKX?{fHS(B$x|FL?Q z;q}pZWk`*Byv&$R=s>B6%4CbUh#LLQ{&3ytBXoXtaaBQSb!4;zL$40Dp4e{svR35% zm}_bpY~Ap}H?{{m-{hE^LBT`HrI9JGFjGcHD^wjgGSTje+-kkINADnJNv3FFGWh`E z=-lpm<)>3x)^!E&6H(3&f&+o#sRPga4wl1UsZPF|uiKuL*zWT1;=|E?4d*S1#at*r zvso1S^l9zWClGjCbb}=!{Roar{dlq#OZu8iVS5w4eEIaLrT?+(%-R>ny9GBCgmp0d zYyvBI3-Q!h>bgdETC>N+9TDL&{Ue7#d%ix>SR?B2?`6_LC`+^_?9IhtTn|y{~7-Eo6mAU zc3-{jtZi;00#?b(53ym+fRHL0C>p|sw?tM^fG=M$&~bSdJ73dXdW_$5(bf(LJi4e~{`jwVAUIPL|v8h>R;kLUiQkO#8mX9Fq@K(`0{ zuBmuCOF}BphHctbn!1&ySF$ud_o)BZ*?-pq8l_aomP+p>70h(|Z|BwPS5^D()$5(7 z{4bC4eEQUW4nr1qaKo_2BC6%pcd_o4U_d+Yyp6he`+4K=W9m)=`N4vye7735T+>Dx zjjMUh;ycEfwM@b})acx7^^+fHks-1qfY&eZPZnQhy&n4rYs~_fo)AqVvD(iY`z9EX z(6RJ ziM-6j0&{h`UjE#VZ;*h_ck_>{hiBdw21;4Y&v7liT^PYsutfD>?cZec-{e{anrU|t zPMh?1i37c%)l426NwFj=s2gB$8kxiCD4AKWiGv*RJ_}ln 
zy-0F&umwbe(Xu{2cg%XZ{Ex`*BNyI^*(oKu4@qJJ$BH5IIM*g8%ekrMW2>B?%ocUU zi>clIH`&42J*{?iaB<+8Uh^gHp4>>8sb)j<*}Xh}R)D_fM6xO1j&K&`U1H9#nhoKZ z)5=Sev-VXP+PJ3gfuj-71M&0}e)c+2mmE)*r|TNqe2(gn=YY|MT*Vjc+uvMNWR#ad zDE~5+!l|b0oH*B3oQZ82#+>NP?t|(ieCm%$IvM68b0Hp6WP&MSBtg9+8(Xi@ItqVF zV?d+0B?l%(WKwyn(P4$jo^L~I#r0iN-kt<$c1PtJF3fO(Kt-oRA;<08`IFc@M))U%aX8T4^f|xwBaJ?I{6JlbA_Wc z{8Kq|2D>!!fP6DI$1PgtxM{&>NLUhT1>xlcCDYGwUn;VMccTQ$b8NuoBt-0-o`98q zsuD|;GlZ;!r4M@ssve?lh;0J24t#Dl{PjukMdOHkUKPOPen{z2&`*)*=4{T#rg`%!Ia2x|6aU5{L10& z^;A!~`?s|$u)x&OJPmXaG_gz%OoLq7Bbe16^RA_<4%Pw-Xw8V(A2Ovuk&bgpwgMM5 zY?3eJ>BP(&Q$gy@AjmT`%+7vbiz_1fRGGXK1t3`@%&PCH(#FFov+L7?#Iim;i$8Vi zzs>q$ZdAAj4Pe&#|LyCl{rBzm>nHo~QJ$)|!_P?MYXN52FC{E0X$D0~q}J^locP0? zdhK9((B6}c_@}cG-I|v2@UoP{!dosDUs7J1cIgNcrqB^09pFzBy7aqQcsmJVj z2RyIs05=zSrp6}&9zXrPoE@e91X11kpYpJ{JgbXWWv!$I%<})fdQmz5v%9_f^2z>x zl*dI$6qm4dz5|vN(ysKa*kA3dO0}J;ufV8}?cq+BD&qVG=i|4q{fS)3|0=$NKZ`~o z?_Hf(XcRFb5Muy~z07g`^%~i&#HD`i$vwwYEC2IEVHD;O4s@M{*A z#p;%|6RgyVm6OGjJqjfv&*VjU;YnP*(?h)$(esc3!=BlK+qKe3~cs#*8Q+TY7#;UzZ5vE6IGpBl_gKyT^6lNvi&-o+5DiAC#f|@-y$7@#@ab7e^*EtoHE4Mt& zMrZ<@5>~m)s0o)s!QmoHn)B+w%PU>A>%St3_OeK>8290x>FfWC*DuHJ|9JiC>Hd$$ zdHf}tq^a~)Xv+nce<~l|0t@7m>Gxpk7K+5v1jtWBu1>3J9;mh|7_ z*ACv#Up^J?jclpxIT4CsYL!|@`QmF!mHTTdpb~R^Wi7)ohr7kO5HtXb3LAi>gvT2{ zLUaKEQX??(JJ_Le)_?k3tk0d6!P@wgX?jojqIPef?n7!+fX~Laf~}JrHF%e9 z9rqQp(q-g+Ja2u4`u-WGbv!ZX1P&i}VlLzFtfSao2eofH(KoPN%cTNE9CA%v0C$<*$XJC5NYtK^^}Tl#XRsWKX`AR8%ffnJGsc?EEU+@x`)+-SlFA%=-sSj6?ETpImH=w_uut1dMyK z6n0>{F%hxxVG{NlVp(o0e_b0&H4Th+{$sZcvz90W>O?OgVlzCl8%k=3y|KxSJ&o@68UBzJmKAm&E&f=g6BBV>?peiwF zP&wH74~G}mM+dq|idRjZ zpB=z}ONG}UR9kp=c6o&t6yCTZ(3Cf0#aIu1{u#b;rQy5(4I8cJo6U`%x10a*%U?IY zfnR?4U!Vpw8LJ9#c5(3CH=n-WyL@+jad@(Kwg2wt-Cw@c6tp*;mFW2Hn~jaLi-W)J zY;KO#pzezE%IjdjVeKtpkx6OTrwYtSCltw9tHpyU*ctoKW8)Znqye;x!D-0> zv_+P*O2b{@+oh!3(lU)E!+KGsyZK=6YVZ2^?1$@vql*f0r~*wAFtyXO4Ep@+;_CYJ z{mJ)-vmuubiOvE0>S+JR>#L)a!Flm8T9gS%FQ^v&#%DJt$6Ov>ULKvD;`BLt zKfA%0e7vNnSh+2ic~~CBca#rjQhnCZWDiW~DDng_^~aSf#NcLhu@{Y? 
zYV*OD9q?n2^1^Se%WVRvT8~^kq<7Zids#S(HPFiCRy8fJIbU(u0au-o*sE@8??7Yy+-|a;`)qGUxzFAm?wwy>9{ppsdO4sZy=1@7rCw?^ z`}pv~;cTrU;{9Vfq%+lsz2oDvpRV^#&rbjQ4&Ps1pB!FY z9PLj)6?<*6u`>o`A^XRnox5Sqrx-%>^+cZKE|OMUUz<*uYF8a!I%#dXXKu@FyqS5- zxT3iq5C8k}>g?k1`t8wjt=`g+R9w*Ol&Hb7jZa&m7C^w>j}-ucs9lv~LgNLnve_MQ zu^z24-*07-(T_@y!({k@h_R)-#u-^EKG`2RVXdkpjk6tCNde~%NXVp~iPv2s>9rLz zX^m0tI@}bgbWLe(og+nEj##aTT4|}Z#(|3D!Z>r(C;($AYcwykv z+$_tfcjegW*_hO;U-PhX1;2<5X+otu7ra}ugRY-ak@6&O zvZUQ5a`0>mo;3sguh}vY8NWrBk9G9)s#62 zpP?^k3M}Tg&wi=tqxy@99)oC}(nQJ#r0sub{+2PSpc#m$2ThwZu%NOt*Q}mR@c1-m z2~&(j2Q(t1OB(PbEJt{j$*Cz_1(;I7csLtWsazF^?bq2*$_1-nWEKQe%Bz8(a=@c- z3ZQDyDySY|Q6}g-psGdZ*l!!#>?0wYArs1F{yajVLfgDjVZLgAT(K}8{(m01P@!if z!BFj+B_%@z%nG8R0%;}LP=U3CaQL4RU{s4var0FjD(34H(s`=J$;X@2+V<|GN@baK zP1U;3^P!A}&IfPM<~5E=br!Oa42yW&F|!vZ_F&-d5-YRX;t~I~-BlA|S{FZB`^|0u z>_>#fb)-~gL0(sxe#Lraq5}OYkC-j?IM-kUZz-RNMQd}6UVC)JUuUOA$WFwc#;F<| zFySTWzV9wACg*&;TbcC_)58;yn%vo%_KlfczBxlEO`&wbv8b8VV<%503jjISS&})2 z9TSCZOe|II-~~ow6E+czVKMSo?xbp`QLApwoXpL!b4E}%HO4MSg1L^>`_0$fm*l|I z9jc!u)BH0s00cFK6CPHDvB9ktm!e7;7ZYh+=RU0Aydn^=WF!2RQCZa*>}9XzQv&{F zuoLzsC!B*Q-%?$BfU7bvP$VQDJUSHp35h1DZfLV^PSGKBOeLC4DMejdi#h zFbOhCarpo_piu<yf@_K}8g$J7A+ zkIfdGCD90Yf;BZtkWv8=OK7XnI=H;PR9w);I_%?e4j=X}A!I@}T79P4_^&~~(dzy# z+W4<~G3dATKkla-ChY=Hmjt(23OyE4*?8WPcWL8!t4nSh&s!=^8_)lDV;w#a!FVR& z=-^N`TB+c_(m*v@ETp7u$_f7KpHWU0^Z(*-@8IOH6^HlU#?<-0v-5IyyPE%f_sReJ zNY6U_gYzHFGT-<)|NEDXfl{gLw2NSt7D-7kpe-)??M+}32m~}yf<)|h8oG3N(BlGP z!V;JF#e{ieCP5jnq{l^!e{X3G2qi-*_>iHf37J%abu*;_j*?&?c*1_S#kD3~6vd10 zT&uDLdr<_u2Wmhi1#QMk{nLF1A}C~%hQJb#JVrcYZEkTjpaQz1QrjfO)7@w+v>J`| zb+|hIUk4YB#?8%5z!S+M+V}?af(e4A97q^~js5}GalwEHC0q72X1}+8yQ(aaibN5_ zqozIe4dW41H#awkz)_+E4>P0uHBb&?3()0UNhL_CN2G29`%De8u8)BGzZvr2mWpQH z+9rfFjrb_mS|U$0z#nt<&_L|8+a-;0(|I=`lX_g#fWpu3XcQL+XkuG(u<2G3h%qs% zXf+z&+fFl`1p!H*YX=IK1#F(AsVtz~+BZnhCi0tyv}Tex&-fazG?>DNR68sZ4ev#ZwL9XNP9d8E~GD23%x`VlgeFmF7Xe^6px8+?CSUu0xA^iX}+U!3nJd{>kdVV^2-#E-Vx744Ue7+D(G(+lT13p^6b%jF12Hr}`PK97ZP8d+X zI~~v4UUF&vO2STa7=@qDi6Al6EceUCdYb>(1ZgZeh?rCo!4zLX4Ty5n!9k@Vgqbiy zfuxyc8jb&g=g+TL`uuqZj+s=TskS6bJ1uXpOLS94oV-uiq_NA~r*=pqswjT;2g;jo zwPR+h6*cl+c6|(j#(b#7vgHxKYn=rufk?>%hVvt5AHz)=hG|CnOl9Xm-i@YFV|XK> zq#IG`^>{vD-Kf<4W+m}hplDVNC=>aJqpL}xrfSF2Goc%e&jm6t@ADZBs0;+dJI3d( zb-$0s!4SA`&Bo{E;?FX5?)zCDhtG}AaHGv25}9Oy7qqXniP6pUKK=o#zyYqpCIuk~ zTXmJuTVS-{CSlQ3G$+-p((L6-o(!)_b55y2$RUpuHjN&rV%TZ7)z*#Iwpc zH`dp4TRHVa>yvDIXt{EK&x*25Ps_6*6@rCyE++a6mkKo%#mx-`B#K7BEeIIjIb3ES zXX3DuY;%~+O}9_8uKd{a!s;}a5{%6QrI<@K%5iA&ls)uQ;N8OWYV%WXX!bU3=j3g> z!>f#CqW_DcVy)oaUumG^DkRN;!_B8J%M7zmv$rEU3)E-uw@<$g;VaTyx))+T-u8Gp z#N#PxZa!~hlKPfrGsk6fQcbgN|HDZhYr#A%;E%a@-ruw00&Y^6i+Riv*T;qJ z`!x&`gTsw+P_`IfY~Gh>PPt@?&q1|M^AXbgfGhPx9B#&(RP)j9(X2>+-u@b=S&@e^ zL8jqw^XY2u2TVh>n1J_aF4FTaG|rygmzw1MkMn$no1@+-SLcFCny3Y+uFyQw!a(6*9>jBnt?m9W(SrkH-f~1SDA`8;oBaxiXYAV;% zG`kG(b65Wi-xJ9KOkV(5!qAZ)vnXQH<|n|-m9FpJ?r{MFN`&fCav$p3XSmtj-rin9 zRWsx4($~)~!!U_{*9rTC*p5ZoENE8KP$#y#jc%{Vs5jlp#&8wwIHZ+$L{?Db8oMv&C3QcanlOPj< zCd!Scu}lU;>0V$-KcY5-4`MQcnA}n`;`Uoi5!4bizg}US$MV(BmfI)fBa5>bq^XzZ ze5uZBBwfz|%K~>SiU4!xnw<{Q7m7nd`&==iXls6&Uv4iop^@f^mjX>Y7V>CDBaPwjX-kxyv!ZgSD)qonQo!@EA5*YAI7?SS>EHFD@hHypXEpAyW zu4lRutly`ChO5!M9BW&LS^d2l(`eqqtvEBE<5){CZJ#O{!dspRIHpS5ray5J%3^y~ zalMAEKxY|;8!3ij3*to0sc5^Jn&$F79&^dCJ)Dw-U9LD7C^*c%Fhl9hl!TPxo@9dV z7BVC(!u`slbFD#{vtaYiWmsJ&smaGZYXij4ti^8-&{U06nI$As1DYs|ehBpdi+gn> zeTJJ6ozK5lt!d|TH8cDyn%_$bu7*>xH%tVggjCQW1{AvFt&`k1t2`?@gZ&E{=5&l6 zn&A9G3)3)`Xn*-^=I|LD+m$MlMrd~_m>a-XS#lS8?vG|Se$(kLXm(P{45*Z>pV(zsNE0po zP+hSi&F%_}1?*?MF@q&=@;xrdTI2c~84`F$NeVnQi`K^94u~QU 
z@jir1;CZ=6+`(`-+^nrFO>%xvdR8F9XDBWW)4W^^ah`9f1rCTDi^au?i@OPxY5EliWX2UZB0w_0zp=lZQ7KFHb2NRm@og> zn4=9kBvFP24v|3egeFr|e@F8IK~@!-^@#gvH6H6(k!I5Cv4p94q?c=N&sIU zn8@gl^Q=g-USuw9I)-`IDjrhD(qD({6=_yHqGE2wHacNZZjy(!syh-C5^C=JSWjlT)`$N(oLz+#>oY@}X+zsq%g3Y!A8wF`G z!4Tn;KZxgBkFyfl5?0S#{9#sqL-dcvEzd{lOv{ku`- z(!(xXm9IwhiZuI){^#Ak(^cFZANNATqR)zU)!6MObA;F64gS=leB=H-`ZI7?)s{S9 zQe^1f`aTQ$RJCIA@qLm93fxrNJ0Tw-$$cfLWrX}L0m9-H74(>N|OU`sxq;U-TDy~Kfr`s9vNJ!S7D zH5)votdci=R>>Q0Pgg3USO7=o`kgKWu9QhRk<8=5AES9WYsLR-rkW={GXFr-|n|a-&Nu#U}WsrTIaRsH7%0&5i3% zChah70WpA>gg#BEm^Vu2GyISjFEV@GjGtt`Dbjn7(>$KJcd|FZj%J&Gw{9D!AeSk5(pV9?b8L=1HldZ_4dmVD2Y3z9_OW2H5dxEA2Uj(@@J5j`*W;xL&L7K_~HOzNI6nxS-c5QG=Sb>PI@ll&!{Pp$oO( zg7!@GWa%105{EE;F)3z})PG{yI~Gw1EK%H?ftYLY`V2R16-kZhHKN+i|GL%yr;ONa znrk9uJ(}J37W_BQ@Hkc`DbQrd1UI`KLn0WaBfDQFU(uUTL$eZ@R1-(~GhFHKUv;G2 zI3uQZq`kkVjP+bJkBxMR=C3r;_c+dWt@|QPEUE9XVr{8Q*N58g<4PZJ7+>a3h^t_oKSVg#jS}Mo9A>^ZeyJKHJ8VKnP)|P zyG%14KRo=Xs7NLqsQnBFJb9*YUzUm7)WB$)u~U{0^sM41!5xnC;+ET|O^!$1V7R|$ z#Z8Wyd)8+O_2LAk^@fE+5%2(6XU_SD{T0S?Iu0v22q#|NKG}(Kj+X*ZAW0Enb(2pY z^`W>qez`5jIWTu=+7^y08>4xyyt1Eh{4Q-i+5J49Y~dHiVK@8P4@~nCHLEliw~9{> z`~I*Gdsq~7*+-|?gt}7_{v?>9XGySVsC)JD`#DaX`r~|I!i*l7?$2=Z<_C5^<2;XY zTCb!%oV=>XVYRUD$rsd5Qdnk=^E3ItC)w_CoIRQ+i}1zdw(X1C`|XQWuM}v$_t$}H-8LB(zk~+`JH}ElF;6HWVZzK z38%ccHT1fqoS7}cn>>cMQy;;+qG2{p^Nc*BpW&!Xtbx!>z4`Od*+y5)Dmt0w33*26 zr}^Qsje1!{W0tUxcIOUwo<{S;Jfri|{BYSuHO&}nwJ>u}rFmkW(fMh9xNM`J;bt$o zBcsI=9ZjHlMu7Yn%@ac6?}N69n6G-Z#R*k{1SoKnGKss=QZF1eYMUL=wQ2s8=Jn*0FI@=Lg>?h~HR<4MtXpFw|ty8#sx z2>gvIH7ezsq#si*sn+aWzqmfQ_vbU*{KwAAC8NkIgwKp7E=3PZKYDn>3+dg0n^PWA z(|cT0;%v=bg{3~j?m~Cm0v)z|6!B!jC&sUQu0u`w(JYVmJj94kbV(F3JEY={I4@87 zOz~#OWWa~ixGL^{)_;bZPlc#yG2WJeASvy@ni3gZ+k)~}bA$Yv?yR*hH?@SHm>VT) zWZh?Ay*zl(#1m?h*rWO^rGh9fTn|}Tut6fp-KLAIk*DUItARb&8JqhQZWY4wls7j; zW99rg-hQZ~@WQD#5B)5iKgU*|LO2`CMXR+qn{t*Yn`kzl>P4n>WV!HxLh=e+tVgDf z-%6J*)-upE!VLj_-er?GoM#G1PiuNbj$Kn07|!BH$?*4(gpXw=C`tAFWQQyFxS2MV z*WXQbpYTEqUSv_-p4kGz?YsnQG2v!{u_s_9vWUu?&3hGYl_usBZWRjul)|k-`2Sc| zH%wJ-%p)8J*bA(N4M{0@2aAy@=tmN#5&a`&v^3Ql4R@0VCBdMu)cunQ&8DOZZt3Vd zBw^i!kg(u_28{TR`By_fC#YlpeZ`CH4}tj$?b- ziJR`;^~Uyk2y>f04fVo@j_A^j(0F0kQr*++l~`*+FL<|B`;m73ecC^JV1C)~er)1J z&-5(>Z)AbL35mGWM=p}nY{ zCNk3t58nOmU3i~gren4Y+?Dbyz#BK2Jw8NY<7#Enrc4W6-p`QrdKB-`F#gTrPJT4# z-H;MU=$*Yo4k6$*37^h_VoucAGt+S#++SNWpkAMwb10Oix7~ zm#+dBdUBhQOD8lANMM{2!6e7j$R@jHKZ}ScCva$0n4L+3iRN5NLf2<*2Jl1=Npo^) zLvsVW9>qoz{G5p$*lGN=^5%t^dCeOCm0ed#;3_&!2T)&W{B`_;)_M$VJw|yt;YPy| zo8Mtyk#bR|2R9lK0f>5`uOm};qgsv>}?CaAGuE20%qnLHS*mi67JG~=sxB> zvucP+Mh`QXxcz){bo%z}24X5D=~FPnYa;|vt`NBSY475+R0NHFSIXWzUu=RKDgU#Z zOgY7cwCOW7$hs(JoP@OhH|-r=xxn1CI|mrSTwN$I07N5MVo3@1I+&Yw&4qF)Q3J+bdvqEDp z{56vb{H++?8{a|owoe<4J=4#2J2FdvXgQk!tQAx0GzI4hx?{|j&_J_>7@6BqvAulv zRu0(YeOr*cxV)#>O3}0CPB3M=83X8u*tY=ekbRt^OmpYBk2LkO)F56-$Vnx=u<_dmIzlx72EPq{WK~ zQJJ#Uuf-2qfR^zvOht>s0~e-JG?wAA8q(0-c@k1|H)?e);(ZAl-jYxVM`VUP=w(y4 zIG|AqWXM^l3+Z);+VrShs_rOFG`zm)%n)x<8gQJY96}C~ZyCv{Wrd$#yU?;xRWK@~ z{-elNhZhs+Ra+w`NFs@uq#FMgRJ2vo2$NO7y5HkejkLbxMqGiQQm=n@(DX9VrTVLG zPSJdgEq4dz9m`{c+#1@Y12SY>Y_=MWp9VB>oHe=cw{n%h43d^Hdigg~*I_i)h4lfu z1y4XB=DdL1vdAkwH0#8lIa(vMzR=?$CdJxwC3x12s2p&v$D`4BtF@VGS#5V@3I^sn zo{*Br41&248mJIlNUik|tMb?d?Em|J{$C02w7!qC6gAeLaWqxAUFc^lq*HWia|%6j z3bkLosSS!IHqc~JW>jt(+TN#qp%?GInN?RDUbnVe+l|K0X9=A0p}EU#=fxK6Ztrey z8AOM>PL{wOK{F^uIj4)e7XVG*FvXSjFU5E^ZXHl3(^N=)+nCVt?@Fa?1ABCQoQ%3l3t-f-RazT_ehjJa=N?Zvf)W!r$ zEU7PWYTH{$c;XhAeBxp$OMzC~Y1Gr|DG{NQE3V59&yHJ-#`EVt`C&+R6uC$ljVUy< zvf2s%=kw=UwLEuM8`;;=bO)0@y?R>Rkd%EeWjZk2xiJDyTW`7>x+CavH87QSk-QhK z?>#_zWSvE$Ct3;S?l?@W`T}?ojW%2G^E)a$Ue$x&k^|(~w&E!Zq~(S7x@G~AGD}pm 
z$7mF8mMw<~a}5P+ftR*CXcV>@O>~2KW2;}|)ld@6F(dh&Bb{hFJU5oYfJo?Ssm?`+ zH@=Z59gr@~Z9Z*F4tUJ0OhJ#Jf<6Kx4(4ythQb z90g5COhavngiK13QX-6??2#cy!#$yQn)9`Z-Sqspxbx0r4na$?`hbj&;#JYFbSXc{tG^n)XuWE^YQZU2)-P#vCoLFJ^LJ*4 zn*|Nq46cs>BDZC1_JoJjIHD42`UY?DB?|P+K=(*~HK>t}?jkBHKFj){OPGU(Q=^7Z z4+FTvGlbre z((Vd=CuzG}YqPFj7Cc%~P7V`Iw!Ri7bTK5)O#c_Hcv6YyJmpD zC&BF8{QaJQJ%y#yn)bk&f*H?oxXRq?3IpS#m8Q7oN&u+rstH%iZZrG0*kIsNo-dGYP5-PbR6U+(VyWqaq<>ld&8 z0^9e|_^c<>QvEO6E4Ni8?oaZpufq=!?~(`((C6$z)Qec?^>w?-ZE1n#7IeM%;JVE1 zg<_tww5Ysgbp|bMLYmSfq)EW2+^X?a>iT_y3bG<@@YyTgi#w0|Q<3eMHy7^k2yO}L zH|JRtnQMDBL5+0_r_)#mDA(ygJaT`Zl9(1HJQ$`PK+`MyBORHqc`g4auLgVR{YJNj zw>e-Xq#?+Pa9u>uUZ>D)vu?|tUD1lQ829YA73s^iCJ`ZZ8jzo)=|Fz$1>d^kDxI4B zSpY@)9eB}t-P&!2w981++}Uo5S8o~(fc157+wr968gnSZe6~U8xb!?TCla0k>v?BB zn1p9qr0X{J0ZHk#X-5<^@w6>O;O95>c&N9{V&n{%yxF^y|2H8>DT zXwA}E+ZY5Gv0I}DQ)&~Y`_2yQ_BAk$6sSxj629NNe23HUbnoN{LfXxYCz>T-Pa=6; z0aLcdZ0RO8EZgfEe3K{5E|G&~4%g9*FWi1*11}U5){Bu6p%{)%F1p5(S{6AiH&DvO zgkiUjsYjcQb^PDmtJtZurUA~^Qez$5yD~6uBgMvj!vWU_>1iIAH|9kL?9W!j0}^5F zy)eHyOYz(VWK>HU7FR>%=}Qj44}qQ&ig$=R2UR{S*Yk$@s(gEB1vBi0V{<ivVF(JZNy0^+nS_y8IeZKJtW#znRLUb z{p3GU{s-F-ZfSgtF%Dz8=Tq{6cjpG1F7A;P#4X(YCw2y^UZ? z(Z0BrnB&su7^F$x25ISL%Q0W({2=ba+nnL(7dOFC2@Onm4@8uNRG>MSFomG z!y^9{QwOR{HccdryOEZxjhKAgM-cnTN3ANpHe{WUk1Wn&kQ5a*>^x0kb;NRKr+<7E zbpzvrHP|T*3GH*mh@!2MS-ZQ9pbhD6Xj>MsOVhYLUDSb>+it}7AYxLPPKsmQ-&zUQ zvhXd>1RPVPh2KxyI+SCAxT1$d!|w4+M0fl4!%qXIsD{@8DTZPT;zWrp5E;KUjUhRO z{}uU5>&oW#O^V^ICR*$09C_4yK1=qZ=;&Pf29$wiPdTUMfkigS5|XKb79xSYG{AU| z8rZ3O2~S2b&*U-hYeU7X%zD*LGhvkH^>w(QT4^+WDie~L-)3!yxl|x%pohh6`54h< z;^u^)GUZ8F7mI|&jFUPK)KJb=tTd#jJR)<20~=%DgE<|?5*YAIn2`G7uozMRS_{U# zc@u@91nc*;JZw3GngE&rY-k|%sep|g6J6TnL%Qj!k^>XSji6c5Zx!AP(WmaTUI(_Z z3kSCMw6dBfQZm{cY_NWUF{AlzfXi>YS@;NCrAw2XbER#M|Gtt~p+JS>p9I3GAoLjV)1LSI3U^s!cK+>26JmLwM z@l^I8QZXsrWOauz3f-lZ`@qzI#wKl)31Hju&(d^YP1oJo(*N#mkuYY-TF!YWdNh%8 zS;X9EO79jYBQAsu;aC`74f3Ty8uGt)?ca3-xXzVUS7E>7mgLFz`O%ZVks*P1l%&8@ zd;aL}@kiwSzn{p>{D{L4hD2yB&~Pm(h^8R?-3B3hj^iOC@V5^qZ;$tWxYUAXKuM|_ z*Gt^)OS3%X;pGmMf?*8uEx>SbFQT^v?+DIOh*=Uk7nr097|!FP)3;}OTz=ZSI7N^b z-tCe@yIq|JmqWyHpfFw#MJ(?hYm7RkVMKF0n^`_wCO(Tv+j9oGDOQ&3CRP|%qq6k# zLnTdYCkTQDFY4It(*YALqp-8wNcKY{5j_(oSww)EIPL+BQfVSk=MO*i%xpBS3-mfr z9hqw#Iw*HGz&Htb-d{*3=MmVf}B)uL@UxZ*yTT0hwx`fJ*(a|RsrQGHTyc{`^F43jD8jwt-oum zIJz)x^FvR2^!qux*!J@|566zKG%6nZl26{wQ0wg1s=&6E8O1h$c~fo$qr}F~K9v$I zHw^iBh*~dv{_d`En*OK%>c4DxyREs_T&t_Hwzg#r2`f+}gd@H31?F(Bwjx>5x3IFr zwhkNK8Np4hjM!$9EQ+=u;bTpq#0G{`XuVBsK``LFI96g8(7aWx8No(B-PIN%p4h@u zuSA4kRwsZTel&-<;NRgarOBlt!R;nGV2VQwbz_5OaK8OuTsUD(_iGNr<7YYV<(+^2 zVL#uQd-WZQIOyOHv+x^G+X*FCo9aV<7lx6OO% zEhed89A$TaN$S0`W8G2t&Q7^iEu2sq%CgmMd2~RMaD^e$E3j*;nF>nd)Qp;YS(4DE zH+z^nV|2f~V}PJWpqaM4PHZ#CPb5*S>E56x&Hi2JD8Yb+Swsb{oip|(j2?mpyp|ZE zX@dnk6(r6g#nOm2r2}k&iPj;1VR}W&zuSuKG?x1lMG-o*Wn&>DLEDfS?h{-4scro7 zZnqb=eoY%jQpPmKo%P*UuTPA$4AM;h+>R?qnz$9`g~0QEY*_4qRL+RY`_%7TL(ROA zrfHj=+ZzS3F} zVNoXN)j&`=;8ECt*Ls!8a3%BJY7Gi8v!3VNvAx@92+#Ym0G_WN8BHyh%soQ+meoQN z*BP!&dm9l;`fdAX(*ZQAkAGXy6pg)nAJasIz1*3F{$lIJJfso&{?arWwE)kzT4fM* z{4KNTikrOoaX|Ez2R7!dbUPBR`nZ>5>2O(1^XuD$f(sg&^ZjPKz{OnwPb>n)8Ysm3 zq&)+YjKEB417M?yp?^*r+uqDw;iY?^T=$f1ha$Vh;YU2_CB~dAjXdwgnD5%4G!vXX zAg*8JMwWkUrvv}7^K!?_E+t?sD1~ z+A@Zyu{{xNVg6*38=<%FsOnOuZTg*>dm1~9+yq!4zhiCGmlu#3a6!3j7rofzwu9Q; zh#HW669pS80sbDZZ)&G`7}mhwoJajuMDCCl6_RGrh? 
z9ywNmQiq=VR71wP_pZv4KGx14$|LKFdL4+h9_)TjnF5uRfwfbhoSAO1DKIG|;p$VM z0{KroG=|f7O!EU=nEcO7T>KNwO$)0DB%)eE=0f^%lms+Dx=BmLFbTz$z@dvJ+g}!Jj_CHwNpL{`uw0co5&V9n%1F`I!Ag zAnrsk=HoQySaa>~oa0GqYoG#wy8(~rSpRz{UON=0_dooXu`*Wc|F3;$>*;xVo}Q=Y T9-jX{00960FY4+G0MG#dA~2NU literal 0 HcmV?d00001 diff --git a/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml b/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml new file mode 100644 index 0000000000..732d6fc9fe --- /dev/null +++ b/deploy/kubernetes/platform/crds/clickhouse-operator-install.yaml @@ -0,0 +1,1223 @@ +# source: https://github.com/Altinity/clickhouse-operator/blob/master/deploy/operator/clickhouse-operator-install-crd.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clickhouseinstallations.clickhouse.altinity.com +spec: + group: clickhouse.altinity.com + scope: Namespaced + names: + kind: ClickHouseInstallation + singular: clickhouseinstallation + plural: clickhouseinstallations + shortNames: + - chi + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + description: Operator version + priority: 1 # show in wide view + jsonPath: .status.version + - name: clusters + type: integer + description: Clusters count + priority: 0 # show in standard view + jsonPath: .status.clusters + - name: shards + type: integer + description: Shards count + priority: 1 # show in wide view + jsonPath: .status.shards + - name: hosts + type: integer + description: Hosts count + priority: 0 # show in standard view + jsonPath: .status.hosts + - name: taskID + type: string + description: TaskID + priority: 1 # show in wide view + jsonPath: .status.taskID + - name: status + type: string + description: CHI status + priority: 0 # show in standard view + jsonPath: .status.status + - name: updated + type: integer + description: Updated hosts count + priority: 1 # show in wide view + jsonPath: .status.updated + - name: added + type: integer + description: Added hosts count + priority: 1 # show in wide view + jsonPath: .status.added + - name: deleted + type: integer + description: Hosts deleted count + priority: 1 # show in wide view + jsonPath: .status.deleted + - name: delete + type: integer + description: Hosts to be deleted count + priority: 1 # show in wide view + jsonPath: .status.delete + - name: endpoint + type: string + description: Client access endpoint + priority: 1 # show in wide view + jsonPath: .status.endpoint + schema: + openAPIV3Schema: + description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more ClickHouse clusters" + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: "Current ClickHouseInstallation manifest status; contains many fields, such as the normalized configuration, the clickhouse-operator version, the current action and the list of all applied actions, the current taskID and all applied taskIDs, and more"
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md"
+ properties:
+ taskID:
+ type: string
+ description: "Allows defining a custom taskID for a named update and watching the status of this update execution in the .status.taskIDs field; by default, every update of the chi manifest will generate a random taskID"
+ # Need to be StringBool
+ stop:
+ type: string
+ description: |
+ Allows stopping all ClickHouse clusters described in the current chi.
+ The stop mechanism works as follows:
+ - When `stop` is `1`, `Replicas: 0` is set in each StatefulSet resource related to the current `chi`; all `Pods` and `Service` resources are destroyed, but PVCs are kept
+ - When `stop` is `0`, the `Pods` are created again, the retained PVCs are attached again, and the `Service` is also created again
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: "restart policy for StatefulSets. When set to `RollingUpdate`, it allows a graceful restart of one StatefulSet at a time instead of restarting all StatefulSets simultaneously"
+ enum:
+ - ""
+ - "RollingUpdate"
+ # Need to be StringBool
+ troubleshoot:
+ type: string
+ description: "allows troubleshooting Pods in CrashLoopBackOff state, e.g. when a wrong configuration was applied and `clickhouse-server` cannot start up"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ namespaceDomainPattern:
+ type: string
+ description: "custom domain suffix which will be added to the end of the `Service` or `Pod` name; use it when you use a custom cluster domain in your Kubernetes cluster"
+ templating:
+ type: object
+ # nullable: true
+ description: "optional, defines the policy for auto-applying a ClickHouseInstallationTemplate inside a ClickHouseInstallation"
+ properties:
+ policy:
+ type: string
+ description: "when defined as `auto` inside a ClickhouseInstallationTemplate, it will be auto-added into all ClickHouseInstallations; `manual` is the default value"
+ enum:
+ - "auto"
+ - "manual"
+ reconciling:
+ type: object
+ description: "optional, allows tuning the reconciling cycle for a ClickhouseInstallation from the clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ timeout in seconds for which `clickhouse-operator` will wait, after applying a `ConfigMap` during reconciliation of a `ClickhouseInstallation`, for the pods to be updated from cache
+ see details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "optional, defines the behavior for cleaning up Kubernetes resources during the reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: "what clickhouse-operator shall do when it finds Kubernetes resources which should be managed by clickhouse-operator but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource; the default behavior is `Delete`"
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ description: "behavior policy for unknown StatefulSet, Delete by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ description: "behavior policy for unknown PVC, Delete by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ configMap:
+ type: string
+ description: "behavior policy for unknown ConfigMap, Delete by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ service:
+ type: string
+ description: "behavior policy for unknown Service, Delete by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ reconcileFailedObjects:
+ type: object
+ description: "what clickhouse-operator shall do when reconciling of Kubernetes resources has failed; the default behavior is `Retain`"
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ description: "behavior policy for failed StatefulSet reconciling, Retain by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ description: "behavior policy for failed PVC reconciling, Retain by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ configMap:
+ type: string
+ description: "behavior policy for failed ConfigMap reconciling, Retain by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ service:
+ type: string
+ description: "behavior policy for failed Service reconciling, Retain by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - "Retain"
+ - "Delete"
+ defaults:
+ type: object
+ description: |
+ defines the default behavior for the whole ClickHouseInstallation; some behavior can be re-defined on the cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ # Need to be StringBool
+ replicasUseFQDN:
+ type: string
+ description: |
+ defines whether replicas should be specified by FQDN in ``; when "no", the short hostname is used and clickhouse-server relies on the default Kubernetes DNS suffixes for a proper DNS lookup
+ "yes" by default
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ distributedDDL:
+ type: object
+ description: |
+ allows changing the `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ templates:
+ type: object
+ description: "optional, configuration of the template names which will be used to generate Kubernetes resources for one or more ClickHouse clusters described in the current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will be applied to configure every `clickhouse-server` instance during rendering of the ConfigMap resources which will be mounted into the `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization of each `Pod` resource during rendering and reconciling of each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization of each `PVC` which will be mounted for the clickhouse data directory in each `Pod` during rendering and reconciling of every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization of each `PVC` which will be mounted for the clickhouse log directory in each `Pod` during rendering and reconciling of every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization of the one `Service` resource which will be created by `clickhouse-operator` to cover all clusters in the whole `chi` resource"
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization of each `Service` resource which will be created by `clickhouse-operator` to cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`" + volumeClaimTemplate: + type: string + description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + configuration: + type: object + description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource" + # nullable: true + properties: + zookeeper: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/ + currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl` + More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + session_timeout_ms: + type: integer + description: "session timeout during connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + users: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure password hashed, authorization restrictions, database level security row filters etc. + More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers + # nullable: true + x-kubernetes-preserve-unknown-fields: true + profiles: + type: object + description: | + allows configure .. 
section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of settings profile + More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles + # nullable: true + x-kubernetes-preserve-unknown-fields: true + quotas: + type: object + description: | + allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/` + you can configure any aspect of resource quotas + More details: https://clickhouse.tech/docs/en/operations/quotas/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas + # nullable: true + x-kubernetes-preserve-unknown-fields: true + settings: + type: object + description: | + allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: | + allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + every key in this object is the file name + every value in this object is the file content + you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html + each key could contains prefix like USERS, COMMON, HOST or config.d, users.d, cond.d, wrong prefixes will ignored, subfolders also will ignored + More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml + # nullable: true + x-kubernetes-preserve-unknown-fields: true + clusters: + type: array + description: | + describes ClickHouse clusters layout and allows change settings on cluster-level, shard-level and replica-level + every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server` + all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml` + Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "cluster name, used to identify set of ClickHouse servers and wide used during generate names of related Kubernetes resources" + minLength: 1 + # See namePartClusterMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + zookeeper: + type: object + description: | + optional, allows configure .. 
section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.zookeeper` settings + # nullable: true + properties: + nodes: + type: array + description: "describe every available zookeeper cluster node for interaction" + # nullable: true + items: + type: object + #required: + # - host + properties: + host: + type: string + description: "dns name or ip address for Zookeeper node" + port: + type: integer + description: "TCP port which used to connect to Zookeeper node" + minimum: 0 + maximum: 65535 + session_timeout_ms: + type: integer + description: "session timeout during connect to Zookeeper" + operation_timeout_ms: + type: integer + description: "one operation timeout during Zookeeper transactions" + root: + type: string + description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)" + identity: + type: string + description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper" + settings: + type: object + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: | + optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` + # nullable: true + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster + override top-level `chi.spec.configuration.templates` + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one cluster" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster" + serviceTemplate: + type: string + description: "optional, fully ignores for 
cluster-level" + clusterServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster" + volumeClaimTemplate: + type: string + description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + layout: + type: object + description: | + describe current cluster layout, how much shards in cluster, how much replica in shard + allows override settings on each shard and replica separatelly + # nullable: true + properties: + type: + type: string + description: "DEPRECATED - to be removed soon" + shardsCount: + type: integer + description: "how much shards for current ClickHouse cluster will run in Kubernetes, each shard contains shared-nothing part of data and contains set of replicas, cluster contains 1 shard by default" + replicasCount: + type: integer + description: "how much replicas in each shards for current ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, every shard contains 1 replica by default" + shards: + type: array + description: "optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + + definitionType: + type: string + description: "DEPRECATED - to be removed soon" + weight: + type: integer + description: | + optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + # Need to be StringBool + internalReplication: + type: string + description: | + optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise + allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication, + will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml + More details: 
https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/ + enum: + # List StringBoolXXX constants from model + - "" + - "0" + - "1" + - "False" + - "false" + - "True" + - "true" + - "No" + - "no" + - "Yes" + - "yes" + - "Off" + - "off" + - "On" + - "on" + - "Disable" + - "disable" + - "Enable" + - "enable" + - "Disabled" + - "disabled" + - "Enabled" + - "enabled" + settings: + type: object + # nullable: true + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` + override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + x-kubernetes-preserve-unknown-fields: true + files: + type: object + # nullable: true + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files` + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard + override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates` + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + serviceTemplate: + type: string + description: "optional, fully ignores for shard-level" + clusterServiceTemplate: + type: string + description: "optional, fully ignores for shard-level" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each 
`Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" + volumeClaimTemplate: + type: string + description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + replicasCount: + type: integer + description: | + optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance, + shard contains 1 replica by default + override cluster-level `chi.spec.configuration.clusters.layout.replicasCount` + minimum: 1 + replicas: + type: array + description: | + optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards` + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + type: object + # nullable: true + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + x-kubernetes-preserve-unknown-fields: true + files: + type: object + # nullable: true + description: | + optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files` + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates` + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + serviceTemplate: + type: string + description: "optional, fully ignores for replica-level" + clusterServiceTemplate: + type: string + description: "optional, fully ignores for replica-level" + shardServiceTemplate: + type: string + description: "optional, fully ignores for replica-level" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica" + volumeClaimTemplate: + type: string + description: "DEPRECATED! 
VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + replicas: + type: array + description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do" + # nullable: true + items: + type: object + properties: + name: + type: string + description: "optional, by default replica name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartShardMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + settings: + type: object + description: | + optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: + type: object + # nullable: true + description: | + optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + serviceTemplate: + type: string + description: "optional, fully ignores for replica-level" + clusterServiceTemplate: + type: string + description: "optional, fully ignores for replica-level" + shardServiceTemplate: + type: string + 
description: "optional, fully ignores for replica-level" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica" + volumeClaimTemplate: + type: string + description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + shardsCount: + type: integer + description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`" + minimum: 1 + shards: + type: array + description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents" + # nullable: true + items: + # Host + type: object + properties: + name: + type: string + description: "optional, by default shard name is generated, but you can override it and setup custom name" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + tcpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort` + allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort` + allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service` + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort` + allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol + minimum: 1 + maximum: 65535 + settings: + type: object + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: | + optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents + # nullable: true + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: | + optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica + override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates` + # nullable: true + properties: + hostTemplate: + type: string + description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard" + podTemplate: + type: string + description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + dataVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + logVolumeClaimTemplate: + type: string + description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard" + serviceTemplate: + type: string + description: "optional, fully ignores for shard-level" + clusterServiceTemplate: + type: string + description: "optional, fully ignores for shard-level" + shardServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" + replicaServiceTemplate: + type: string + description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard" + volumeClaimTemplate: + type: 
string + description: "DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate" + templates: + type: object + description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it" + # nullable: true + properties: + hostTemplates: + type: array + description: "hostTemplate will use during apply to generate `clickhose-server` config files" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`" + type: string + portDistribution: + type: array + description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network" + enum: + # List PortDistributionXXX constants + - "" + - "Unspecified" + - "ClusterScopeIndex" + spec: + # Host + type: object + properties: + name: + type: string + description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`" + minLength: 1 + # See namePartReplicaMaxLen const + maxLength: 15 + pattern: "^[a-zA-Z0-9-]{0,15}$" + tcpPort: + type: integer + description: | + optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]` + More info: https://clickhouse.tech/docs/en/interfaces/tcp/ + minimum: 1 + maximum: 65535 + httpPort: + type: integer + description: | + optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]` + More info: https://clickhouse.tech/docs/en/interfaces/http/ + minimum: 1 + maximum: 65535 + interserverHTTPPort: + type: integer + description: | + optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply + if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]` + More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port + minimum: 1 + maximum: 65535 + settings: + type: object + description: | + optional, allows configure `clickhouse-server` settings inside ... 
tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/` + More details: https://clickhouse.tech/docs/en/operations/settings/settings/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + files: + type: object + description: | + optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/` + # nullable: true + x-kubernetes-preserve-unknown-fields: true + templates: + type: object + description: "be carefull, this part of CRD allows override template inside template, don't use it if you don't understand what you do" + # nullable: true + properties: + hostTemplate: + type: string + podTemplate: + type: string + dataVolumeClaimTemplate: + type: string + logVolumeClaimTemplate: + type: string + serviceTemplate: + type: string + clusterServiceTemplate: + type: string + shardServiceTemplate: + type: string + replicaServiceTemplate: + type: string + + podTemplates: + type: array + description: | + podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone + More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`" + generateName: + type: string + description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables" + zone: + type: object + description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + #required: + # - values + properties: + key: + type: string + description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`" + values: + type: array + description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`" + # nullable: true + items: + type: string + distribution: + type: string + description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`" + enum: + - "" + - "Unspecified" + - "OnePerHost" + podDistribution: + type: array + description: "define ClickHouse Pod distibution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster" + # nullable: true + items: + type: object + #required: + # - type + properties: + type: + type: string + description: "you can define multiple affinity policy types" + enum: + # List PodDistributionXXX constants + - "" + - "Unspecified" + - "ClickHouseAntiAffinity" + - "ShardAntiAffinity" + - "ReplicaAntiAffinity" + - "AnotherNamespaceAntiAffinity" + - "AnotherClickHouseInstallationAntiAffinity" + - "AnotherClusterAntiAffinity" + - 
"MaxNumberPerNode" + - "NamespaceAffinity" + - "ClickHouseInstallationAffinity" + - "ClusterAffinity" + - "ShardAffinity" + - "ReplicaAffinity" + - "PreviousTailAffinity" + - "CircularReplication" + scope: + type: string + description: "scope for apply each podDistribution" + enum: + # list PodDistributionScopeXXX constants + - "" + - "Unspecified" + - "Shard" + - "Replica" + - "Cluster" + - "ClickHouseInstallation" + - "Namespace" + number: + type: integer + description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type" + minimum: 0 + maximum: 65535 + topologyKey: + type: string + description: "use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity" + spec: + # TODO specify PodSpec + type: object + description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details" + # nullable: true + x-kubernetes-preserve-unknown-fields: true + metadata: + type: object + description: | + allows pass standard object's metadata from template to Pod + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + + volumeClaimTemplates: + type: array + description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else" + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + name: + description: | + template name, could use to link inside + top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`, + cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`, + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate` + type: string + reclaimPolicy: + type: string + description: "define behavior of `PVC` deletion policy during delete `Pod`, `Delete` by default, when `Retain` then `PVC` still alive even `Pod` will deleted" + enum: + - "" + - "Retain" + - "Delete" + metadata: + type: object + description: | + allows pass standard object's metadata from template to PVC + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + description: | + allows define all aspects of `PVC` resource + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # nullable: true + x-kubernetes-preserve-unknown-fields: true + serviceTemplates: + type: array + description: | + allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level + # nullable: true + items: + type: object + #required: + # - name + # - spec + properties: + 
name: + type: string + description: | + template name, could use to link inside + chi-level `chi.spec.defaults.templates.serviceTemplate` + cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate` + shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate` + replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate` + generateName: + type: string + description: "allows define format for generated `Service` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about aviailable template variables" + metadata: + # TODO specify ObjectMeta + type: object + description: | + allows pass standard object's metadata from template to Service + Could be use for define specificly for Cloud Provider metadata which impact to behavior of service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + spec: + # TODO specify ServiceSpec + type: object + description: | + describe behavior of generated Service + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + # nullable: true + x-kubernetes-preserve-unknown-fields: true + useTemplates: + type: array + description: "list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `Chi` manifest during render Kubernetes resources to create related ClickHouse clusters" + # nullable: true + items: + type: object + #required: + # - name + properties: + name: + type: string + description: "name of `ClickHouseInstallationTemplate` (chit) resource" + namespace: + type: string + description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`" + useType: + type: string + description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`" + enum: + # List useTypeXXX constants from model + - "" + - "merge" \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/.helmignore b/deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore similarity index 96% rename from deploy/kubernetes/platform/signoz-charts/flattener-processor/.helmignore rename to deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore index 0e8a0eb36f..7653e97e66 100644 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/.helmignore +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/.helmignore @@ -21,3 +21,5 @@ .idea/ *.tmproj .vscode/ + +unittests/ diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml new file mode 100644 index 0000000000..b855a035a5 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: alertmanager +description: The Alertmanager handles alerts sent by client applications such as the Prometheus server. 
+type: application +version: 0.5.0 +appVersion: 0.5.0 + diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml new file mode 100644 index 0000000000..cba5de8e29 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/ci/config-reload-values.yaml @@ -0,0 +1,2 @@ +configmapReload: + enabled: true diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/NOTES.txt b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt similarity index 66% rename from deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/NOTES.txt rename to deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt index 6bb90f9caa..91577ad79a 100644 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/NOTES.txt +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/NOTES.txt @@ -2,20 +2,20 @@ {{- if .Values.ingress.enabled }} {{- range $host := .Values.ingress.hosts }} {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} {{- end }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "flattener-processor.fullname" . }}) + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }}) export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "flattener-processor.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "flattener-processor.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alertmanager.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") echo http://$SERVICE_IP:{{ .Values.service.port }} {{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flattener-processor.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME {{ .Values.service.port }}:80 {{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/_helpers.tpl b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl similarity index 71% rename from deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/_helpers.tpl rename to deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl index a6e25c3e02..47d01ca1c6 100644 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/_helpers.tpl +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "flattener-processor.name" -}} +{{- define "alertmanager.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "flattener-processor.fullname" -}} +{{- define "alertmanager.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,16 +27,16 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "flattener-processor.chart" -}} +{{- define "alertmanager.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Common labels */}} -{{- define "flattener-processor.labels" -}} -helm.sh/chart: {{ include "flattener-processor.chart" . }} -{{ include "flattener-processor.selectorLabels" . }} +{{- define "alertmanager.labels" -}} +helm.sh/chart: {{ include "alertmanager.chart" . }} +{{ include "alertmanager.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -46,17 +46,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "flattener-processor.selectorLabels" -}} -app.kubernetes.io/name: {{ include "flattener-processor.name" . }} +{{- define "alertmanager.selectorLabels" -}} +app.kubernetes.io/name: {{ include "alertmanager.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end -}} {{/* Create the name of the service account to use */}} -{{- define "flattener-processor.serviceAccountName" -}} +{{- define "alertmanager.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "flattener-processor.fullname" .) .Values.serviceAccount.name }} + {{ default (include "alertmanager.fullname" .) 
.Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml new file mode 100644 index 0000000000..71d5ea0933 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} +data: + alertmanager.yml: | + {{- toYaml .Values.config | default "{}" | nindent 4 }} + {{- range $key, $value := .Values.templates }} + {{ $key }}: |- + {{- $value | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml new file mode 100644 index 0000000000..efc9599c01 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "alertmanager.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml new file mode 100644 index 0000000000..f6f8b3e80a --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "alertmanager.fullname" . 
}} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . | nindent 6 }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/serviceaccount.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml similarity index 63% rename from deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/serviceaccount.yaml rename to deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml index 5e5ba9ca95..9ca80f4cba 100644 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/serviceaccount.yaml +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/serviceaccount.yaml @@ -2,9 +2,9 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ include "flattener-processor.serviceAccountName" . }} + name: {{ include "alertmanager.serviceAccountName" . }} labels: - {{- include "flattener-processor.labels" . | nindent 4 }} + {{- include "alertmanager.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml new file mode 100644 index 0000000000..81e30a9468 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/services.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Service +metadata: + name: alertmanager + labels: + {{- include "alertmanager.labels" . | nindent 4 }} +{{- if .Values.service.annotations }} + annotations: + {{- toYaml .Values.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alertmanager.fullname" . }}-headless + labels: + {{- include "alertmanager.labels" . | nindent 4 }} +spec: + clusterIP: None + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }} + - port: 9094 + targetPort: 9094 + protocol: TCP + name: cluster-tcp + - port: 9094 + targetPort: 9094 + protocol: UDP + name: cluster-udp + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml new file mode 100644 index 0000000000..95ed0ce72e --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/templates/statefulset.yaml @@ -0,0 +1,152 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} +{{- if .Values.statefulSet.annotations }} + annotations: + {{ toYaml .Values.statefulSet.annotations | nindent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . 
| nindent 6 }} + serviceName: {{ include "alertmanager.fullname" . }}-headless + template: + metadata: + labels: + {{- include "alertmanager.selectorLabels" . | nindent 8 }} +{{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 8 }} +{{- end }} + annotations: + {{- if not .Values.configmapReload.enabled }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} +{{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} +{{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "alertmanager.serviceAccountName" . }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + {{- if and (.Values.configmapReload.enabled) (.Values.config) }} + - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + args: + - --volume-dir=/etc/alertmanager + - --webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload + resources: + {{- toYaml .Values.configmapReload.resources | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/alertmanager + {{- end }} + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP +{{- if .Values.command }} + command: + {{- toYaml .Values.command | nindent 12 }} +{{- end }} + args: + - --storage.path=/alertmanager + - --config.file=/etc/alertmanager/alertmanager.yml + {{- if or (gt .Values.replicaCount 1.0) (.Values.additionalPeers) }} + - --cluster.advertise-address=$(POD_IP):9094 + - --cluster.listen-address=0.0.0.0:9094 + {{- end }} + {{- if gt .Values.replicaCount 1.0}} + {{- $fullName := include "alertmanager.fullname" . }} + {{- range $i := until (int .Values.replicaCount) }} + - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:9094 + {{- end }} + {{- end }} + {{- if .Values.additionalPeers }} + {{- range $item := .Values.additionalPeers }} + - --cluster.peer={{ $item }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - name: http + containerPort: 9093 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- if .Values.config }} + - name: config + mountPath: /etc/alertmanager + {{- end }} + - name: storage + mountPath: /alertmanager + {{- if .Values.config }} + volumes: + - name: config + configMap: + name: {{ include "alertmanager.fullname" . 
}} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 10 }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} +{{- else }} + - name: storage + emptyDir: {} +{{- end -}} diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap new file mode 100644 index 0000000000..3b149c763b --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/__snapshot__/ingress_test.yaml.snap @@ -0,0 +1,48 @@ +should match snapshot of default values: + 1: | + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + labels: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: alertmanager + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: alertmanager-1.0.0 + name: RELEASE-NAME-alertmanager + spec: + ingressClassName: nginx-test + rules: + - host: alertmanager.domain.com + http: + paths: + - backend: + service: + name: RELEASE-NAME-alertmanager + port: + number: 9093 + path: / + pathType: ImplementationSpecific +should match snapshot of default values with old kubernetes ingress: + 1: | + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: nginx-test + labels: + app.kubernetes.io/instance: RELEASE-NAME + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: alertmanager + app.kubernetes.io/version: 1.0.0 + helm.sh/chart: alertmanager-1.0.0 + name: RELEASE-NAME-alertmanager + spec: + rules: + - host: alertmanager.domain.com + http: + paths: + - backend: + serviceName: RELEASE-NAME-alertmanager + servicePort: 9093 + path: / diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml new file mode 100644 index 0000000000..b468c0c2b8 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/unittests/ingress_test.yaml @@ -0,0 +1,81 @@ +suite: test ingress +templates: + - ingress.yaml +tests: + - it: should be empty if ingress is not enabled + asserts: + - hasDocuments: + count: 0 + - it: should have apiVersion extensions/v1beta1 for k8s < 1.14 + set: + ingress.enabled: true + capabilities: + majorVersion: 1 + minorVersion: 13 + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + - isAPIVersion: + of: extensions/v1beta1 + - it: should have apiVersion networking.k8s.io/v1beta1 for k8s < 1.19 + set: + ingress.enabled: true + capabilities: + majorVersion: 1 + minorVersion: 18 + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + - isAPIVersion: + of: networking.k8s.io/v1beta1 + - it: should have apiVersion networking.k8s.io/v1 for k8s >= 1.19 + set: + ingress.enabled: true + capabilities: + majorVersion: 1 + minorVersion: 19 + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Ingress + - isAPIVersion: + of: networking.k8s.io/v1 + - it: should have an ingressClassName for k8s >= 1.19 + set: + ingress.enabled: true + ingress.className: nginx-test + capabilities: + majorVersion: 1 + 
minorVersion: 19 + asserts: + - hasDocuments: + count: 1 + - equal: + path: spec.ingressClassName + value: nginx-test + - it: should match snapshot of default values + set: + ingress.enabled: true + ingress.className: nginx-test + chart: + version: 1.0.0 + appVersion: 1.0.0 + asserts: + - matchSnapshot: { } + - it: should match snapshot of default values with old kubernetes ingress + set: + ingress.enabled: true + ingress.className: nginx-test + capabilities: + majorVersion: 1 + minorVersion: 17 + chart: + version: 1.0.0 + appVersion: 1.0.0 + asserts: + - matchSnapshot: { } diff --git a/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml b/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml new file mode 100644 index 0000000000..1547564070 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/alertmanager/values.yaml @@ -0,0 +1,189 @@ +# Default values for alertmanager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: signoz/alertmanager + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "0.5.0" + +extraArgs: {} + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: + fsGroup: 65534 +dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 +securityContext: + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + +additionalPeers: [] + +service: + annotations: {} + type: ClusterIP + port: 9093 + # if you want to force a specific nodePort. Must be use with service.type=NodePort + # nodePort: + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: alertmanager.domain.com + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - alertmanager.domain.com + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 32Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +statefulSet: + annotations: {} + +podAnnotations: {} +podLabels: {} + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 + +command: [] + +persistence: + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 100Mi + +config: + global: + resolve_timeout: 1m + slack_api_url: 'https://hooks.slack.com/services/xxx' + + templates: + - '/etc/alertmanager/*.tmpl' + + receivers: + - name: 'slack-notifications' + slack_configs: + - channel: '#alerts' + send_resolved: true + icon_url: https://avatars3.githubusercontent.com/u/3380462 + title: '{{ template "slack.title" . }}' + text: '{{ template "slack.text" . }}' + + + route: + receiver: 'slack-notifications' + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## If false, the configmap-reload container will not be deployed + ## + enabled: false + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +templates: + title.tmpl: |- + {{ define "slack.title" }} + [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }} + {{- if gt (len .CommonLabels) (len .GroupLabels) -}} + {{" "}}( + {{- with .CommonLabels.Remove .GroupLabels.Names }} + {{- range $index, $label := .SortedPairs -}} + {{ if $index }}, {{ end }} + {{- $label.Name }}="{{ $label.Value -}}" + {{- end }} + {{- end -}} + ) + {{- end }} + {{ end }} + text.tmpl: |- + {{ define "slack.text" }} + {{ range .Alerts -}} + *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }} + + *Description:* {{ .Annotations.description }} + + *Details:* + {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` + {{ end }} + {{ end }} + {{ end }} \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/Chart.yaml deleted file mode 100644 index 8b21653f16..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: flattener-processor -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -version: 0.3.6 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. 
-appVersion: 0.3.6 diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/deployment.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/deployment.yaml deleted file mode 100644 index 933fe713c4..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/deployment.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "flattener-processor.fullname" . }} - labels: - {{- include "flattener-processor.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "flattener-processor.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "flattener-processor.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "flattener-processor.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - "/root/flattener" - ports: - - name: http - containerPort: 8080 - protocol: TCP - env: - - name: KAFKA_BROKER - value: {{ .Values.configVars.KAFKA_BROKER }} - - name: KAFKA_INPUT_TOPIC - value: {{ .Values.configVars.KAFKA_INPUT_TOPIC }} - - name: KAFKA_OUTPUT_TOPIC - value: {{ .Values.configVars.KAFKA_OUTPUT_TOPIC }} - - # livenessProbe: - # httpGet: - # path: / - # port: http - # readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/ingress.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/ingress.yaml deleted file mode 100644 index 5550df87f5..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/ingress.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "flattener-processor.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "flattener-processor.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . 
}} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} -{{- end }} diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/service.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/service.yaml deleted file mode 100644 index 20de6f95f7..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "flattener-processor.fullname" . }} - labels: - {{- include "flattener-processor.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "flattener-processor.selectorLabels" . | nindent 4 }} diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/tests/test-connection.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/tests/test-connection.yaml deleted file mode 100644 index 4ffe7f16d1..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "flattener-processor.fullname" . }}-test-connection" - labels: - {{- include "flattener-processor.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "flattener-processor.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/deploy/kubernetes/platform/signoz-charts/flattener-processor/values.yaml b/deploy/kubernetes/platform/signoz-charts/flattener-processor/values.yaml deleted file mode 100644 index e8fe280bc7..0000000000 --- a/deploy/kubernetes/platform/signoz-charts/flattener-processor/values.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# Default values for flattener-processor. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: signoz/flattener-processor - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - - -configVars: - KAFKA_BROKER: signoz-kafka:9092 - KAFKA_INPUT_TOPIC: otlp_spans - KAFKA_OUTPUT_TOPIC: flattened_spans - -serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 8080 - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml index 6f42297444..81d494d9ae 100644 --- a/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml +++ b/deploy/kubernetes/platform/signoz-charts/frontend/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 0.3.6 +version: 0.5.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 0.3.6 +appVersion: 0.5.2 diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml index aebe4b2216..278cb1513c 100644 --- a/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml +++ b/deploy/kubernetes/platform/signoz-charts/frontend/templates/config.yaml @@ -24,6 +24,9 @@ data: index index.html index.htm; try_files $uri $uri/ /index.html; } + location /api/alertmanager{ + proxy_pass http://{{ .Values.config.alertmanagerUrl }}/api/v2; + } location /api { proxy_pass http://{{ .Values.config.queryServiceUrl }}/api; } diff --git a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml b/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml index 6a925e2fc6..a75390c191 100644 --- a/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml +++ b/deploy/kubernetes/platform/signoz-charts/frontend/values.yaml @@ -17,6 +17,7 @@ configVars: {} config: name: signoz-nginx-config queryServiceUrl: signoz-query-service:8080 + alertmanagerUrl: alertmanager:9093 serviceAccount: # Specifies whether a service account should be created diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml index 73d48fb948..07040e3a42 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 0.3.6 +version: 0.5.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 0.3.6 +appVersion: 0.5.1 diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml new file mode 100644 index 0000000000..6dc77baf64 --- /dev/null +++ b/deploy/kubernetes/platform/signoz-charts/query-service/templates/prometheus-configmap.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config +data: + prometheus.yml: | + # my global config + global: + scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). 
+ + # Alertmanager configuration + alerting: + alertmanagers: + - static_configs: + - targets: + - alertmanager:9093 + + # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. + rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + - 'alerts.yml' + + # A scrape configuration containing exactly one endpoint to scrape: + # Here it's Prometheus itself. + scrape_configs: + + + remote_read: + - url: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password + diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/templates/deployment.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml similarity index 64% rename from deploy/kubernetes/platform/signoz-charts/query-service/templates/deployment.yaml rename to deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml index 68d7c6e2da..097e2bbcdc 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/templates/deployment.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/templates/statefulset.yaml @@ -1,10 +1,11 @@ apiVersion: apps/v1 -kind: Deployment +kind: StatefulSet metadata: name: {{ include "query-service.fullname" . }} labels: {{- include "query-service.labels" . | nindent 4 }} spec: + serviceName: query-service replicas: {{ .Values.replicaCount }} selector: matchLabels: @@ -14,19 +15,17 @@ spec: labels: {{- include "query-service.selectorLabels" . | nindent 8 }} spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "query-service.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: - name: {{ .Chart.Name }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} securityContext: {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["-config=/root/config/prometheus.yml"] ports: - name: http containerPort: 8080 @@ -38,7 +37,10 @@ spec: value: {{ .Values.configVars.DruidDatasource }} - name: STORAGE value: {{ .Values.configVars.STORAGE }} - + - name: ClickHouseUrl + value: {{ .Values.configVars.ClickHouseUrl}} + - name: GODEBUG + value: netdns=go # livenessProbe: # httpGet: # path: / @@ -47,6 +49,13 @@ spec: # httpGet: # path: / # port: http + volumeMounts: + - name: prometheus + mountPath: /root/config + - name: signoz-db + mountPath: /var/lib/signoz/ + - name: dashboards + mountPath: /root/config/dashboards resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} @@ -61,3 +70,18 @@ spec: tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} + volumes: + - name: prometheus + configMap: + name: prometheus-config + - name: dashboards + emptyDir: {} + + volumeClaimTemplates: + - metadata: + name: signoz-db + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml index 53de74316b..b18bf629ab 100644 --- a/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml +++ b/deploy/kubernetes/platform/signoz-charts/query-service/values.yaml @@ -16,7 +16,8 @@ fullnameOverride: "" configVars: DruidClientUrl: http://signoz-druid-router:8888 DruidDatasource: flattened_spans - STORAGE: druid + ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password + STORAGE: clickhouse POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w" diff --git a/deploy/kubernetes/platform/templates/clickhouse-config.yaml b/deploy/kubernetes/platform/templates/clickhouse-config.yaml new file mode 100644 index 0000000000..51011c64ab --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-config.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: initdb-config +data: + init-db.sql: |- + CREATE TABLE IF NOT EXISTS signoz_index ( + timestamp DateTime64(9) CODEC(Delta, ZSTD(1)), + traceID String CODEC(ZSTD(1)), + spanID String CODEC(ZSTD(1)), + parentSpanID String CODEC(ZSTD(1)), + serviceName LowCardinality(String) CODEC(ZSTD(1)), + name LowCardinality(String) CODEC(ZSTD(1)), + kind Int32 CODEC(ZSTD(1)), + durationNano UInt64 CODEC(ZSTD(1)), + tags Array(String) CODEC(ZSTD(1)), + tagsKeys Array(String) CODEC(ZSTD(1)), + tagsValues Array(String) CODEC(ZSTD(1)), + statusCode Int64 CODEC(ZSTD(1)), + references String CODEC(ZSTD(1)), + externalHttpMethod Nullable(String) CODEC(ZSTD(1)), + externalHttpUrl Nullable(String) CODEC(ZSTD(1)), + component Nullable(String) CODEC(ZSTD(1)), + dbSystem Nullable(String) CODEC(ZSTD(1)), + dbName Nullable(String) CODEC(ZSTD(1)), + dbOperation Nullable(String) CODEC(ZSTD(1)), + peerService Nullable(String) CODEC(ZSTD(1)), + INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64, + INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64, + INDEX idx_duration durationNano TYPE minmax GRANULARITY 1 + ) ENGINE MergeTree() + PARTITION BY toDate(timestamp) + ORDER BY (serviceName, -toUnixTimestamp(timestamp)) \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/clickhouse-instance.yaml b/deploy/kubernetes/platform/templates/clickhouse-instance.yaml new file mode 100644 index 0000000000..855d9f6675 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-instance.yaml @@ -0,0 +1,104 @@ +{{ if (eq (.Values.cloud | toString) "gcp" )}} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gce-resizable +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-standard + fstype: ext4 + replication-type: none +reclaimPolicy: Retain +#volumeBindingMode: Immediate +allowVolumeExpansion: true +{{- else if (eq (.Values.cloud | toString) "aws") }} +# +# AWS resizable disk example +# +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp2-resizable +provisioner: kubernetes.io/aws-ebs +parameters: + type: gp2 +reclaimPolicy: Retain +#volumeBindingMode: Immediate +allowVolumeExpansion: true +{{- end }} +--- +apiVersion: 
"clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: signoz +spec: + defaults: + templates: + dataVolumeClaimTemplate: default-volume-claim + # logVolumeClaimTemplate: default-volume-claim + serviceTemplate: chi-service-template + configuration: + zookeeper: + nodes: + - host: signoz-zookeeper + port: 2181 + session_timeout_ms: 6000 + clusters: + - name: cluster + # Templates are specified for this cluster explicitly + templates: + dataVolumeClaimTemplate: default-volume-claim + # logVolumeClaimTemplate: default-volume-claim + podTemplate: pod-template-with-volume + layout: + shardsCount: 1 + replicasCount: 1 + templates: + hostTemplates: + - name: port-distribution + portDistribution: + - type: ClusterScopeIndex + spec: + tcpPort: 9000 + httpPort: 8123 + interserverHTTPPort: 9009 + + podTemplates: + - name: pod-template-with-volume + spec: + containers: + - name: clickhouse + image: yandex/clickhouse-server:21.7 + volumeMounts: + - name: default-volume-claim + mountPath: /var/lib/clickhouse + - name: initdb + mountPath: /docker-entrypoint-initdb.d + volumes: + - name: initdb + configMap: + name: initdb-config + serviceTemplates: + - name: chi-service-template + generateName: signoz-clickhouse + spec: + ports: + - name: http + port: 8123 + - name: tcp + port: 9000 + type: {{ .Values.clickhouseOperator.serviceType }} + volumeClaimTemplates: + - name: default-volume-claim + reclaimPolicy: Retain + spec: + {{- if (eq (.Values.cloud | toString) "gcp" )}} + storageClassName: gce-resizable + {{- else if (eq (.Values.cloud | toString) "aws") }} + storageClassName: gp2-resizable + {{- end }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.clickhouseOperator.storage | quote }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml new file mode 100644 index 0000000000..5242d671d5 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrole.yaml @@ -0,0 +1,157 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Template Parameters: +# +# NAMESPACE=posthog +# COMMENT=# +# ROLE_KIND=ClusterRole +# ROLE_NAME=clickhouse-operator-posthog +# ROLE_BINDING_KIND=ClusterRoleBinding +# ROLE_BINDING_NAME=clickhouse-operator-posthog +# +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: clickhouse-operator-posthog + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - services + verbs: + - create + - delete + - get + - patch + - update + - list + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + - pods + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - patch + - update + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - delete + - get + - patch + - update +- apiGroups: + - apps + resourceNames: + - clickhouse-operator + resources: + - deployments + verbs: + - get + - patch + - update + - delete +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - 
delete + - get + - patch + - update + - list + - watch +- apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations + verbs: + - delete + - get + - patch + - update +- apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations + - clickhouseinstallationtemplates + - clickhouseoperatorconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/finalizers + - clickhouseinstallationtemplates/finalizers + - clickhouseoperatorconfigurations/finalizers + verbs: + - update +- apiGroups: + - clickhouse.altinity.com + resources: + - clickhouseinstallations/status + - clickhouseinstallationtemplates/status + - clickhouseoperatorconfigurations/status + verbs: + - create + - delete + - get + - update + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml new file mode 100644 index 0000000000..e06d68a4f2 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Setup ClusterRoleBinding between ClusterRole and ServiceAccount. +# ClusterRoleBinding is namespace-less and must have unique name +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clickhouse-operator-posthog + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: clickhouse-operator-posthog +subjects: +- kind: ServiceAccount + name: clickhouse-operator + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml new file mode 100644 index 0000000000..8c70ddd136 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/configmap.yaml @@ -0,0 +1,418 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Template Parameters: +# +# NAME=etc-clickhouse-operator-files +# NAMESPACE=posthog +# COMMENT= +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: etc-clickhouse-operator-files + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +data: + config.yaml: | + ################################################ + ## + ## Watch Namespaces Section + ## + ################################################ + + # List of namespaces where clickhouse-operator watches for events. + # Concurrently running operators should watch on different namespaces + #watchNamespaces: ["dev", "test"] + watchNamespaces: [] + + ################################################ + ## + ## Additional Configuration Files Section + ## + ################################################ + + # Path to folder where ClickHouse configuration files common for all instances within CHI are located. + chCommonConfigsPath: config.d + + # Path to folder where ClickHouse configuration files unique for each instance (host) within CHI are located. + chHostConfigsPath: conf.d + + # Path to folder where ClickHouse configuration files with users settings are located. 
+ # Files are common for all instances within CHI + chUsersConfigsPath: users.d + + # Path to folder where ClickHouseInstallation .yaml manifests are located. + # Manifests are applied in sorted alpha-numeric order + chiTemplatesPath: templates.d + + ################################################ + ## + ## Cluster Create/Update/Delete Objects Section + ## + ################################################ + + # How many seconds to wait for created/updated StatefulSet to be Ready + statefulSetUpdateTimeout: 300 + + # How many seconds to wait between checks for created/updated StatefulSet status + statefulSetUpdatePollPeriod: 5 + + # What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + # Possible options: + # 1. abort - do nothing, just break the process and wait for admin + # 2. delete - delete newly created problematic StatefulSet + # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet + onStatefulSetCreateFailureAction: ignore + + # What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds + # Possible options: + # 1. abort - do nothing, just break the process and wait for admin + # 2. rollback - delete Pod and rollback StatefulSet to previous Generation. + # Pod would be recreated by StatefulSet based on rollback-ed configuration + # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet + onStatefulSetUpdateFailureAction: rollback + + ################################################ + ## + ## ClickHouse Settings Section + ## + ################################################ + + # Default values for ClickHouse user configuration + # 1. user/profile - string + # 2. user/quota - string + # 3. user/networks/ip - multiple strings + # 4. user/password - string + chConfigUserDefaultProfile: default + chConfigUserDefaultQuota: default + chConfigUserDefaultNetworksIP: + - "::1" + - "127.0.0.1" + chConfigUserDefaultPassword: "default" + + # Default host_regexp to limit network connectivity from outside + chConfigNetworksHostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$" + + ################################################ + ## + ## Access to ClickHouse instances + ## + ################################################ + + # ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances + # for: + # 1. Metrics requests + # 2. Schema maintenance + # 3. 
DROP DNS CACHE + # User with such credentials can be specified in additional ClickHouse .xml config files, + # located in `chUsersConfigsPath` folder + chUsername: "clickhouse_operator" + chPassword: "clickhouse_operator_password" + + # Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances + # Can be used instead of explicitly specified username and password + chCredentialsSecretNamespace: "" + chCredentialsSecretName: "" + + # Port where to connect to ClickHouse instances to + chPort: 8123 + + ################################################ + ## + ## Log parameters + ## + ################################################ + + logtostderr: "true" + alsologtostderr: "false" + v: "1" + stderrthreshold: "" + vmodule: "" + log_backtrace_at: "" + + ################################################ + ## + ## Runtime parameters + ## + ################################################ + + # Max number of concurrent reconciles in progress + reconcileThreadsNumber: 10 + reconcileWaitExclude: true + reconcileWaitInclude: false + + ################################################ + ## + ## Labels management parameters + ## + ################################################ + + # When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`, + # exclude labels from the following list: + #excludeFromPropagationLabels: + # - "labelA" + # - "labelB" + + # Whether to append *Scope* labels to StatefulSet and Pod. + # Full list of available *scope* labels check in labeler.go + # LabelShardScopeIndex + # LabelReplicaScopeIndex + # LabelCHIScopeIndex + # LabelCHIScopeCycleSize + # LabelCHIScopeCycleIndex + # LabelCHIScopeCycleOffset + # LabelClusterScopeIndex + # LabelClusterScopeCycleSize + # LabelClusterScopeCycleIndex + # LabelClusterScopeCycleOffset + appendScopeLabels: "no" + + ################################################ + ## + ## Pod management parameters + ## + ################################################ + # Grace period for Pod termination. + # How many seconds to wait between sending + # SIGTERM and SIGKILL during Pod termination process. + # Increase this number is case of slow shutdown. + terminationGracePeriod: 30 + +--- + +# Template Parameters: +# +# NAME=etc-clickhouse-operator-confd-files +# NAMESPACE=posthog +# COMMENT= +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: etc-clickhouse-operator-confd-files + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +data: + +--- + +# Template Parameters: +# +# NAME=etc-clickhouse-operator-configd-files +# NAMESPACE=posthog +# COMMENT= +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: etc-clickhouse-operator-configd-files + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +data: + 01-clickhouse-01-listen.xml: | + + + :: + 0.0.0.0 + 1 + + + 01-clickhouse-02-logger.xml: | + + + + debug + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + 1 + + + + 01-clickhouse-03-query_log.xml: | + + + system + query_log
+        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+      </query_log>
+      <query_thread_log remove="1"/>
+    </yandex>
+
+  01-clickhouse-04-part_log.xml: |
+    <yandex>
+      <part_log replace="1">
+        <database>system</database>
+        <table>part_log</table>
+        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+      </part_log>
+    </yandex>
+ +--- + +# Template Parameters: +# +# NAME=etc-clickhouse-operator-templatesd-files +# NAMESPACE=posthog +# COMMENT= +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: etc-clickhouse-operator-templatesd-files + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +data: + 001-templates.json.example: | + { + "apiVersion": "clickhouse.altinity.com/v1", + "kind": "ClickHouseInstallationTemplate", + "metadata": { + "name": "01-default-volumeclaimtemplate" + }, + "spec": { + "templates": { + "volumeClaimTemplates": [ + { + "name": "chi-default-volume-claim-template", + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "2Gi" + } + } + } + } + ], + "podTemplates": [ + { + "name": "chi-default-oneperhost-pod-template", + "distribution": "OnePerHost", + "spec": { + "containers" : [ + { + "name": "clickhouse", + "image": "yandex/clickhouse-server:19.3.7", + "ports": [ + { + "name": "http", + "containerPort": 8123 + }, + { + "name": "client", + "containerPort": 9000 + }, + { + "name": "interserver", + "containerPort": 9009 + } + ] + } + ] + } + } + ] + } + } + } + + default-pod-template.yaml.example: | + apiVersion: "clickhouse.altinity.com/v1" + kind: "ClickHouseInstallationTemplate" + metadata: + name: "default-oneperhost-pod-template" + spec: + templates: + podTemplates: + - name: default-oneperhost-pod-template + distribution: "OnePerHost" + default-storage-template.yaml.example: | + apiVersion: "clickhouse.altinity.com/v1" + kind: "ClickHouseInstallationTemplate" + metadata: + name: "default-storage-template-2Gi" + spec: + templates: + volumeClaimTemplates: + - name: default-storage-template-2Gi + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + + readme: | + Templates in this folder are packaged with an operator and available via 'useTemplate' + +--- + +# Template Parameters: +# +# NAME=etc-clickhouse-operator-usersd-files +# NAMESPACE=posthog +# COMMENT= +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: etc-clickhouse-operator-usersd-files + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +data: + 01-clickhouse-user.xml: | + + + + + 127.0.0.1 + 0.0.0.0/0 + ::/0 + + 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448 + clickhouse_operator + default + + + + + 0 + 1 + 10 + + + + + 02-clickhouse-default-profile.xml: | + + + + 1 + 1000 + 1 + 1 + + + + 03-database-ordinary.xml: | + + + + + Ordinary + + + + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml new file mode 100644 index 0000000000..fe0981dc61 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/deployment.yaml @@ -0,0 +1,129 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Template Parameters: +# +# NAMESPACE=posthog +# COMMENT= +# OPERATOR_IMAGE=altinity/clickhouse-operator:latest +# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:latest +# +# Setup Deployment for clickhouse-operator +# Deployment would be created in kubectl-specified namespace +kind: Deployment +apiVersion: apps/v1 +metadata: + name: clickhouse-operator + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +spec: + replicas: 1 + selector: + matchLabels: + app: clickhouse-operator + template: + metadata: + labels: + 
app: clickhouse-operator + annotations: + prometheus.io/port: '8888' + prometheus.io/scrape: 'true' + spec: + serviceAccountName: clickhouse-operator + volumes: + - name: etc-clickhouse-operator-folder + configMap: + name: etc-clickhouse-operator-files + - name: etc-clickhouse-operator-confd-folder + configMap: + name: etc-clickhouse-operator-confd-files + - name: etc-clickhouse-operator-configd-folder + configMap: + name: etc-clickhouse-operator-configd-files + - name: etc-clickhouse-operator-templatesd-folder + configMap: + name: etc-clickhouse-operator-templatesd-files + - name: etc-clickhouse-operator-usersd-folder + configMap: + name: etc-clickhouse-operator-usersd-files + containers: + - name: clickhouse-operator + image: altinity/clickhouse-operator:latest + imagePullPolicy: Always + volumeMounts: + - name: etc-clickhouse-operator-folder + mountPath: /etc/clickhouse-operator + - name: etc-clickhouse-operator-confd-folder + mountPath: /etc/clickhouse-operator/conf.d + - name: etc-clickhouse-operator-configd-folder + mountPath: /etc/clickhouse-operator/config.d + - name: etc-clickhouse-operator-templatesd-folder + mountPath: /etc/clickhouse-operator/templates.d + - name: etc-clickhouse-operator-usersd-folder + mountPath: /etc/clickhouse-operator/users.d + env: + # Pod-specific + # spec.nodeName: ip-172-20-52-62.ec2.internal + - name: OPERATOR_POD_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # metadata.name: clickhouse-operator-6f87589dbb-ftcsf + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # metadata.namespace: kube-system + - name: OPERATOR_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # status.podIP: 100.96.3.2 + - name: OPERATOR_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # spec.serviceAccount: clickhouse-operator + # spec.serviceAccountName: clickhouse-operator + - name: OPERATOR_POD_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + + # Container-specific + - name: OPERATOR_CONTAINER_CPU_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.cpu + - name: OPERATOR_CONTAINER_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.cpu + - name: OPERATOR_CONTAINER_MEM_REQUEST + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: requests.memory + - name: OPERATOR_CONTAINER_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: clickhouse-operator + resource: limits.memory + + - name: metrics-exporter + image: altinity/metrics-exporter:latest + imagePullPolicy: Always + volumeMounts: + - name: etc-clickhouse-operator-folder + mountPath: /etc/clickhouse-operator + - name: etc-clickhouse-operator-confd-folder + mountPath: /etc/clickhouse-operator/conf.d + - name: etc-clickhouse-operator-configd-folder + mountPath: /etc/clickhouse-operator/config.d + - name: etc-clickhouse-operator-templatesd-folder + mountPath: /etc/clickhouse-operator/templates.d + - name: etc-clickhouse-operator-usersd-folder + mountPath: /etc/clickhouse-operator/users.d + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml new file mode 100644 index 0000000000..4794c800c7 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/service.yaml @@ -0,0 +1,26 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Template Parameters: +# +# 
NAMESPACE=posthog +# COMMENT= +# +# Setup ClusterIP Service to provide monitoring metrics for Prometheus +# Service would be created in kubectl-specified namespace +# In order to get access outside of k8s it should be exposed as: +# kubectl --namespace prometheus port-forward service/prometheus 9090 +# and point browser to localhost:9090 +kind: Service +apiVersion: v1 +metadata: + name: clickhouse-operator-metrics + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + labels: + app: clickhouse-operator +spec: + ports: + - port: 8888 + name: clickhouse-operator-metrics + selector: + app: clickhouse-operator + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml b/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml new file mode 100644 index 0000000000..247e80a197 --- /dev/null +++ b/deploy/kubernetes/platform/templates/clickhouse-operator/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.clickhouseOperator.enabled }} +# Template Parameters: +# +# COMMENT= +# NAMESPACE=posthog +# NAME=clickhouse-operator +# +# Setup ServiceAccount +apiVersion: v1 +kind: ServiceAccount +metadata: + name: clickhouse-operator + namespace: {{ .Values.clickhouseOperator.namespace | default .Release.Namespace }} + +{{- end }} diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml new file mode 100644 index 0000000000..ee337b6f42 --- /dev/null +++ b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-config.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: otel-collector-metrics-conf + labels: + app: opentelemetry + component: otel-collector-metrics-conf +data: + otel-collector-metrics-config: | + receivers: + otlp: + protocols: + grpc: + http: + + # Data sources: metrics + prometheus: + config: + scrape_configs: + - job_name: "otel-collector" + scrape_interval: 30s + static_configs: + - targets: ["otel-collector:8889"] + processors: + batch: + send_batch_size: 1000 + timeout: 10s + memory_limiter: + # Same as --mem-ballast-size-mib CLI argument + ballast_size_mib: 683 + # 80% of maximum memory up to 2G + limit_mib: 1500 + # 25% of limit up to 2G + spike_limit_mib: 512 + check_interval: 5s + # queued_retry: + # num_workers: 4 + # queue_size: 100 + # retry_on_failure: true + extensions: + health_check: {} + zpages: {} + exporters: + clickhousemetricswrite: + endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password + + service: + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp, prometheus] + processors: [batch] + exporters: [clickhousemetricswrite] \ No newline at end of file diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml new file mode 100644 index 0000000000..eeed7079e5 --- /dev/null +++ b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: otel-collector-metrics + labels: + app: opentelemetry + component: otel-collector-metrics +spec: + selector: + matchLabels: + app: opentelemetry + component: otel-collector-metrics + 
minReadySeconds: 5 + progressDeadlineSeconds: 120 + replicas: 1 #TODO - adjust this to your own requirements + template: + metadata: + labels: + app: opentelemetry + component: otel-collector-metrics + spec: + containers: + - command: + - "/otelcontribcol" + - "--config=/conf/otel-collector-metrics-config.yaml" +# Memory Ballast size should be max 1/3 to 1/2 of memory. + - "--mem-ballast-size-mib=683" + image: signoz/otelcontribcol:0.4.2 + name: otel-collector + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 200m + memory: 400Mi + ports: + - containerPort: 55679 # Default endpoint for ZPages. + - containerPort: 55680 # Default endpoint for OpenTelemetry receiver. + - containerPort: 55681 # Default endpoint for OpenTelemetry HTTP/1.0 receiver. + - containerPort: 4317 # Default endpoint for OpenTelemetry GRPC receiver. + - containerPort: 14250 # Default endpoint for Jaeger GRPC receiver. + - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver. + - containerPort: 9411 # Default endpoint for Zipkin receiver. + - containerPort: 8888 # Default endpoint for querying metrics. + volumeMounts: + - name: otel-collector-metrics-config-vol + mountPath: /conf +# - name: otel-collector-secrets +# mountPath: /secrets + livenessProbe: + httpGet: + path: / + port: 13133 # Health Check extension default port. + readinessProbe: + httpGet: + path: / + port: 13133 # Health Check extension default port. + volumes: + - configMap: + name: otel-collector-metrics-conf + items: + - key: otel-collector-metrics-config + path: otel-collector-metrics-config.yaml + name: otel-collector-metrics-config-vol +# - secret: +# name: otel-collector-secrets +# items: +# - key: cert.pem +# path: cert.pem +# - key: key.pem +# path: key.pem diff --git a/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml new file mode 100644 index 0000000000..b8a1f93a5f --- /dev/null +++ b/deploy/kubernetes/platform/templates/otel-collector-metrics/otel-collector-metrics-service.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + name: otel-collector-metrics + labels: + app: opentelemetry + component: otel-collector-metrics +spec: + ports: + - name: otlp # Default endpoint for OpenTelemetry receiver. + port: 55680 + protocol: TCP + targetPort: 55680 + - name: otlp-http-legacy # Default endpoint for OpenTelemetry receiver. + port: 55681 + protocol: TCP + targetPort: 55681 + - name: otlp-grpc # Default endpoint for OpenTelemetry receiver. + port: 4317 + protocol: TCP + targetPort: 4317 + - name: jaeger-grpc # Default endpoing for Jaeger gRPC receiver + port: 14250 + - name: jaeger-thrift-http # Default endpoint for Jaeger HTTP receiver. + port: 14268 + - name: zipkin # Default endpoint for Zipkin receiver. + port: 9411 + - name: metrics # Default endpoint for querying metrics. 
diff --git a/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml
new file mode 100644
index 0000000000..c1de1ba3a6
--- /dev/null
+++ b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-config.yaml
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: otel-collector-conf
+  labels:
+    app: opentelemetry
+    component: otel-collector-conf
+data:
+  otel-collector-config: |
+    receivers:
+      otlp/spanmetrics:
+        protocols:
+          grpc:
+            endpoint: "localhost:12345"
+      otlp:
+        protocols:
+          grpc:
+          http:
+      jaeger:
+        protocols:
+          grpc:
+          thrift_http:
+    processors:
+      batch:
+        send_batch_size: 1000
+        timeout: 10s
+      signozspanmetrics/prometheus:
+        metrics_exporter: prometheus
+        latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
+      memory_limiter:
+        # Same as --mem-ballast-size-mib CLI argument
+        ballast_size_mib: 683
+        # 80% of maximum memory up to 2G
+        limit_mib: 1500
+        # 25% of limit up to 2G
+        spike_limit_mib: 512
+        check_interval: 5s
+      # queued_retry:
+      #   num_workers: 4
+      #   queue_size: 100
+      #   retry_on_failure: true
+    extensions:
+      health_check: {}
+      zpages: {}
+    exporters:
+      clickhouse:
+        datasource: tcp://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
+      clickhousemetricswrite:
+        endpoint: tcp://signoz-clickhouse:9000/?database=signoz_metrics&username=clickhouse_operator&password=clickhouse_operator_password
+        resource_to_telemetry_conversion:
+          enabled: true
+      prometheus:
+        endpoint: "0.0.0.0:8889"
+    service:
+      extensions: [health_check, zpages]
+      pipelines:
+        traces:
+          receivers: [jaeger, otlp]
+          processors: [signozspanmetrics/prometheus, batch]
+          exporters: [clickhouse]
+        metrics:
+          receivers: [otlp]
+          processors: [batch]
+          exporters: [clickhousemetricswrite]
+        metrics/spanmetrics:
+          receivers: [otlp/spanmetrics]
+          exporters: [prometheus]
\ No newline at end of file
diff --git a/deploy/kubernetes/otel-collector/deployment.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml
similarity index 93%
rename from deploy/kubernetes/otel-collector/deployment.yaml
rename to deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml
index 8aed349db6..52d3313e0e 100644
--- a/deploy/kubernetes/otel-collector/deployment.yaml
+++ b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-deployment.yaml
@@ -21,11 +21,11 @@ spec:
     spec:
       containers:
       - command:
-          - "/otelcol"
+          - "/otelcontribcol"
           - "--config=/conf/otel-collector-config.yaml"
 # Memory Ballast size should be max 1/3 to 1/2 of memory.
           - "--mem-ballast-size-mib=683"
-        image: otel/opentelemetry-collector:0.18.0
+        image: signoz/otelcontribcol:0.4.2
         name: otel-collector
         resources:
           limits:
@@ -43,6 +43,7 @@ spec:
         - containerPort: 14268 # Default endpoint for Jaeger HTTP receiver.
         - containerPort: 9411 # Default endpoint for Zipkin receiver.
         - containerPort: 8888 # Default endpoint for querying metrics.
+        - containerPort: 8889 # Default endpoint for prometheus exported metrics.
         volumeMounts:
         - name: otel-collector-config-vol
           mountPath: /conf
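With the traces pipeline above, instrumented workloads can send spans to this collector either over OTLP (port 4317 for gRPC) or through the Jaeger receiver ports, as the HotRod sample at the end of this patch does. A minimal sketch of the container environment for a hypothetical OTLP-instrumented app, using the standard OpenTelemetry SDK environment variables (the my-app name and image are illustrative only, and how the variables are read depends on the SDK in use):

      containers:
        - name: my-app            # hypothetical workload, not part of this chart
          image: my-app:latest
          env:
            - name: OTEL_EXPORTER_OTLP_ENDPOINT
              value: http://otel-collector.platform.svc.cluster.local:4317
            - name: OTEL_RESOURCE_ATTRIBUTES
              value: service.name=my-app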
diff --git a/deploy/kubernetes/otel-collector/service.yaml b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml
similarity index 90%
rename from deploy/kubernetes/otel-collector/service.yaml
rename to deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml
index 5e95d7c14e..f64303b29b 100644
--- a/deploy/kubernetes/otel-collector/service.yaml
+++ b/deploy/kubernetes/platform/templates/otel-collector/otel-collector-service.yaml
@@ -27,5 +27,7 @@ spec:
     port: 9411
   - name: metrics # Default endpoint for querying metrics.
     port: 8888
+  - name: prometheus-metrics # Default endpoint for querying prometheus metrics.
+    port: 8889
   selector:
     component: otel-collector
\ No newline at end of file
diff --git a/deploy/kubernetes/platform/values.yaml b/deploy/kubernetes/platform/values.yaml
index e62dba82ec..14c369e09a 100644
--- a/deploy/kubernetes/platform/values.yaml
+++ b/deploy/kubernetes/platform/values.yaml
@@ -1,51 +1,15 @@
 zookeeper:
-  autopurge:
-    purgeInterval: 1
-
-kafka:
-  zookeeper:
-    enabled: false
-  externalZookeeper:
-    servers: ["signoz-zookeeper:2181"]
-  zookeeperConnectionTimeoutMs: 6000
-
-druid:
-  image:
-    tag: 0.21.1-rc2
-
-  configVars:
-
-    # To store data on local disks attached
-    druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]'
-    druid_storage_type: local
-
-
-    # # To store data in S3
-    # druid_extensions_loadList: '["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]'
-    # druid_storage_type: s3
-    # druid_storage_bucket: signoz-druid
-    # druid_storage_baseKey: baseKey
-    # AWS_ACCESS_KEY_ID:
-    # AWS_SECRET_ACCESS_KEY:
-    # AWS_REGION:
-
-  historical:
-    persistence:
-      size: "20Gi"
-
-  zkHosts: "signoz-zookeeper:2181"
-
-  zookeeper:
-    enabled: false
-
-flattener-processor:
-  configVars:
-    KAFKA_BROKER: signoz-kafka:9092
-    KAFKA_INPUT_TOPIC: otlp_spans
-    KAFKA_OUTPUT_TOPIC: flattened_spans
+  autopurge:
+    purgeInterval: 1
 
 query-service:
-  configVars:
-    DruidClientUrl: http://signoz-druid-router:8888
-    DruidDatasource: flattened_spans
-    STORAGE: druid
+  configVars:
+    ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
+    STORAGE: clickhouse
+
+cloud: aws
+
+clickhouseOperator:
+  enabled: true
+  storage: 20Gi
+  serviceType: ClusterIP
diff --git a/sample-apps/hotrod/deployment.yaml b/sample-apps/hotrod/deployment.yaml
index 426a33e930..35641c9ca4 100644
--- a/sample-apps/hotrod/deployment.yaml
+++ b/sample-apps/hotrod/deployment.yaml
@@ -16,14 +16,14 @@ spec:
         service: hotrod
     spec:
       containers:
-      - args:
-        - all
-        env:
-        - name: JAEGER_ENDPOINT
-          value: http://otel-collector.platform.svc.cluster.local:14268/api/traces
-        image: jaegertracing/example-hotrod:latest
-        imagePullPolicy: IfNotPresent
-        name: hotrod
-        ports:
-        - containerPort: 8080
+        - args:
+            - all
+          env:
+            - name: JAEGER_ENDPOINT
+              value: http://otel-collector.platform.svc.cluster.local:14268/api/traces
+          image: jaegertracing/example-hotrod:latest
+          imagePullPolicy: IfNotPresent
+          name: hotrod
+          ports:
+            - containerPort: 8080
       restartPolicy: Always
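The slimmed-down values.yaml above exposes only a handful of knobs for the ClickHouse-based setup. As a usage sketch, a custom override file could be passed to the chart with helm's -f flag; the file name is illustrative and the clickhouse_operator credentials are simply the defaults already used in this patch, which would presumably be changed in a real deployment:

# custom-values.yaml (illustrative override file, not part of this patch)
clickhouseOperator:
  enabled: true
  storage: 50Gi            # larger ClickHouse volume than the 20Gi default above
  serviceType: ClusterIP

query-service:
  configVars:
    ClickHouseUrl: http://signoz-clickhouse:9000?username=clickhouse_operator&password=clickhouse_operator_password
    STORAGE: clickhouse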