Prashant Shahi 61a6c21edb
feat: docker/swarm deployment restructuring and improvements (#6904)
This PR contains improvements and fixes for SigNoz deployments on Docker Standalone and Docker Swarm.

- Docker/Swarm deploy directory restructuring and improvements
- Prometheus metrics scraping using labels in Docker Standalone and Docker Swarm (see the sketch after this list)
- Swarm: single schema-migrator container for both sync/async migrations
- Remove Kubernetes HotROD files (to be migrated to charts)
- Fetch histogram-quantile from the release instead of mounting a local binary
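
For the label-based scraping mentioned above, a minimal sketch of how a service might opt in. The label keys and values shown here are assumptions for illustration, not confirmed by this PR; check the compose files under deploy/ for the exact keys it introduces:

services:
  my-app:                        # hypothetical service name
    image: my-app:latest         # hypothetical image
    labels:
      signoz.io/scrape: "true"   # opt the container into Prometheus scraping (assumed key)
      signoz.io/port: "8080"     # port serving metrics (assumed key)
      signoz.io/path: "/metrics" # metrics endpoint path (assumed key)

With label-based discovery, the collector's Prometheus receiver selects scrape targets from container/service labels, so services can be added or removed without editing a static scrape config.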

---------

Signed-off-by: Prashant Shahi <prashant@signoz.io>
2025-01-27 12:48:42 +00:00


<?xml version="1.0"?>
<clickhouse>
    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>10485760</keep_free_space_bytes>
            </default>
            <s3>
                <type>s3</type>
                <!-- For S3 cold storage:
                     if the region is us-east-1, the endpoint can be https://<bucket-name>.s3.amazonaws.com
                     if the region is not us-east-1, the endpoint should be https://<bucket-name>.s3-<region>.amazonaws.com
                     For GCS cold storage:
                     the endpoint should be https://storage.googleapis.com/<bucket-name>/data/
                -->
                <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
                <access_key_id>ACCESS-KEY-ID</access_key_id>
                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
                <!-- For S3, uncomment the configuration below to read AWS credentials
                     from environment variables when they are set. -->
                <!-- <use_environment_credentials>true</use_environment_credentials> -->
                <!-- For GCS, uncomment the configuration below, since GCS does not
                     support batch deletion and attempting it results in error messages
                     in the logs. -->
                <!-- <support_batch_delete>false</support_batch_delete> -->
            </s3>
        </disks>
        <policies>
            <tiered>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                    <s3>
                        <disk>s3</disk>
                        <!-- Do not move parts to S3 at insert time; move them later via TTL. -->
                        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
                    </s3>
                </volumes>
            </tiered>
        </policies>
    </storage_configuration>
</clickhouse>
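
For a table to use the `tiered` policy above, it must reference the policy and define a TTL move to the `s3` volume. A minimal illustrative sketch in ClickHouse SQL; the database, table, and column names are hypothetical, not SigNoz's actual schema, and SigNoz manages these TTLs through its own tooling:

-- Hot data stays on the 'default' disk; parts older than 7 days
-- are moved to the 's3' volume in the background (not on insert,
-- because perform_ttl_move_on_insert is 0 in the policy above).
CREATE TABLE example.events
(
    timestamp DateTime,
    body      String
)
ENGINE = MergeTree
ORDER BY timestamp
TTL timestamp + INTERVAL 7 DAY TO VOLUME 's3'
SETTINGS storage_policy = 'tiered';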