receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
  hostmetrics:
    collection_interval: 60s
    scrapers:
      cpu:
      load:
      memory:
      disk:
      filesystem:
      network:
  # Data sources: metrics
  prometheus:
    config:
      scrape_configs:
        - job_name: "otel-collector"
          dns_sd_configs:
            - names:
                - 'tasks.signoz_otel-collector'
              type: 'A'
              port: 8888
        - job_name: "otel-collector-hostmetrics"
          scrape_interval: 10s
          static_configs:
            - targets: ["otel-collector-hostmetrics:8888"]

processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

extensions:
  health_check: {}
  zpages: {}

exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
    metrics:
      receivers: [otlp, prometheus, hostmetrics]
      processors: [batch]
      exporters: [clickhousemetricswrite]
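With this configuration, the collector accepts traces over OTLP (gRPC and HTTP) and Jaeger, and the traces pipeline ships them to ClickHouse. As a minimal sketch of how an application would feed that pipeline, the snippet below uses the OpenTelemetry Python SDK to export spans to the collector's OTLP gRPC receiver. The hostname otel-collector, the default port 4317, and the insecure (plaintext) connection are assumptions that depend on your deployment; they are not part of the config above.

# Sketch only: assumes the collector is reachable at otel-collector:4317
# over plaintext gRPC (adjust endpoint/TLS to match your deployment).
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

# Configure a tracer provider that batches spans and sends them via OTLP/gRPC.
provider = TracerProvider()
provider.add_span_processor(
    BatchSpanProcessor(
        OTLPSpanExporter(endpoint="otel-collector:4317", insecure=True)
    )
)
trace.set_tracer_provider(provider)

# Emit one example span; it will flow through the traces pipeline
# (jaeger/otlp receivers -> batch processor -> clickhouse exporter).
tracer = trace.get_tracer("example-app")
with tracer.start_as_current_span("demo-operation"):
    pass  # application work goes here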