Feat: QS: redis integration v0: instructions for collecting and parsing logs (#4753)

* chore: minor cleanups to postgres integration instructions

* chore: update instructions for connecting redis integration

* feat: add instructions for collecting redis logs

* chore: flesh out prerequisites for connecting redis integration

* chore: add list of metrics collected for redis
Raj Kamal Singh 2024-03-27 20:03:27 +05:30 committed by GitHub
parent a30b75a2a8
commit 0ac9f6f663
9 changed files with 466 additions and 127 deletions

View File

@@ -1,5 +1,7 @@
### Collect Postgres Logs
You can configure Postgres logs collection by providing the required collector config to your collector.
#### Create collector config file
Save the following config for collecting postgres logs in a file named `postgres-logs-collection-config.yaml`
@@ -69,14 +71,14 @@ exporters:
      "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
  # export to local collector
  # otlp/local:
  # otlp/postgres-logs:
  #   endpoint: "localhost:4317"
  #   tls:
  #     insecure: true
service:
  pipelines:
    postgresql:
    logs/postgresql:
      receivers: [filelog/postgresql]
      processors: [batch]
      exporters: [otlp/postgresql-logs]

View File

@@ -54,7 +54,7 @@ exporters:
      "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
  # export to local collector
  # otlp/local:
  # otlp/postgres:
  #   endpoint: "localhost:4317"
  #   tls:
  #     insecure: true

View File

@@ -57,15 +57,18 @@
"name": "Process ID",
"path": "attributes.pid",
"type": "string"
}, {
},
{
"name": "Timestamp",
"path": "timestamp",
"type": "timestamp"
}, {
},
{
"name": "Severity Text",
"path": "severity_text",
"type": "string"
}, {
},
{
"name": "Severity Number",
"path": "severity_number",
"type": "number"
@@ -73,167 +76,167 @@
],
"metrics": [
{
"name": "postgresql.backends",
"name": "postgresql_backends",
"type": "sum",
"unit": "number",
"description": "The number of backends."
},
{
"name": "postgresql.bgwriter.buffers.allocated",
"name": "postgresql_bgwriter_buffers_allocated",
"type": "sum",
"unit": "number",
"description": "Number of buffers allocated."
},
{
"name": "postgresql.bgwriter.buffers.writes",
"name": "postgresql_bgwriter_buffers_writes",
"type": "sum",
"unit": "number",
"description": "Number of buffers written."
},
{
"name": "postgresql.bgwriter.checkpoint.count",
"name": "postgresql_bgwriter_checkpoint_count",
"type": "sum",
"unit": "number",
"description": "The number of checkpoints performed."
},
{
"name": "postgresql.bgwriter.duration",
"name": "postgresql_bgwriter_duration",
"type": "sum",
"unit": "ms",
"description": "Total time spent writing and syncing files to disk by checkpoints."
},
{
"name": "postgresql.bgwriter.maxwritten",
"name": "postgresql_bgwriter_maxwritten",
"type": "sum",
"unit": "number",
"description": "Number of times the background writer stopped a cleaning scan because it had written too many buffers."
},
{
"name": "postgresql.blocks_read",
"name": "postgresql_blocks_read",
"type": "sum",
"unit": "number",
"description": "The number of blocks read."
},
{
"name": "postgresql.commits",
"name": "postgresql_commits",
"type": "sum",
"unit": "number",
"description": "The number of commits."
},
{
"name": "postgresql.connection.max",
"name": "postgresql_connection_max",
"type": "gauge",
"unit": "number",
"description": "Configured maximum number of client connections allowed"
},
{
"name": "postgresql.database.count",
"name": "postgresql_database_count",
"type": "sum",
"unit": "number",
"description": "Number of user databases."
},
{
"name": "postgresql.database.locks",
"name": "postgresql_database_locks",
"type": "gauge",
"unit": "number",
"description": "The number of database locks."
},
{
"name": "postgresql.db_size",
"name": "postgresql_db_size",
"type": "sum",
"unit": "Bytes",
"description": "The database disk usage."
},
{
"name": "postgresql.deadlocks",
"name": "postgresql_deadlocks",
"type": "sum",
"unit": "number",
"description": "The number of deadlocks."
},
{
"name": "postgresql.index.scans",
"name": "postgresql_index_scans",
"type": "sum",
"unit": "number",
"description": "The number of index scans on a table."
},
{
"name": "postgresql.index.size",
"name": "postgresql_index_size",
"type": "gauge",
"unit": "Bytes",
"description": "The size of the index on disk."
},
{
"name": "postgresql.operations",
"name": "postgresql_operations",
"type": "sum",
"unit": "number",
"description": "The number of db row operations."
},
{
"name": "postgresql.replication.data_delay",
"name": "postgresql_replication_data_delay",
"type": "gauge",
"unit": "Bytes",
"description": "The amount of data delayed in replication."
},
{
"name": "postgresql.rollbacks",
"name": "postgresql_rollbacks",
"type": "sum",
"unit": "number",
"description": "The number of rollbacks."
},
{
"name": "postgresql.rows",
"name": "postgresql_rows",
"type": "sum",
"unit": "number",
"description": "The number of rows in the database."
},
{
"name": "postgresql.sequential_scans",
"name": "postgresql_sequential_scans",
"type": "sum",
"unit": "number",
"description": "The number of sequential scans."
},
{
"name": "postgresql.table.count",
"name": "postgresql_table_count",
"type": "sum",
"unit": "number",
"description": "Number of user tables in a database."
},
{
"name": "postgresql.table.size",
"name": "postgresql_table_size",
"type": "sum",
"unit": "Bytes",
"description": "Disk space used by a table."
},
{
"name": "postgresql.table.vacuum.count",
"name": "postgresql_table_vacuum_count",
"type": "sum",
"unit": "number",
"description": "Number of times a table has manually been vacuumed."
},
{
"name": "postgresql.temp_files",
"name": "postgresql_temp_files",
"type": "sum",
"unit": "number",
"description": "The number of temp files."
},
{
"name": "postgresql.wal.age",
"name": "postgresql_wal_age",
"type": "gauge",
"unit": "seconds",
"description": "Age of the oldest WAL file."
},
{
"name": "postgresql.wal.delay",
"name": "postgresql_wal_delay",
"type": "gauge",
"unit": "seconds",
"description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
},
{
"name": "postgresql.wal.lag",
"name": "postgresql_wal_lag",
"type": "gauge",
"unit": "seconds",
"description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
}
]
}
}
}

View File

@@ -0,0 +1,102 @@
### Collect Redis Logs
You can configure Redis logs collection by providing the required collector config to your collector.
#### Create collector config file
Save the following config for collecting redis logs in a file named `redis-logs-collection-config.yaml`
```yaml
receivers:
  filelog/redis:
    include: ["${env:REDIS_LOG_FILE}"]
    operators:
      # Parse default redis log format
      # pid:role timestamp log_level message
      - type: regex_parser
        if: body matches '^(?P<pid>\\d+):(?P<role>\\w+) (?P<ts>\\d{2} \\w+ \\d{4} \\d{2}:\\d{2}:\\d{2}\\.\\d+) (?P<log_level>[.\\-*#]) (?P<message>.*)$'
        parse_from: body
        regex: '^(?P<pid>\d+):(?P<role>\w+) (?P<ts>\d{2} \w+ \d{4} \d{2}:\d{2}:\d{2}\.\d+) (?P<log_level>[.\-*#]) (?P<message>.*)$'
        timestamp:
          parse_from: attributes.ts
          layout: '02 Jan 2006 15:04:05.000'
          layout_type: gotime
        severity:
          parse_from: attributes.log_level
          overwrite_text: true
          mapping:
            debug: '.'
            info:
              - '-'
              - '*'
            warning: '#'
        on_error: send
      - type: move
        if: attributes.message != nil
        from: attributes.message
        to: body
      - type: remove
        if: attributes.log_level != nil
        field: attributes.log_level
      - type: remove
        if: attributes.ts != nil
        field: attributes.ts
      - type: add
        field: attributes.source
        value: redis
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
exporters:
  # export to SigNoz cloud
  otlp/redis-logs:
    endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
    tls:
      insecure: false
    headers:
      "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
  # export to local collector
  # otlp/redis-logs:
  #   endpoint: "localhost:4317"
  #   tls:
  #     insecure: true
service:
  pipelines:
    logs/redis:
      receivers: [filelog/redis]
      processors: [batch]
      exporters: [otlp/redis-logs]
```
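For reference, here are two illustrative lines in the default Redis log format that the parser above targets; `M` is the process role marker, and `.`/`-`/`*`/`#` are Redis severity markers (debug, verbose, notice, and warning respectively):
```
1:M 27 Mar 2024 20:03:27.345 * Ready to accept connections tcp
1:M 27 Mar 2024 20:03:28.120 # Warning: Memory overcommit must be enabled
```
Under the mapping above, the first line is assigned severity `info` and the second `warning`.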
#### Set Environment Variables
Set the following environment variables in your otel-collector environment:
```bash
# Path of the Redis server log file; must be readable by the otel collector
export REDIS_LOG_FILE=/var/log/redis.log
# region specific SigNoz cloud ingestion endpoint
export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
# your SigNoz ingestion key
export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
```
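Before starting the collector, it can help to confirm that the log file is actually readable from the collector's environment. A minimal sketch, assuming the collector runs as your current shell user:
```bash
# The filelog receiver collects nothing if the file cannot be read.
test -r "$REDIS_LOG_FILE" && echo "log file is readable" || echo "log file is NOT readable: check the path and file permissions"
```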
#### Use collector config file
Make the collector config file available to your otel collector, then use it by adding the following flag to the command that runs your collector:
```bash
--config redis-logs-collection-config.yaml
```
Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
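For example, assuming a collector binary named `otelcol-contrib` that already runs with a base config (both file names below are illustrative), the logs config can be layered on top:
```bash
# one --config flag per config file
otelcol-contrib --config ./base-config.yaml --config ./redis-logs-collection-config.yaml
```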

View File

@@ -0,0 +1,93 @@
### Collect Redis Metrics
You can configure Redis metrics collection by providing the required collector config to your collector.
#### Create collector config file
Save the following config for collecting Redis metrics in a file named `redis-metrics-collection-config.yaml`
```yaml
receivers:
  redis:
    # The hostname and port of the Redis instance, separated by a colon.
    endpoint: ${env:REDIS_ENDPOINT}
    # The frequency at which to collect metrics from the Redis instance.
    collection_interval: 60s
    # The password used to access the Redis instance; must match the password specified in the requirepass server configuration option.
    password: ${env:REDIS_PASSWORD}
    # # Defines the network to use for connecting to the server. Valid values are `tcp` or `unix`
    # transport: tcp
    # tls:
    #   insecure: false
    #   ca_file: /etc/ssl/certs/ca-certificates.crt
    #   cert_file: /etc/ssl/certs/redis.crt
    #   key_file: /etc/ssl/certs/redis.key
    metrics:
      redis.maxmemory:
        enabled: true
      redis.cmd.latency:
        enabled: true
processors:
  # enriches the data with additional host information
  # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
  resourcedetection/system:
    # add additional detectors if needed
    detectors: ["system"]
    system:
      hostname_sources: ["os"]
exporters:
  # export to SigNoz cloud
  otlp/redis:
    endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
    tls:
      insecure: false
    headers:
      "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
  # export to local collector
  # otlp/redis:
  #   endpoint: "localhost:4317"
  #   tls:
  #     insecure: true
service:
  pipelines:
    metrics/redis:
      receivers: [redis]
      # note: remove this processor if the collector is not running on the same host as the redis instance
      processors: [resourcedetection/system]
      exporters: [otlp/redis]
```
#### Set Environment Variables
Set the following environment variables in your otel-collector environment:
```bash
# Redis endpoint reachable from the otel collector
export REDIS_ENDPOINT="localhost:6379"
# password used to access the Redis instance.
# must match the password specified in the requirepass server configuration option.
# can be left empty if the redis server is not configured to require a password.
export REDIS_PASSWORD=""
# region specific SigNoz cloud ingestion endpoint
export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
# your SigNoz ingestion key
export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
```
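Optionally, verify from the collector host that the endpoint is reachable before starting the collector. A quick sketch using `redis-cli`, if it happens to be installed; the parameter expansions assume the `host:port` form used above:
```bash
# Should print PONG if the server is reachable and the password, if any, is accepted.
redis-cli -h "${REDIS_ENDPOINT%:*}" -p "${REDIS_ENDPOINT#*:}" ${REDIS_PASSWORD:+-a "$REDIS_PASSWORD"} ping
```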
#### Use collector config file
Make the collector config file available to your otel collector, then use it by adding the following flag to the command that runs your collector:
```bash
--config redis-metrics-collection-config.yaml
```
Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.

View File

@ -1,63 +0,0 @@
### Configure otel collector
#### Save collector config file
Save the following collector config in a file named `redis-collector-config.yaml`
```bash
receivers:
  redis:
    # The hostname and port of the Redis instance, separated by a colon.
    endpoint: "localhost:6379"
    # The frequency at which to collect metrics from the Redis instance.
    collection_interval: 60s
    # # The password used to access the Redis instance; must match the password specified in the requirepass server configuration option.
    # password: ${env:REDIS_PASSWORD}
    # # Defines the network to use for connecting to the server. Valid Values are `tcp` or `Unix`
    # transport: tcp
    # tls:
    #   insecure: false
    #   ca_file: /etc/ssl/certs/ca-certificates.crt
    #   cert_file: /etc/ssl/certs/redis.crt
    #   key_file: /etc/ssl/certs/redis.key
    metrics:
      redis.maxmemory:
        enabled: true
      redis.cmd.latency:
        enabled: true
processors:
  # enriches the data with additional host information
  # see https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#resource-detection-processor
  resourcedetection/system:
    # add additional detectors if needed
    detectors: ["system"]
    system:
      hostname_sources: ["os"]
exporters:
  # export to local collector
  otlp/local:
    endpoint: "localhost:4317"
    tls:
      insecure: true
  # export to SigNoz cloud
  otlp/signoz:
    endpoint: "ingest.{region}.signoz.cloud:443"
    tls:
      insecure: false
    headers:
      "signoz-access-token": "<SIGNOZ_INGESTION_KEY>"
service:
  pipelines:
    metrics/redis:
      receivers: [redis]
      # note: remove this processor if the collector host is not running on the same host as the redis instance
      processors: [resourcedetection/system]
      exporters: [otlp/local]
```
#### Use collector config file
Run your collector with the added flag `--config redis-collector-config.yaml`

View File

@@ -1,5 +1,20 @@
### Prepare redis for monitoring
## Before You Begin
- Have a running redis instance
- Have the monitoring user created
- Have the monitoring user granted the necessary permissions
To configure metrics and logs collection for a Redis server, you need the following.
### Ensure Redis server is running a supported version
Redis server versions newer than 3.0 are supported.
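A quick way to check this, assuming `redis-cli` is available and the server runs on the default local endpoint (both assumptions are illustrative):
```bash
# Prints redis_version:<x.y.z>; it should report a version newer than 3.0.
redis-cli -h localhost -p 6379 INFO server | grep redis_version
```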
### Ensure OTEL Collector is running with access to the Redis server
#### Ensure that an OTEL collector is running in your deployment environment
If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/)
If already installed, ensure that the collector version is v0.88.0 or newer.
Also ensure that you can provide config files to the collector, and that you can set the environment variables and command-line flags used to run it.
#### Ensure that the OTEL collector can access the Redis server
To collect metrics, the collector must be able to access the Redis server as a client.
To collect logs, the collector must be able to read the Redis server log file.

View File

@@ -1,7 +1,7 @@
{
"id": "redis",
"title": "Redis",
"description": "Monitor redis using logs and metrics.",
"description": "Monitor redis with metrics and logs",
"author": {
"name": "SigNoz",
"email": "integrations@signoz.io",
@@ -18,8 +18,12 @@
"instructions": "file://config/prerequisites.md"
},
{
"title": "Configure Otel Collector",
"instructions": "file://config/configure-otel-collector.md"
"title": "Collect Metrics",
"instructions": "file://config/collect-metrics.md"
},
{
"title": "Collect Logs",
"instructions": "file://config/collect-logs.md"
}
],
"assets": {
@@ -29,7 +33,7 @@
]
},
"dashboards": [
"file://assets/dashboards/overview.json"
"file://assets/dashboards/overview.json"
],
"alerts": []
},
@@ -52,37 +56,218 @@
"data_collected": {
"logs": [
{
"name": "Request Method",
"path": "attributes[\"http.request.method\"]",
"type": "string",
"description": "HTTP method"
"name": "Process ID",
"path": "attributes.pid",
"type": "string"
},
{
"name": "Request Path",
"path": "attributes[\"url.path\"]",
"type": "string",
"description": "path requested"
"name": "Process Role",
"path": "attributes.role",
"type": "string"
},
{
"name": "Response Status Code",
"path": "attributes[\"http.response.status_code\"]",
"type": "int",
"description": "HTTP response code"
"name": "Timestamp",
"path": "timestamp",
"type": "timestamp"
},
{
"name": "Severity Text",
"path": "severity_text",
"type": "string"
},
{
"name": "Severity Number",
"path": "severity_number",
"type": "number"
}
],
"metrics": [
{
"name": "http.server.request.duration",
"type": "Histogram",
"unit": "s",
"description": "Duration of HTTP server requests"
"name": "redis_commands_processed",
"type": "Sum",
"unit": "number",
"description": "Total number of commands processed by the server"
},
{
"name": "http.server.active_requests",
"type": "UpDownCounter",
"unit": "{ request }",
"description": "Number of active HTTP server requests"
"name": "redis_cpu_time",
"type": "Sum",
"unit": "s",
"description": "System CPU consumed by the Redis server in seconds since server start"
},
{
"name": "redis_keys_expired",
"type": "Sum",
"unit": "number",
"description": "Total number of key expiration events"
},
{
"name": "redis_db_expires",
"type": "Gauge",
"unit": "number",
"description": "Number of keyspace keys with an expiration"
},
{
"name": "redis_commands",
"type": "Gauge",
"unit": "ops/s",
"description": "Number of commands processed per second"
},
{
"name": "redis_replication_offset",
"type": "Gauge",
"unit": "Bytes",
"description": "The server's current replication offset"
},
{
"name": "redis_net_input",
"type": "Sum",
"unit": "Bytes",
"description": "The total number of bytes read from the network"
},
{
"name": "redis_clients_connected",
"type": "Sum",
"unit": "number",
"description": "Number of client connections (excluding connections from replicas)"
},
{
"name": "redis_keys_evicted",
"type": "Sum",
"unit": "number",
"description": "Number of evicted keys due to maxmemory limit"
},
{
"name": "redis_maxmemory",
"type": "Gauge",
"unit": "Bytes",
"description": "The value of the maxmemory configuration directive"
},
{
"name": "redis_clients_max_input_buffer",
"type": "Gauge",
"unit": "Bytes",
"description": "Biggest input buffer among current client connections"
},
{
"name": "redis_cmd_latency",
"type": "Gauge",
"unit": "s",
"description": "Command execution latency"
},
{
"name": "redis_memory_lua",
"type": "Gauge",
"unit": "Bytes",
"description": "Number of bytes used by the Lua engine"
},
{
"name": "redis_replication_backlog_first_byte_offset",
"type": "Gauge",
"unit": "Bytes",
"description": "The master offset of the replication backlog buffer"
},
{
"name": "redis_keyspace_hits",
"type": "Sum",
"unit": "number",
"description": "Number of successful lookup of keys in the main dictionary"
},
{
"name": "redis_clients_blocked",
"type": "Sum",
"unit": "number",
"description": "Number of clients pending on a blocking call"
},
{
"name": "redis_connections_rejected",
"type": "Sum",
"unit": "number",
"description": "Number of connections rejected because of maxclients limit"
},
{
"name": "redis_latest_fork",
"type": "Gauge",
"unit": "us",
"description": "Duration of the latest fork operation in microseconds"
},
{
"name": "redis_clients_max_output_buffer",
"type": "Gauge",
"unit": "Bytes",
"description": "Longest output list among current client connections"
},
{
"name": "redis_slaves_connected",
"type": "Sum",
"unit": "number",
"description": "Number of connected replicas"
},
{
"name": "redis_db_keys",
"type": "Gauge",
"unit": "number",
"description": "Number of keyspace keys"
},
{
"name": "redis_keyspace_misses",
"type": "Sum",
"unit": "number",
"description": "Number of failed lookup of keys in the main dictionary"
},
{
"name": "redis_uptime",
"type": "Sum",
"unit": "s",
"description": "Number of seconds since Redis server start"
},
{
"name": "redis_memory_used",
"type": "Gauge",
"unit": "Bytes",
"description": "Total number of bytes allocated by Redis using its allocator"
},
{
"name": "redis_net_output",
"type": "Sum",
"unit": "Bytes",
"description": "The total number of bytes written to the network"
},
{
"name": "redis_connections_received",
"type": "Sum",
"unit": "number",
"description": "Total number of connections accepted by the server"
},
{
"name": "redis_rdb_changes_since_last_save",
"type": "Sum",
"unit": "number",
"description": "Number of changes since the last dump"
},
{
"name": "redis_memory_rss",
"type": "Gauge",
"unit": "Bytes",
"description": "Number of bytes that Redis allocated as seen by the operating system"
},
{
"name": "redis_db_avg_ttl",
"type": "Gauge",
"unit": "ms",
"description": "Average keyspace keys TTL"
},
{
"name": "redis_memory_peak",
"type": "Gauge",
"unit": "Bytes",
"description": "Peak memory consumed by Redis (in bytes)"
},
{
"name": "redis_memory_fragmentation_ratio",
"type": "Gauge",
"unit": "number",
"description": "Ratio between used_memory_rss and used_memory"
}
]
}
}
}

View File

@ -1,3 +1,5 @@
### Monitor Redis with SigNoz
Parse your Redis logs and collect key metrics.
Collect key Redis metrics and view them with an out-of-the-box dashboard.
Collect and parse Redis logs to populate timestamp, severity, and other log attributes for better querying and aggregation.