Feat: postgres integration v0 (#4704)

* chore: update annotations for pre blocks in configuration instructions

* chore: update list of collected metrics for postgres integration

* chore: change non-string units to string in metrics collected list

* chore: some cleanups for postgres config instructions

* chore: some cleanup to metrics connection status resource labels

* chore: remove stub pipeline in postgres integration - no interesting log parsing to be done
Raj Kamal Singh 2024-03-18 18:20:12 +05:30 committed by GitHub
parent 01bb39da6a
commit 6f3183823f
5 changed files with 206 additions and 88 deletions

View File

@@ -9,6 +9,7 @@ import (
     "io"
     "net/http"
     "regexp"
+    "slices"
     "strconv"
     "strings"
     "sync"
@@ -2574,9 +2575,19 @@ func (ah *APIHandler) calculateConnectionStatus(
     } else if statusForLastReceivedMetric != nil {
         resourceSummaryParts := []string{}
         for k, v := range statusForLastReceivedMetric.LastReceivedLabels {
-            resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
-                "%s=%s", k, v,
-            ))
+            interestingLabels := []string{
+                "container_name", "host_name", "node_name",
+                "pod_name", "deployment_name", "cluster_name",
+                "namespace_name", "job_name", "service_name",
+            }
+            isInterestingKey := !strings.HasPrefix(k, "_") && slices.ContainsFunc(
+                interestingLabels, func(l string) bool { return strings.Contains(k, l) },
+            )
+            if isInterestingKey {
+                resourceSummaryParts = append(resourceSummaryParts, fmt.Sprintf(
+                    "%s=%s", k, v,
+                ))
+            }
         }
         result.Metrics = &integrations.SignalConnectionStatus{

View File

@@ -1,33 +0,0 @@
-{
-  "id": "parse-default-postgres-access-log",
-  "name": "Parse default postgres access log",
-  "alias": "parse-default-postgres-access-log",
-  "description": "Parse standard postgres access log",
-  "enabled": true,
-  "filter": {
-    "op": "AND",
-    "items": [
-      {
-        "key": {
-          "type": "tag",
-          "key": "source",
-          "dataType": "string"
-        },
-        "op": "=",
-        "value": "postgres"
-      }
-    ]
-  },
-  "config": [
-    {
-      "type": "grok_parser",
-      "id": "parse-body-grok",
-      "enabled": true,
-      "orderId": 1,
-      "name": "Parse Body",
-      "parse_to": "attributes",
-      "pattern": "%{GREEDYDATA}",
-      "parse_from": "body"
-    }
-  ]
-}

View File

@@ -1,14 +1,14 @@
 ### Configure otel collector

-#### Save collector config file
+#### Create collector config file

 Save the following collector config in a file named `postgres-collector-config.yaml`

-```bash
+```yaml
 receivers:
   postgresql:
     # The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be host:port. If transport is set to unix, the endpoint will internally be translated from host:port to /host.s.PGSQL.port
-    endpoint: "localhost:5432"
+    endpoint: ${env:POSTGRESQL_ENDPOINT}
     # The frequency at which to collect metrics from the Postgres instance.
     collection_interval: 60s
     # The username used to access the postgres instance
@@ -17,8 +17,6 @@ receivers:
     password: ${env:POSTGRESQL_PASSWORD}
     # The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases
     databases: []
-    # List of databases which will be excluded when collecting statistics.
-    exclude_databases: []
     # # Defines the network to use for connecting to the server. Valid Values are `tcp` or `unix`
     # transport: tcp
     tls:
@@ -45,18 +43,13 @@ processors:
     hostname_sources: ["os"]

 exporters:
-  # export to local collector
-  otlp/local:
-    endpoint: "localhost:4317"
-    tls:
-      insecure: true
   # export to SigNoz cloud
   otlp/signoz:
-    endpoint: "ingest.{region}.signoz.cloud:443"
+    endpoint: "ingest.${env:SIGNOZ_REGION}.signoz.cloud:443"
     tls:
       insecure: false
     headers:
-      "signoz-access-token": "<SIGNOZ_INGESTION_KEY>"
+      "signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"

 service:
   pipelines:
@@ -64,9 +57,31 @@ service:
       receivers: [postgresql]
       # note: remove this processor if the collector host is not running on the same host as the postgres instance
       processors: [resourcedetection/system]
-      exporters: [otlp/local]
+      exporters: [otlp/signoz]
 ```

+#### Set Environment Variables
+Set the following environment variables in your otel-collector environment:
+```bash
+# password for postgres monitoring user
+export POSTGRESQL_PASSWORD="password"
+# postgres endpoint reachable from the otel collector
+export POSTGRESQL_ENDPOINT="host:port"
+# your signoz ingestion key
+export SIGNOZ_INGESTION_KEY="key"
+# your signoz region (e.g. us, eu, in ...)
+export SIGNOZ_REGION="us"
+```
+
 #### Use collector config file
-Run your collector with the added flag `--config postgres-collector-config.yaml`
+Make the `postgres-collector-config.yaml` file available to your otel collector and add the flag `--config postgres-collector-config.yaml` to the command for running your otel collector.
+
+Note: the collector can use multiple config files, specified by multiple occurrences of the `--config` flag.
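For reference, the resulting run sequence looks something like the following — a minimal sketch, assuming the contrib distribution of the collector (binary name `otelcol-contrib`); adjust the binary name and file paths for your installation:

```bash
# assumptions: otelcol-contrib is on PATH and postgres-collector-config.yaml
# is in the current directory; substitute real values for the placeholders
export POSTGRESQL_PASSWORD="password"
export POSTGRESQL_ENDPOINT="localhost:5432"
export SIGNOZ_INGESTION_KEY="key"
export SIGNOZ_REGION="us"

# --config can be repeated to combine this file with an existing collector config
otelcol-contrib --config postgres-collector-config.yaml
```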

View File

@@ -8,7 +8,7 @@ This receiver supports PostgreSQL versions 9.6+

 For PostgreSQL versions 10+, run:

-```bash
+```sql
 create user monitoring with password '<PASSWORD>';
 grant pg_monitor to monitoring;
 grant SELECT ON pg_stat_database to monitoring;
@@ -16,11 +16,7 @@ grant SELECT ON pg_stat_database to monitoring;

 For PostgreSQL versions >= 9.6 and <10, run:

-```bash
+```sql
 create user monitoring with password '<PASSWORD>';
 grant SELECT ON pg_stat_database to monitoring;
 ```
-
-Set the following environment variables:
-- POSTGRESQL_PASSWORD
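To sanity-check the grants before wiring up the collector, one can connect as the new user and read from `pg_stat_database` — a sketch, assuming `psql` is installed and the server is reachable at `localhost:5432`:

```bash
# PGPASSWORD avoids the interactive prompt; use the password chosen above
PGPASSWORD='<PASSWORD>' psql "host=localhost port=5432 dbname=postgres user=monitoring" \
  -c "SELECT datname, xact_commit FROM pg_stat_database LIMIT 5;"
```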

View File

@@ -24,12 +24,10 @@
   ],
   "assets": {
     "logs": {
-      "pipelines": [
-        "file://assets/pipelines/log-parser.json"
-      ]
+      "pipelines": []
     },
     "dashboards": [
-        "file://assets/dashboards/overview.json"
+      "file://assets/dashboards/overview.json"
     ],
     "alerts": []
   },
@@ -50,38 +48,169 @@
     }
   },
   "data_collected": {
-    "logs": [
-      {
-        "name": "Request Method",
-        "path": "attributes[\"http.request.method\"]",
-        "type": "string",
-        "description": "HTTP method"
-      },
-      {
-        "name": "Request Path",
-        "path": "attributes[\"url.path\"]",
-        "type": "string",
-        "description": "path requested"
-      },
-      {
-        "name": "Response Status Code",
-        "path": "attributes[\"http.response.status_code\"]",
-        "type": "int",
-        "description": "HTTP response code"
-      }
-    ],
+    "logs": [],
     "metrics": [
       {
-        "name": "http.server.request.duration",
-        "type": "Histogram",
-        "unit": "s",
-        "description": "Duration of HTTP server requests"
+        "name": "postgresql.backends",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of backends."
       },
       {
-        "name": "http.server.active_requests",
-        "type": "UpDownCounter",
-        "unit": "{ request }",
-        "description": "Number of active HTTP server requests"
-      }
+        "name": "postgresql.bgwriter.buffers.allocated",
+        "type": "sum",
+        "unit": "{buffers}",
+        "description": "Number of buffers allocated."
+      },
+      {
+        "name": "postgresql.bgwriter.buffers.writes",
+        "type": "sum",
+        "unit": "{buffers}",
+        "description": "Number of buffers written."
+      },
+      {
+        "name": "postgresql.bgwriter.checkpoint.count",
+        "type": "sum",
+        "unit": "{checkpoints}",
+        "description": "The number of checkpoints performed."
+      },
+      {
+        "name": "postgresql.bgwriter.duration",
+        "type": "sum",
+        "unit": "ms",
+        "description": "Total time spent writing and syncing files to disk by checkpoints."
+      },
+      {
+        "name": "postgresql.bgwriter.maxwritten",
+        "type": "sum",
+        "unit": "1",
+        "description": "Number of times the background writer stopped a cleaning scan because it had written too many buffers."
+      },
+      {
+        "name": "postgresql.blocks_read",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of blocks read."
+      },
+      {
+        "name": "postgresql.commits",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of commits."
+      },
+      {
+        "name": "postgresql.connection.max",
+        "type": "gauge",
+        "unit": "{connections}",
+        "description": "Configured maximum number of client connections allowed"
+      },
+      {
+        "name": "postgresql.database.count",
+        "type": "sum",
+        "unit": "{databases}",
+        "description": "Number of user databases."
+      },
+      {
+        "name": "postgresql.database.locks",
+        "type": "gauge",
+        "unit": "{lock}",
+        "description": "The number of database locks."
+      },
+      {
+        "name": "postgresql.db_size",
+        "type": "sum",
+        "unit": "By",
+        "description": "The database disk usage."
+      },
+      {
+        "name": "postgresql.deadlocks",
+        "type": "sum",
+        "unit": "{deadlock}",
+        "description": "The number of deadlocks."
+      },
+      {
+        "name": "postgresql.index.scans",
+        "type": "sum",
+        "unit": "{scans}",
+        "description": "The number of index scans on a table."
+      },
+      {
+        "name": "postgresql.index.size",
+        "type": "gauge",
+        "unit": "By",
+        "description": "The size of the index on disk."
+      },
+      {
+        "name": "postgresql.operations",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of db row operations."
+      },
+      {
+        "name": "postgresql.replication.data_delay",
+        "type": "gauge",
+        "unit": "By",
+        "description": "The amount of data delayed in replication."
+      },
+      {
+        "name": "postgresql.rollbacks",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of rollbacks."
+      },
+      {
+        "name": "postgresql.rows",
+        "type": "sum",
+        "unit": "1",
+        "description": "The number of rows in the database."
+      },
+      {
+        "name": "postgresql.sequential_scans",
+        "type": "sum",
+        "unit": "{sequential_scan}",
+        "description": "The number of sequential scans."
+      },
+      {
+        "name": "postgresql.table.count",
+        "type": "sum",
+        "unit": "{table}",
+        "description": "Number of user tables in a database."
+      },
+      {
+        "name": "postgresql.table.size",
+        "type": "sum",
+        "unit": "By",
+        "description": "Disk space used by a table."
+      },
+      {
+        "name": "postgresql.table.vacuum.count",
+        "type": "sum",
+        "unit": "{vacuums}",
+        "description": "Number of times a table has manually been vacuumed."
+      },
+      {
+        "name": "postgresql.temp_files",
+        "type": "sum",
+        "unit": "{temp_file}",
+        "description": "The number of temp files."
+      },
+      {
+        "name": "postgresql.wal.age",
+        "type": "gauge",
+        "unit": "s",
+        "description": "Age of the oldest WAL file."
+      },
+      {
+        "name": "postgresql.wal.delay",
+        "type": "gauge",
+        "unit": "s",
+        "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
+      },
+      {
+        "name": "postgresql.wal.lag",
+        "type": "gauge",
+        "unit": "s",
+        "description": "Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it."
+      }
     ]
   }