Feat/integrations v0 mongo and nginx (#4763)

* feat: flesh out pre-requisites for collecting mongodb logs and metrics

* chore: remove stale pipelines in bundled integrations

* chore: clean up 'collect metrics' step for mongodb

* feat: add instructions for collecting and parsing mongodb logs

* feat: add metrics and logs attributes to mongodb data collected list

* feat: nginx logs collection instructions and some other cleanup

* feat: add list of parsed log attributes to data collected list for nginx

* chore: do not run pipeline population integration test if no built-in integration has a pipeline
Raj Kamal Singh 2024-03-28 19:57:07 +05:30 committed by GitHub
parent 5d5ff47d5e
commit 990fc83269
18 changed files with 617 additions and 228 deletions

View File

@ -1,33 +0,0 @@
{
"id": "parse-default-mongo-access-log",
"name": "Parse default mongo access log",
"alias": "parse-default-mongo-access-log",
"description": "Parse standard mongo access log",
"enabled": true,
"filter": {
"op": "AND",
"items": [
{
"key": {
"type": "tag",
"key": "source",
"dataType": "string"
},
"op": "=",
"value": "mongo"
}
]
},
"config": [
{
"type": "grok_parser",
"id": "parse-body-grok",
"enabled": true,
"orderId": 1,
"name": "Parse Body",
"parse_to": "attributes",
"pattern": "%{GREEDYDATA}",
"parse_from": "body"
}
]
}

View File

@ -0,0 +1,117 @@
### Collect MongoDB Logs
You can configure MongoDB logs collection by providing the required collector config to your collector.
#### Create collector config file
Save the following config for collecting MongoDB logs in a file named `mongodb-logs-collection-config.yaml`
```yaml
receivers:
filelog/mongodb:
include: ["${env:MONGODB_LOG_FILE}"]
operators:
# Parse structured mongodb logs
# For more details, see https://www.mongodb.com/docs/manual/reference/log-messages/#structured-logging
- type: json_parser
if: body matches '^\\s*{\\s*".*}\\s*$'
parse_from: body
parse_to: attributes
timestamp:
parse_from: attributes.t.$$date
layout: '2006-01-02T15:04:05.000-07:00'
layout_type: gotime
severity:
parse_from: attributes.s
overwrite_text: true
mapping:
debug:
- D1
- D2
- D3
- D4
- D5
info: I
warn: W
error: E
fatal: F
- type: flatten
if: attributes.attr != nil
field: attributes.attr
- type: move
if: attributes.msg != nil
from: attributes.msg
to: body
- type: move
if: attributes.c != nil
from: attributes.c
to: attributes.component
- type: move
if: attributes.id != nil
from: attributes.id
to: attributes.mongo_log_id
- type: remove
if: attributes.t != nil
field: attributes.t
- type: remove
if: attributes.s != nil
field: attributes.s
- type: add
field: attributes.source
value: mongodb
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
exporters:
# export to SigNoz cloud
otlp/mongodb-logs:
endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
tls:
insecure: false
headers:
"signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
# export to local collector
# otlp/mongodb-logs:
# endpoint: "localhost:4317"
# tls:
# insecure: true
service:
pipelines:
logs/mongodb:
receivers: [filelog/mongodb]
processors: [batch]
exporters: [otlp/mongodb-logs]
```
#### Set Environment Variables
Set the following environment variables in your otel-collector environment:
```bash
# path of MongoDB server log file. must be accessible by the otel collector
export MONGODB_LOG_FILE=/var/log/mongodb.log
# region specific SigNoz cloud ingestion endpoint
export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
# your SigNoz ingestion key
export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
```
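As a quick sanity check, you can confirm that the log file is in the structured JSON format the parser above expects (a minimal sketch, assuming the env var is already set in your shell):
```bash
# MongoDB 4.4+ writes structured logs, so every line should be a JSON object.
# Print the first line and confirm it starts with '{'.
head -n 1 "${MONGODB_LOG_FILE}"
```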
#### Use collector config file
Make the collector config file available to your otel collector, then use it by adding the following flag to the command that runs your collector:
```bash
--config mongodb-logs-collection-config.yaml
```
Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag.
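For example, if your collector already runs with a base config, the full command might look like this (a sketch; the `otelcol-contrib` binary name and the base config file name are assumptions, not part of this guide):
```bash
# The collector merges both config files; only the second --config flag is new.
otelcol-contrib \
  --config otel-collector-config.yaml \
  --config mongodb-logs-collection-config.yaml
```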

View File

@ -1,19 +1,21 @@
### Configure otel collector
### Collect MongoDB Metrics
#### Save collector config file
You can configure MongoDB metrics collection by providing the required collector config to your collector.
Save the following collector config in a file named `mongo-collector-config.yaml`
#### Create collector config file
```bash
Save the following config for collecting MongoDB metrics in a file named `mongodb-metrics-collection-config.yaml`
```yaml
receivers:
mongodb:
# - For standalone MongoDB deployments this is the hostname and port of the mongod instance
# - For replica sets specify the hostnames and ports of the mongod instances that are in the replica set configuration. If the replica_set field is specified, nodes will be autodiscovered.
# - For a sharded MongoDB deployment, please specify a list of the mongos hosts.
hosts:
- endpoint: 127.0.0.1:27017
- endpoint: ${env:MONGODB_ENDPOINT}
# If authentication is required, a user with clusterMonitor permissions can be provided here
username: monitoring
username: ${env:MONGODB_USERNAME}
# If authentication is required, the password can be provided here.
password: ${env:MONGODB_PASSWORD}
collection_interval: 60s
@ -46,18 +48,19 @@ processors:
hostname_sources: ["os"]
exporters:
# export to local collector
otlp/local:
endpoint: "localhost:4317"
tls:
insecure: true
# export to SigNoz cloud
otlp/signoz:
endpoint: "ingest.{region}.signoz.cloud:443"
otlp/mongodb:
endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
tls:
insecure: false
headers:
"signoz-access-token": "<SIGNOZ_INGESTION_KEY>"
"signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
# export to local collector
# otlp/mongodb:
# endpoint: "localhost:4317"
# tls:
# insecure: true
service:
pipelines:
@ -65,10 +68,37 @@ service:
receivers: [mongodb]
# note: remove this processor if the collector is not running on the same host as the mongo instance
processors: [resourcedetection/system]
exporters: [otlp/local]
exporters: [otlp/mongodb]
```
#### Set Environment Variables
Set the following environment variables in your otel-collector environment:
```bash
# MongoDB endpoint reachable from the otel collector
export MONGODB_ENDPOINT="host:port"
# username for MongoDB monitoring user
export MONGODB_USERNAME="monitoring"
# password for MongoDB monitoring user
export MONGODB_PASSWORD="<PASSWORD>"
# region specific SigNoz cloud ingestion endpoint
export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
# your SigNoz ingestion key
export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
```
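Before starting the collector, you can check that the monitoring user can actually reach the server, for example with `mongosh` (a sketch; `serverStatus` is used here because it requires the `clusterMonitor` role granted in the prerequisites):
```bash
# Reuse the env vars exported above; prints 1 if the call succeeds.
mongosh "mongodb://${MONGODB_USERNAME}:${MONGODB_PASSWORD}@${MONGODB_ENDPOINT}/admin" \
  --eval 'db.runCommand({ serverStatus: 1 }).ok'
```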
#### Use collector config file
Run your collector with the added flag `--config mongo-collector-config.yaml`
Make the collector config file available to your otel collector, then use it by adding the following flag to the command that runs your collector:
```bash
--config mongodb-metrics-collection-config.yaml
```
Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag.

View File

@ -1,22 +1,41 @@
### Prepare mongo for monitoring
## Before You Begin
- Have a running mongodb instance
- Have the monitoring user created
- Have the monitoring user granted the necessary permissions
To configure metrics and logs collection for MongoDB, you need the following.
MongoDB recommends setting up a least privilege user (LPU) with a `clusterMonitor` role in order to collect metrics.
### Ensure MongoDB server is prepared for monitoring
Run the following command to create a user with the necessary permissions.
- **Ensure that the MongoDB server is running a supported version**
MongoDB versions 4.4+ are supported.
You can use the following statement to determine the server version:
```js
db.version()
```
```bash
use admin
db.createUser(
{
user: "monitoring",
pwd: "<PASSWORD>",
roles: ["clusterMonitor"]
}
);
```
- **If collecting metrics, ensure that there is a MongoDB user with the required permissions**
MongoDB recommends setting up a least privilege user (LPU) with a `clusterMonitor` role in order to collect metrics.
Replace `<PASSWORD>` with a strong password and set it as the env var `MONGODB_PASSWORD`.
To create a monitoring user, run:
```js
use admin
db.createUser(
{
user: "monitoring",
pwd: "<PASSWORD>",
roles: ["clusterMonitor"]
}
);
```
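To verify that the user was created with the expected role, a minimal check from the shell looks like this (a sketch):
```bash
# Should print the user document, including the clusterMonitor role.
mongosh admin --eval 'db.getUser("monitoring")'
```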
### Ensure OTEL Collector is running with access to the MongoDB server
- **Ensure that an OTEL collector is running in your deployment environment**
If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/)
If already installed, ensure that the collector version is v0.88.0 or newer.
Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
- **Ensure that the OTEL collector can access the MongoDB server**
In order to collect metrics, the collector must be able to access the MongoDB server as a client using the monitoring user.
In order to collect logs, the collector must be able to read the MongoDB server log file.
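A couple of quick checks cover both access paths (a hedged sketch; the log file path is an assumption and varies by deployment):
```bash
# Check the server version (should be 4.4 or newer) ...
mongosh --quiet --eval 'db.version()'
# ... and confirm that the collector's user can read the server log file.
test -r /var/log/mongodb/mongod.log && echo "log file readable"
```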

View File

@ -18,18 +18,20 @@
"instructions": "file://config/prerequisites.md"
},
{
"title": "Configure Otel Collector",
"instructions": "file://config/configure-otel-collector.md"
"title": "Collect Metrics",
"instructions": "file://config/collect-metrics.md"
},
{
"title": "Collect Logs",
"instructions": "file://config/collect-logs.md"
}
],
"assets": {
"logs": {
"pipelines": [
"file://assets/pipelines/log-parser.json"
]
"pipelines": []
},
"dashboards": [
"file://assets/dashboards/overview.json"
"file://assets/dashboards/overview.json"
],
"alerts": []
},
@ -52,37 +54,207 @@
"data_collected": {
"logs": [
{
"name": "Request Method",
"path": "attributes[\"http.request.method\"]",
"type": "string",
"description": "HTTP method"
"name": "Timestamp",
"path": "timestamp",
"type": "timestamp"
},
{
"name": "Request Path",
"path": "attributes[\"url.path\"]",
"type": "string",
"description": "path requested"
"name": "Severity Text",
"path": "severity_text",
"type": "string"
},
{
"name": "Response Status Code",
"path": "attributes[\"http.response.status_code\"]",
"type": "int",
"description": "HTTP response code"
"name": "Severity Number",
"path": "severity_number",
"type": "number"
},
{
"name": "MongoDB Component",
"path": "attributes.component",
"type": "string"
}
],
"metrics": [
{
"name": "http.server.request.duration",
"type": "Histogram",
"unit": "s",
"description": "Duration of HTTP server requests"
"description": "The number of cache operations of the instance.",
"unit": "number",
"type": "Sum",
"name": "mongodb_cache_operations"
},
{
"name": "http.server.active_requests",
"type": "UpDownCounter",
"unit": "{ request }",
"description": "Number of active HTTP server requests"
"description": "The number of collections.",
"unit": "number",
"type": "Sum",
"name": "mongodb_collection_count"
},
{
"description": "The size of the collection. Data compression does not affect this value.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_data_size"
},
{
"description": "The number of connections.",
"unit": "number",
"type": "Sum",
"name": "mongodb_connection_count"
},
{
"description": "The number of extents.",
"unit": "number",
"type": "Sum",
"name": "mongodb_extent_count"
},
{
"description": "The time the global lock has been held.",
"unit": "ms",
"type": "Sum",
"name": "mongodb_global_lock_time"
},
{
"description": "The number of indexes.",
"unit": "number",
"type": "Sum",
"name": "mongodb_index_count"
},
{
"description": "Sum of the space allocated to all indexes in the database, including free index space.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_index_size"
},
{
"description": "The amount of memory used.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_memory_usage"
},
{
"description": "The number of objects.",
"unit": "number",
"type": "Sum",
"name": "mongodb_object_count"
},
{
"description": "The latency of operations.",
"unit": "us",
"type": "Gauge",
"name": "mongodb_operation_latency_time"
},
{
"description": "The number of operations executed.",
"unit": "number",
"type": "Sum",
"name": "mongodb_operation_count"
},
{
"description": "The number of replicated operations executed.",
"unit": "number",
"type": "Sum",
"name": "mongodb_operation_repl_count"
},
{
"description": "The total amount of storage allocated to this collection.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_storage_size"
},
{
"description": "The number of existing databases.",
"unit": "number",
"type": "Sum",
"name": "mongodb_database_count"
},
{
"description": "The number of times an index has been accessed.",
"unit": "number",
"type": "Sum",
"name": "mongodb_index_access_count"
},
{
"description": "The number of document operations executed.",
"unit": "number",
"type": "Sum",
"name": "mongodb_document_operation_count"
},
{
"description": "The number of bytes received.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_network_io_receive"
},
{
"description": "The number of by transmitted.",
"unit": "Bytes",
"type": "Sum",
"name": "mongodb_network_io_transmit"
},
{
"description": "The number of requests received by the server.",
"unit": "number",
"type": "Sum",
"name": "mongodb_network_request_count"
},
{
"description": "The total time spent performing operations.",
"unit": "ms",
"type": "Sum",
"name": "mongodb_operation_time"
},
{
"description": "The total number of active sessions.",
"unit": "number",
"type": "Sum",
"name": "mongodb_session_count"
},
{
"description": "The number of open cursors maintained for clients.",
"unit": "number",
"type": "Sum",
"name": "mongodb_cursor_count"
},
{
"description": "The number of cursors that have timed out.",
"unit": "number",
"type": "Sum",
"name": "mongodb_cursor_timeout_count"
},
{
"description": "Number of times the lock was acquired in the specified mode.",
"unit": "number",
"type": "Sum",
"name": "mongodb_lock_acquire_count"
},
{
"description": "Number of times the lock acquisitions encountered waits because the locks were held in a conflicting mode.",
"unit": "number",
"type": "Sum",
"name": "mongodb_lock_acquire_wait_count"
},
{
"description": "Cumulative wait time for the lock acquisitions.",
"unit": "microseconds",
"type": "Sum",
"name": "mongodb_lock_acquire_time"
},
{
"description": "Number of times the lock acquisitions encountered deadlocks.",
"unit": "number",
"type": "Sum",
"name": "mongodb_lock_deadlock_count"
},
{
"description": "The health status of the server.",
"unit": "number",
"type": "Gauge",
"name": "mongodb_health"
},
{
"description": "The amount of time that the server has been running.",
"unit": "ms",
"type": "Sum",
"name": "mongodb_uptime"
}
]
}
}
}

View File

@ -1,3 +1,6 @@
### Monitor MongoDB with SigNoz
Collect key MongoDB metrics and parse your MongoDB logs
Collect key MongoDB metrics and view them with an out-of-the-box dashboard.
Collect and parse MongoDB logs to populate timestamp, severity, and other log attributes for better querying and aggregation.

View File

@ -1,62 +0,0 @@
{
"id": "parse-default-nginx-access-log",
"name": "Parse default nginx access log",
"alias": "parse-default-nginx-access-log",
"description": "Parse standard nginx access log",
"enabled": true,
"filter": {
"op": "AND",
"items": [
{
"key": {
"type": "tag",
"key": "source",
"dataType": "string"
},
"op": "=",
"value": "nginx"
}
]
},
"config": [
{
"type": "grok_parser",
"id": "parse-body-grok",
"enabled": true,
"orderId": 1,
"name": "Parse Body",
"parse_to": "attributes",
"pattern": "%{IP:client.address} - %{USERNAME:enduser.id} \\[%{HTTPDATE:time.local}\\] \"((%{WORD:http.method} %{DATA:http.path}(\\?%{DATA:http.query})? %{WORD:network.protocol.name}/%{NOTSPACE:network.protocol.version})|%{DATA})\" %{INT:http.response.status_code:int} %{INT:http.request.body.bytes:int} \"%{NOTSPACE:http.referer}\" \"%{DATA:http.user.agent}\" %{INT:http.request.bytes:int} %{NUMBER:http.request.time:float} \\[%{DATA:proxy.upstream.name}?\\] \\[%{DATA:proxy.alternative.upstream.name}?\\] ((%{IP:network.peer.address}:%{INT:network.peer.port:int})|%{DATA})? (%{INT:http.response.bytes:int}|-)? (%{NUMBER:http.response.time:float}|-)? (%{NUMBER:network.peer.status.code:int}|-)? %{NOTSPACE:request.id}",
"parse_from": "body"
},
{
"type": "severity_parser",
"id": "parse-sev",
"enabled": true,
"orderId": 2,
"name": "Set Severity",
"parse_from": "attributes[\"http.response.status_code\"]",
"mapping": {
"debug": [
"1xx"
],
"error": [
"4xx"
],
"fatal": [
"5xx"
],
"info": [
"2xx"
],
"trace": [
"trace"
],
"warn": [
"3xx"
]
},
"overwrite_text": true
}
]
}

View File

@ -0,0 +1,139 @@
### Collect Nginx Logs
You can configure Nginx logs collection by providing the required collector config to your collector.
#### Create collector config file
Save the following config for collecting Nginx logs in a file named `nginx-logs-collection-config.yaml`
```yaml
receivers:
filelog/nginx-access-logs:
include: ["${env:NGINX_ACCESS_LOG_FILE}"]
operators:
# Parse the default nginx access log format. Nginx defaults to the "combined" log format
# $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"
# For more details, see https://nginx.org/en/docs/http/ngx_http_log_module.html
- type: regex_parser
if: body matches '^(?P<remote_addr>[0-9\\.]+) - (?P<remote_user>[^\\s]+) \\[(?P<ts>.+)\\] "(?P<request_method>\\w+?) (?P<request_path>.+?)" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+) "(?P<http_referrer>.+?)" "(?P<http_user_agent>.+?)"$'
parse_from: body
parse_to: attributes
regex: '^(?P<remote_addr>[0-9\.]+) - (?P<remote_user>[^\s]+) \[(?P<ts>.+)\] "(?P<request_method>\w+?) (?P<request_path>.+?)" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+) "(?P<http_referrer>.+?)" "(?P<http_user_agent>.+?)"$'
timestamp:
parse_from: attributes.ts
layout: "02/Jan/2006:15:04:05 -0700"
layout_type: gotime
severity:
parse_from: attributes.status
overwrite_text: true
mapping:
debug: "1xx"
info:
- "2xx"
- "3xx"
warn: "4xx"
error: "5xx"
- type: remove
if: attributes.ts != nil
field: attributes.ts
- type: add
field: attributes.source
value: nginx
filelog/nginx-error-logs:
include: ["${env:NGINX_ERROR_LOG_FILE}"]
operators:
# Parse the default nginx error log format.
# YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE
# For more details, see https://github.com/phusion/nginx/blob/master/src/core/ngx_log.c
- type: regex_parser
if: body matches '^(?P<ts>.+?) \\[(?P<log_level>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+). \\*(?P<cid>\\d+) (?P<message>.+)$'
parse_from: body
parse_to: attributes
regex: '^(?P<ts>.+?) \[(?P<log_level>\w+)\] (?P<pid>\d+)#(?P<tid>\d+). \*(?P<cid>\d+) (?P<message>.+)$'
timestamp:
parse_from: attributes.ts
layout: "2006/01/02 15:04:05"
layout_type: gotime
severity:
parse_from: attributes.log_level
overwrite_text: true
mapping:
debug: "debug"
info:
- "info"
- "notice"
warn: "warn"
error:
- "error"
- "crit"
- "alert"
fatal: "emerg"
- type: remove
if: attributes.ts != nil
field: attributes.ts
- type: move
if: attributes.message != nil
from: attributes.message
to: body
- type: add
field: attributes.source
value: nginx
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
exporters:
# export to SigNoz cloud
otlp/nginx-logs:
endpoint: "${env:OTLP_DESTINATION_ENDPOINT}"
tls:
insecure: false
headers:
"signoz-access-token": "${env:SIGNOZ_INGESTION_KEY}"
# export to local collector
# otlp/nginx-logs:
# endpoint: "localhost:4317"
# tls:
# insecure: true
service:
pipelines:
logs/nginx:
receivers: [filelog/nginx-access-logs, filelog/nginx-error-logs]
processors: [batch]
exporters: [otlp/nginx-logs]
```
#### Set Environment Variables
Set the following environment variables in your otel-collector environment:
```bash
# path of Nginx access log file. must be accessible by the otel collector
export NGINX_ACCESS_LOG_FILE=/var/log/nginx/access.log
# path of Nginx error log file. must be accessible by the otel collector
export NGINX_ERROR_LOG_FILE=/var/log/nginx/error.log
# region specific SigNoz cloud ingestion endpoint
export OTLP_DESTINATION_ENDPOINT="ingest.us.signoz.cloud:443"
# your SigNoz ingestion key
export SIGNOZ_INGESTION_KEY="signoz-ingestion-key"
```
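The access-log regex above targets Nginx's default `combined` format; you can confirm what your server actually uses by dumping the effective config (a sketch using the standard `nginx -T` flag):
```bash
# List log_format and access_log directives; if no custom log_format is set,
# Nginx falls back to the "combined" format parsed above.
nginx -T 2>/dev/null | grep -E 'log_format|access_log'
```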
#### Use collector config file
Make the collector config file available to your otel collector, then use it by adding the following flag to the command that runs your collector:
```bash
--config nginx-logs-collection-config.yaml
```
Note: the collector can use multiple config files, specified by multiple occurrences of the --config flag.

View File

@ -1 +0,0 @@
### Prepare nginx for observability

View File

@ -0,0 +1,19 @@
## Before You Begin
To configure logs collection for Nginx, you need the following.
### Ensure Nginx server is running a supported version
Ensure that your Nginx server is running a version newer than 1.0.0
### Ensure OTEL Collector is running with access to the Nginx server
- **Ensure that an OTEL collector is running in your deployment environment**
If needed, please [install an OTEL Collector](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/)
If already installed, ensure that the collector version is v0.88.0 or newer.
Also ensure that you can provide config files to the collector and that you can set environment variables and command line flags used for running it.
- **Ensure that the OTEL collector can access the Nginx server**
In order to collect logs, the collector must be able to read Nginx server log files.
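A couple of quick checks cover both points (a hedged sketch; log file paths vary by distribution):
```bash
# Print the Nginx version (written to stderr) ...
nginx -v
# ... and confirm that the collector's user can read the log files.
test -r /var/log/nginx/access.log && test -r /var/log/nginx/error.log \
  && echo "log files readable"
```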

View File

@ -15,19 +15,17 @@
"overview": "file://overview.md",
"configuration": [
{
"title": "Prepare Nginx",
"instructions": "file://config/prepare-nginx.md"
"title": "Prerequisites",
"instructions": "file://config/prerequisites.md"
},
{
"title": "Configure Otel Collector",
"instructions": "file://config/configure-otel-collector.md"
"title": "Collect Logs",
"instructions": "file://config/collect-logs.md"
}
],
"assets": {
"logs": {
"pipelines": [
"file://assets/pipelines/log-parser.json"
]
"pipelines": []
},
"dashboards": null,
"alerts": null
@ -50,38 +48,57 @@
},
"data_collected": {
"logs": [
{
"name": "Timestamp",
"path": "timestamp",
"type": "timestamp"
},
{
"name": "Severity Text",
"path": "severity_text",
"type": "string"
},
{
"name": "Severity Number",
"path": "severity_number",
"type": "number"
},
{
"name": "Body Bytes Sent",
"path": "attributes.body_bytes_sent",
"type": "string"
},
{
"name": "Referrer",
"path": "attributes.http_referrer",
"type": "string"
},
{
"name": "User Agent",
"path": "attributes.http_user_agent",
"type": "string"
},
{
"name": "Request Method",
"path": "attributes[\"http.request.method\"]",
"type": "string",
"description": "HTTP method"
"path": "attributes.request_method",
"type": "string"
},
{
"name": "Request Path",
"path": "attributes[\"url.path\"]",
"type": "string",
"description": "path requested"
"path": "attributes.request_path",
"type": "string"
},
{
"name": "Response Status Code",
"path": "attributes[\"http.response.status_code\"]",
"type": "int",
"description": "HTTP response code"
}
],
"metrics": [
{
"name": "http.server.request.duration",
"type": "Histogram",
"unit": "s",
"description": "Duration of HTTP server requests"
"path": "attributes.status",
"type": "string"
},
{
"name": "http.server.active_requests",
"type": "UpDownCounter",
"unit": "{ request }",
"description": "Number of active HTTP server requests"
"name": "Remote Address",
"path": "attributes.remote_addr",
"type": "string"
}
]
],
"metrics": []
}
}
}

View File

@ -1,3 +1,3 @@
### Monitor Nginx with SigNoz
Parse your Nginx logs and collect key metrics.
Collect and parse Nginx logs to populate timestamp, severity, and other log attributes for better querying and aggregation.

View File

@ -35,7 +35,7 @@ receivers:
- LOG
- NOTICE
- DETAIL
warning: WARNING
warn: WARNING
error: ERROR
fatal:
- FATAL

View File

@ -1,33 +0,0 @@
{
"id": "parse-default-redis-access-log",
"name": "Parse default redis access log",
"alias": "parse-default-redis-access-log",
"description": "Parse standard redis access log",
"enabled": true,
"filter": {
"op": "AND",
"items": [
{
"key": {
"type": "tag",
"key": "source",
"dataType": "string"
},
"op": "=",
"value": "redis"
}
]
},
"config": [
{
"type": "grok_parser",
"id": "parse-body-grok",
"enabled": true,
"orderId": 1,
"name": "Parse Body",
"parse_to": "attributes",
"pattern": "%{GREEDYDATA}",
"parse_from": "body"
}
]
}

View File

@ -29,7 +29,7 @@ receivers:
info:
- '-'
- '*'
warning: '#'
warn: '#'
on_error: send
- type: move
if: attributes.message != nil

View File

@ -28,9 +28,7 @@
],
"assets": {
"logs": {
"pipelines": [
"file://assets/pipelines/log-parser.json"
]
"pipelines": []
},
"dashboards": [
"file://assets/dashboards/overview.json"

View File

@ -141,9 +141,14 @@ func TestLogPipelinesForInstalledSignozIntegrations(t *testing.T) {
break
}
}
require.NotNil(testAvailableIntegration)
if testAvailableIntegration == nil {
// None of the built-in integrations include a pipeline right now.
return
}
// Installing an integration should add its pipelines to pipelines list
require.NotNil(testAvailableIntegration)
require.False(testAvailableIntegration.IsInstalled)
integrationsTB.RequestQSToInstallIntegration(
testAvailableIntegration.Id, map[string]interface{}{},