Mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz (synced 2025-08-05 20:06:03 +08:00)

* feat: enables prometheus rules and alerts which can be sent to alertmanager
* chore: adding configs for alertmanager, alert, and prom
* chore: alerts WIP
* chore: alerts WIP
* chore: alerts WIP
* chore: setRules API will update rules
* chore: initialization of prometheus related stuff moved to separate function
* chore: alerts WIP
* chore: alerts WIP
* fix: r.promConfig was nil
* feat: routing alertmanager apis to alertmanager service at nginx
* chore: not writing to localDB if string parsing gives error
* feat: list alerts API
* chore: error in creating multiple groups
* feat: CRUD APIs for rules working
* chore: changed prometheus version
* chore: updated AlertingRuleResponse struct's Id json value
* chore: updated prometheus's version
* chore: will load rules from database on bootup
* feat: crud APIs for notification channels WIP
* fix: changed ALERTMANAGER_API_PREFIX
* chore: enabling scrape and notify discover manager
* chore: fixing path for signoz.db
* chore: used transactions for rules APIs
* chore: editchannel API updated and other apis refactored
* chore: fixed merge conflicts
* chore: changing createChannel api from yaml to json reader
* chore: changing editChannel api from yaml to json reader
* chore: porting loadChannels to json format
* chore: editRule returning rule not found
* chore: pre-release
* chore: fixed db path for persistence
* release: v0.5.0
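Several of the commits above concern notification channels that are ultimately delivered through Alertmanager (the createChannel/editChannel/loadChannels APIs, now driven by JSON instead of YAML). As a rough sketch of what such a channel resolves to on the Alertmanager side, a minimal alertmanager.yml with a single webhook receiver could look like the following; the receiver name and URL are illustrative placeholders, not values shipped with SigNoz.

route:
  receiver: default-webhook   # hypothetical receiver name
  group_by: ['alertname']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 4h

receivers:
  - name: default-webhook
    webhook_configs:
      - url: http://example.internal:8080/alerts   # placeholder endpoint
        send_resolved: true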
27 lines · 757 B · YAML
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - 'alerts.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
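The rule_files entry above makes Prometheus load 'alerts.yml' and re-evaluate it every evaluation_interval (15s); any rule that fires is forwarded to the alertmanager:9093 target defined in the alerting block. A minimal sketch of what such a rule file could contain is shown below; the group name, metric (http_requests_total), threshold, and labels are illustrative placeholders rather than rules that ship with SigNoz.

groups:
  - name: example-rules        # hypothetical group name
    rules:
      - alert: HighErrorRate   # hypothetical alert
        expr: sum(rate(http_requests_total{status=~"5.."}[5m])) / sum(rate(http_requests_total[5m])) > 0.05
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "More than 5% of requests failed over the last 5 minutes"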