Mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz, synced 2025-08-14 19:05:59 +08:00
feat(prometheus): create a dedicated prometheus package (#7397)
This commit is contained in: parent 2cbd8733a1, commit 81c7f3221a

Makefile (4 lines changed)
@@ -74,6 +74,10 @@ go-run-enterprise: ## Runs the enterprise go backend server
	--use-logs-new-schema true \
	--use-trace-new-schema true

.PHONY: go-test
go-test: ## Runs go unit tests
	@go test -race ./...

.PHONY: go-run-community
go-run-community: ## Runs the community go backend server
	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
@@ -72,7 +72,6 @@ sqlstore:
# The path to the SQLite database file.
path: /var/lib/signoz/signoz.db

##################### APIServer #####################
apiserver:
timeout:

@@ -91,20 +90,29 @@ apiserver:
- /api/v1/version
- /

##################### TelemetryStore #####################
telemetrystore:
# Specifies the telemetrystore provider to use.
provider: clickhouse
# Maximum number of idle connections in the connection pool.
max_idle_conns: 50
# Maximum number of open connections to the database.
max_open_conns: 100
# Maximum time to wait for a connection to be established.
dial_timeout: 5s
# Specifies the telemetrystore provider to use.
provider: clickhouse
clickhouse:
# The DSN to use for ClickHouse.
dsn: http://localhost:9000
# The DSN to use for clickhouse.
dsn: tcp://localhost:9000

##################### Prometheus #####################
prometheus:
active_query_tracker:
# Whether to enable the active query tracker.
enabled: true
# The path to use for the active query tracker.
path: ""
# The maximum number of concurrent queries.
max_concurrent: 20

##################### Alertmanager #####################
alertmanager:

@@ -117,7 +125,7 @@ alertmanager:
# The poll interval for periodically syncing the alertmanager with the config in the store.
poll_interval: 1m
# The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself.
external_url: http://localhost:9093
external_url: http://localhost:8080
# The global configuration for the alertmanager. All the exahustive fields can be found in the upstream: https://github.com/prometheus/alertmanager/blob/efa05feffd644ba4accb526e98a8c6545d26a783/config/config.go#L833
global:
# ResolveTimeout is the time after which an alert is declared resolved if it has not been updated.
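For reference, a minimal sketch of how the new prometheus section above could be bound to a Go config struct. The struct, field, and tag names here are assumptions for illustration only; the actual types in pkg/prometheus are not shown in this diff.

// Sketch only: assumed struct and tag names, mirroring the YAML keys above.
package prometheus

type Config struct {
	ActiveQueryTracker ActiveQueryTrackerConfig `mapstructure:"active_query_tracker"`
}

type ActiveQueryTrackerConfig struct {
	// Whether to enable the active query tracker.
	Enabled bool `mapstructure:"enabled"`
	// The path to use for the active query tracker.
	Path string `mapstructure:"path"`
	// The maximum number of concurrent queries.
	MaxConcurrent int `mapstructure:"max_concurrent"`
}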
@@ -8,8 +8,10 @@ import (
	"github.com/jmoiron/sqlx"

	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/prometheus"
	basechr "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
)

type ClickhouseReader struct {

@@ -20,8 +22,8 @@ type ClickhouseReader struct {

func NewDataConnector(
	localDB *sqlx.DB,
	ch clickhouse.Conn,
	promConfigPath string,
	telemetryStore telemetrystore.TelemetryStore,
	prometheus prometheus.Prometheus,
	lm interfaces.FeatureLookup,
	cluster string,
	useLogsNewSchema bool,

@@ -29,14 +31,10 @@ func NewDataConnector(
	fluxIntervalForTraceDetail time.Duration,
	cache cache.Cache,
) *ClickhouseReader {
	chReader := basechr.NewReader(localDB, ch, promConfigPath, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
	chReader := basechr.NewReader(localDB, telemetryStore, prometheus, lm, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
	return &ClickhouseReader{
		conn: ch,
		conn: telemetryStore.ClickhouseDB(),
		appdb: localDB,
		ClickHouseReader: chReader,
	}
}

func (r *ClickhouseReader) Start(readerReady chan bool) {
	r.ClickHouseReader.Start(readerReady)
}
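NewDataConnector now receives a telemetrystore.TelemetryStore and a prometheus.Prometheus instead of a raw ClickHouse connection and a promConfigPath. The diff does not include the new package itself; the following is only a minimal sketch of what such a surface might look like, with assumed method names.

// Hypothetical sketch of the dedicated prometheus package's surface.
// The real pkg/prometheus API is not part of this diff; names are assumed.
package prometheus

import (
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

// Prometheus bundles a PromQL engine with the storage it queries, so
// consumers like ClickhouseReader and the rules manager no longer build
// their own engine from promConfigPath.
type Prometheus interface {
	Engine() *promql.Engine
	Storage() storage.Queryable
}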
@@ -18,13 +18,14 @@ import (
	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/ee/query-service/dao"
	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
	"github.com/SigNoz/signoz/ee/query-service/interfaces"
	"github.com/SigNoz/signoz/ee/query-service/rules"
	"github.com/SigNoz/signoz/pkg/alertmanager"
	"github.com/SigNoz/signoz/pkg/http/middleware"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/query-service/auth"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/web"

@@ -49,7 +50,6 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	pqle "github.com/SigNoz/signoz/pkg/query-service/pqlEngine"
	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
	"github.com/SigNoz/signoz/pkg/query-service/telemetry"
	"github.com/SigNoz/signoz/pkg/query-service/utils"

@@ -137,18 +137,16 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {

	// set license manager as feature flag provider in dao
	modelDao.SetFlagProvider(lm)
	readerReady := make(chan bool)

	fluxIntervalForTraceDetail, err := time.ParseDuration(serverOptions.FluxIntervalForTraceDetail)
	if err != nil {
		return nil, err
	}

	var reader interfaces.DataConnector
	qb := db.NewDataConnector(
	reader := db.NewDataConnector(
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
		serverOptions.PromConfigPath,
		serverOptions.SigNoz.TelemetryStore,
		serverOptions.SigNoz.Prometheus,
		lm,
		serverOptions.Cluster,
		serverOptions.UseLogsNewSchema,

@@ -156,8 +154,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		fluxIntervalForTraceDetail,
		serverOptions.SigNoz.Cache,
	)
	go qb.Start(readerReady)
	reader = qb

	skipConfig := &basemodel.SkipConfig{}
	if serverOptions.SkipTopLvlOpsPath != "" {

@@ -176,9 +172,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		c = cache.NewCache(cacheOpts)
	}

	<-readerReady
	rm, err := makeRulesManager(
		serverOptions.PromConfigPath,
		serverOptions.RuleRepoURL,
		serverOptions.SigNoz.SQLStore.SQLxDB(),
		reader,

@@ -189,6 +183,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		serverOptions.UseTraceNewSchema,
		serverOptions.SigNoz.Alertmanager,
		serverOptions.SigNoz.SQLStore,
		serverOptions.SigNoz.TelemetryStore,
		serverOptions.SigNoz.Prometheus,
	)

	if err != nil {

@@ -233,7 +229,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
	}

	// start the usagemanager
	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickHouseDB(), serverOptions.Config.TelemetryStore.ClickHouse.DSN)
	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.Config.TelemetryStore.Clickhouse.DSN)
	if err != nil {
		return nil, err
	}

@@ -304,7 +300,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
		&opAmpModel.AllAgents, agentConfMgr,
	)

	errorList := qb.PreloadMetricsMetadata(context.Background())
	errorList := reader.PreloadMetricsMetadata(context.Background())
	for _, er := range errorList {
		zap.L().Error("failed to preload metrics metadata", zap.Error(er))
	}

@@ -537,7 +533,6 @@ func (s *Server) Stop() error {
}

func makeRulesManager(
	promConfigPath,
	ruleRepoURL string,
	db *sqlx.DB,
	ch baseint.Reader,

@@ -548,16 +543,13 @@ func makeRulesManager(
	useTraceNewSchema bool,
	alertmanager alertmanager.Alertmanager,
	sqlstore sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
	prometheus prometheus.Prometheus,
) (*baserules.Manager, error) {
	// create engine
	pqle, err := pqle.FromConfigPath(promConfigPath)
	if err != nil {
		return nil, fmt.Errorf("failed to create pql engine : %v", err)
	}

	// create manager opts
	managerOpts := &baserules.ManagerOptions{
		PqlEngine: pqle,
		TelemetryStore: telemetryStore,
		Prometheus: prometheus,
		RepoURL: ruleRepoURL,
		DBConn: db,
		Context: context.Background(),
@@ -7,6 +7,5 @@ import (
// Connector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
	Start(readerReady chan bool)
	baseint.Reader
}
@@ -18,8 +18,6 @@ import (
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"

	prommodel "github.com/prometheus/common/model"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

@@ -32,10 +30,6 @@ func initZapLog() *zap.Logger {
	return logger
}

func init() {
	prommodel.NameValidationScheme = prommodel.UTF8Validation
}

func main() {
	var promConfigPath, skipTopLvlOpsPath string

@@ -89,6 +83,7 @@ func main() {
		MaxIdleConns: maxIdleConns,
		MaxOpenConns: maxOpenConns,
		DialTimeout: dialTimeout,
		Config: promConfigPath,
	})
	if err != nil {
		zap.L().Fatal("Failed to create config", zap.Error(err))

@@ -110,7 +105,7 @@ func main() {
		signoz.NewTelemetryStoreProviderFactories(),
	)
	if err != nil {
		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
		zap.L().Fatal("Failed to create signoz", zap.Error(err))
	}

	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
@@ -48,7 +48,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		opts.Rule,
		opts.Logger,
		opts.Reader,
		opts.ManagerOpts.PqlEngine,
		opts.ManagerOpts.Prometheus,
		baserules.WithSQLStore(opts.SQLStore),
	)

@@ -145,7 +145,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
		parsedRule,
		opts.Logger,
		opts.Reader,
		opts.ManagerOpts.PqlEngine,
		opts.ManagerOpts.Prometheus,
		baserules.WithSendAlways(),
		baserules.WithSendUnmatched(),
		baserules.WithSQLStore(opts.SQLStore),
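PrepareTaskFunc and TestNotification now pass opts.ManagerOpts.Prometheus where they previously passed opts.ManagerOpts.PqlEngine. Assuming an interface along the lines of the sketch above, rule evaluation could run an instant query roughly as follows; this is illustrative only, the actual rule code is not part of this diff and the helper name is invented.

package rules

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/prometheus" // hypothetical surface sketched above
	"github.com/prometheus/prometheus/promql"
)

// evalInstantQuery is a sketch: it evaluates a PromQL expression through the
// injected Prometheus dependency instead of a separately constructed pqlEngine.
func evalInstantQuery(ctx context.Context, p prometheus.Prometheus, expr string, ts time.Time) (promql.Vector, error) {
	q, err := p.Engine().NewInstantQuery(ctx, p.Storage(), nil, expr, ts)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	res := q.Exec(ctx)
	if res.Err != nil {
		return nil, res.Err
	}
	return res.Vector()
}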
go.mod (69 lines changed)
@@ -1,8 +1,8 @@
module github.com/SigNoz/signoz

go 1.22.0
go 1.22.7

toolchain go1.22.7
toolchain go1.22.11

require (
	dario.cat/mergo v1.0.1

@@ -16,7 +16,6 @@ require (
	github.com/coreos/go-oidc/v3 v3.11.0
	github.com/dustin/go-humanize v1.0.1
	github.com/go-co-op/gocron v1.30.1
	github.com/go-kit/log v0.2.1
	github.com/go-openapi/runtime v0.28.0
	github.com/go-openapi/strfmt v0.23.0
	github.com/go-redis/redis/v8 v8.11.5

@@ -36,7 +35,6 @@ require (
	github.com/knadh/koanf/v2 v2.1.1
	github.com/mailru/easyjson v0.7.7
	github.com/mattn/go-sqlite3 v1.14.24
	github.com/oklog/oklog v0.3.2
	github.com/open-telemetry/opamp-go v0.5.0
	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.111.0
	github.com/opentracing/opentracing-go v1.2.0

@@ -45,7 +43,7 @@ require (
	github.com/prometheus/alertmanager v0.28.0
	github.com/prometheus/client_golang v1.20.5
	github.com/prometheus/common v0.61.0
	github.com/prometheus/prometheus v2.5.0+incompatible
	github.com/prometheus/prometheus v0.300.1
	github.com/rs/cors v1.11.1
	github.com/russellhaering/gosaml2 v0.9.0
	github.com/russellhaering/goxmldsig v1.2.0

@@ -63,7 +61,7 @@ require (
	go.opentelemetry.io/collector/pdata v1.17.0
	go.opentelemetry.io/collector/processor v0.111.0
	go.opentelemetry.io/contrib/config v0.10.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
	go.opentelemetry.io/otel v1.34.0
	go.opentelemetry.io/otel/metric v1.34.0
	go.opentelemetry.io/otel/sdk v1.34.0

@@ -75,27 +73,30 @@ require (
	golang.org/x/oauth2 v0.24.0
	golang.org/x/sync v0.10.0
	golang.org/x/text v0.21.0
	google.golang.org/protobuf v1.35.2
	google.golang.org/protobuf v1.36.0
	gopkg.in/segmentio/analytics-go.v3 v3.1.0
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/apimachinery v0.31.1
	honnef.co/go/tools v0.0.1-2020.1.4
	k8s.io/apimachinery v0.31.3
)

require (
	cloud.google.com/go/auth v0.9.5 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
	cloud.google.com/go/compute/metadata v0.5.2 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
	cloud.google.com/go/auth v0.13.0 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
	cloud.google.com/go/compute/metadata v0.6.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
	github.com/BurntSushi/toml v0.3.1 // indirect
	github.com/ClickHouse/ch-go v0.61.5 // indirect
	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
	github.com/andybalholm/brotli v1.1.1 // indirect
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go v1.55.5 // indirect
	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
	github.com/beevik/etree v1.1.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect

@@ -107,16 +108,15 @@ require (
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/ebitengine/purego v0.8.0 // indirect
	github.com/edsrzf/mmap-go v1.1.0 // indirect
	github.com/edsrzf/mmap-go v1.2.0 // indirect
	github.com/elastic/lunes v0.1.0 // indirect
	github.com/expr-lang/expr v1.16.9 // indirect
	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fsnotify/fsnotify v1.8.0 // indirect
	github.com/go-faster/city v1.0.1 // indirect
	github.com/go-faster/errors v0.7.1 // indirect
	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
	github.com/go-logfmt/logfmt v0.6.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect

@@ -132,16 +132,17 @@ require (
	github.com/gofrs/uuid v4.4.0+incompatible // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/gojek/valkyrie v0.0.0-20180215180059-6aee720afcdf // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/google/btree v1.0.1 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/s2a-go v0.1.8 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
	github.com/googleapis/gax-go/v2 v2.14.0 // indirect
	github.com/gopherjs/gopherjs v1.17.2 // indirect
	github.com/gosimple/unidecode v1.0.0 // indirect
	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
	github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect

@@ -162,7 +163,7 @@ require (
	github.com/josharian/intern v1.0.0 // indirect
	github.com/jpillora/backoff v1.0.0 // indirect
	github.com/jtolds/gls v4.20.0+incompatible // indirect
	github.com/klauspost/compress v1.17.10 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/leodido/go-syslog/v4 v4.2.0 // indirect
	github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect

@@ -201,7 +202,6 @@ require (
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
	github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/smarty/assertions v1.15.0 // indirect
	github.com/spf13/cobra v1.8.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect

@@ -219,7 +219,6 @@ require (
	github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.mongodb.org/mongo-driver v1.17.1 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/collector v0.111.0 // indirect
	go.opentelemetry.io/collector/component v0.111.0 // indirect

@@ -248,38 +247,38 @@ require (
	go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect
	go.opentelemetry.io/collector/receiver v0.111.0 // indirect
	go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect
	go.opentelemetry.io/collector/semconv v0.111.0 // indirect
	go.opentelemetry.io/collector/semconv v0.116.0 // indirect
	go.opentelemetry.io/collector/service v0.111.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
	go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
	go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect
	go.opentelemetry.io/otel/log v0.10.0 // indirect
	go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/goleak v1.3.0 // indirect
	golang.org/x/mod v0.22.0 // indirect
	golang.org/x/net v0.33.0 // indirect
	golang.org/x/sys v0.29.0 // indirect
	golang.org/x/time v0.6.0 // indirect
	golang.org/x/time v0.8.0 // indirect
	golang.org/x/tools v0.28.0 // indirect
	gonum.org/v1/gonum v0.15.1 // indirect
	google.golang.org/api v0.199.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
	google.golang.org/grpc v1.67.1 // indirect
	google.golang.org/api v0.213.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
	google.golang.org/grpc v1.69.0 // indirect
	gopkg.in/telebot.v3 v3.3.8 // indirect
	k8s.io/client-go v0.31.1 // indirect
	k8s.io/client-go v0.31.3 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
)

replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.13.0
go.sum (155 lines changed)
@ -29,10 +29,10 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
|
||||
cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||
cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
|
||||
cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
@ -44,8 +44,8 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
|
||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
|
||||
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
|
||||
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
|
||||
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
|
||||
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
|
||||
@ -67,18 +67,23 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4
|
||||
github.com/AfterShip/clickhouse-sql-parser v0.4.4 h1:iLRwjzz1mWmUEf5UNrSYOceQ+PX9SdBJ8Xw0DNrL114=
|
||||
github.com/AfterShip/clickhouse-sql-parser v0.4.4/go.mod h1:W0Z82wJWkJxz2RVun/RMwxue3g7ut47Xxl+SFqdJGus=
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4=
|
||||
@ -96,8 +101,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkbj57eGXx8H3ZJ4zhmQXBnrW523ktj8=
|
||||
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
|
||||
github.com/SigNoz/prometheus v1.13.0 h1:hsUql1zd83ifXtswO9Qk1rpCgVjE/ItQvgdNocBSqRI=
|
||||
github.com/SigNoz/prometheus v1.13.0/go.mod h1:4PC0dxmx6y3kNI2d9oOTvEFTPkH6QnxDxERyqeL1hvI=
|
||||
github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
|
||||
github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
@ -170,8 +173,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs=
|
||||
github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/coder/quartz v0.1.2 h1:PVhc9sJimTdKd3VbygXtS4826EOCpB1fXoRlLnCrE+s=
|
||||
github.com/coder/quartz v0.1.2/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
|
||||
@ -190,12 +193,12 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg=
|
||||
github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
|
||||
github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8=
|
||||
github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v27.3.0+incompatible h1:BNb1QY6o4JdKpqwi9IB+HUYcRRrVN4aGFUTvDmWYK1A=
|
||||
github.com/docker/docker v27.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
|
||||
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
@ -205,8 +208,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
|
||||
github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
|
||||
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
|
||||
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4=
|
||||
github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4=
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21 h1:OJyUGMJTzHTd1XQp98QTaHernxMYzRaOasRir9hUlFQ=
|
||||
@ -224,8 +227,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
|
||||
github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
|
||||
@ -247,8 +250,8 @@ github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUork
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
@ -266,14 +269,10 @@ github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JS
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
||||
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@ -341,7 +340,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
@ -437,9 +435,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
|
||||
github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
|
||||
github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
@ -457,8 +457,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
|
||||
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
|
||||
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
|
||||
@ -585,12 +585,14 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
|
||||
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs=
|
||||
github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs=
|
||||
github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM=
|
||||
@ -621,8 +623,8 @@ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI=
|
||||
github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM=
|
||||
github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
|
||||
github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
|
||||
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74 h1:1KuuSOy4ZNgW0KA2oYIngXVFhQcXxhLqCVK7cBcldkk=
|
||||
github.com/lufia/plan9stats v0.0.0-20240408141607-282e7b5d6b74/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
|
||||
@ -700,8 +702,6 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/oklog v0.3.2 h1:wVfs8F+in6nTBMkA7CbRw+zZMIB7nNM825cM1wuzoTk=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
|
||||
@ -808,9 +808,13 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/prometheus v0.300.1 h1:9KKcTTq80gkzmXW0Et/QCFSrBPgmwiS3Hlcxc6o8KlM=
|
||||
github.com/prometheus/prometheus v0.300.1/go.mod h1:gtTPY/XVyCdqqnjA3NzDMb0/nc5H9hOu1RMame+gHyM=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
|
||||
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
|
||||
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
@ -859,8 +863,6 @@ github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92/go.mod h1:7/OT02F6
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
|
||||
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
@ -896,7 +898,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
@ -966,8 +967,6 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo=
|
||||
@ -1050,14 +1049,16 @@ go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6t
|
||||
go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g=
|
||||
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw=
|
||||
go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM=
|
||||
go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw=
|
||||
go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
|
||||
go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y=
|
||||
go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
|
||||
go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms=
|
||||
go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY=
|
||||
go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
|
||||
go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho=
|
||||
go.opentelemetry.io/contrib/zpages v0.55.0 h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY=
|
||||
@ -1070,12 +1071,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:Wyp
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE=
|
||||
@ -1092,13 +1093,13 @@ go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7W
|
||||
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
|
||||
go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw=
|
||||
go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
|
||||
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
@ -1340,7 +1341,6 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
@ -1363,8 +1363,8 @@ golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@ -1471,8 +1471,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR
|
||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
|
||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
|
||||
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
|
||||
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
|
||||
google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ=
|
||||
google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@ -1559,10 +1559,10 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX
|
||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@ -1595,8 +1595,8 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
|
||||
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
|
||||
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
|
||||
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@ -1612,8 +1612,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@ -1653,13 +1653,14 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
|
||||
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
|
||||
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
|
||||
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
|
||||
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
|
||||
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
|
||||
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
|
||||
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
|
||||
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
|
||||
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
270
pkg/prometheus/clickhouseprometheus/client.go
Normal file
@ -0,0 +1,270 @@
package clickhouseprometheus

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
)

type client struct {
	settings       factory.ScopedProviderSettings
	telemetryStore telemetrystore.TelemetryStore
}

func NewReadClient(settings factory.ScopedProviderSettings, telemetryStore telemetrystore.TelemetryStore) remote.ReadClient {
	return &client{
		settings:       settings,
		telemetryStore: telemetryStore,
	}
}

func (client *client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
	if len(query.Matchers) == 2 {
		var hasJob bool
		var queryString string
		for _, m := range query.Matchers {
			if m.Type == prompb.LabelMatcher_EQ && m.Name == "job" && m.Value == "rawsql" {
				hasJob = true
			}
			if m.Type == prompb.LabelMatcher_EQ && m.Name == "query" {
				queryString = m.Value
			}
		}

		if hasJob && queryString != "" {
			res, err := client.queryRaw(ctx, queryString, int64(query.EndTimestampMs))
			if err != nil {
				return nil, err
			}

			return remote.FromQueryResult(sortSeries, res), nil
		}
	}

	var metricName string
	for _, matcher := range query.Matchers {
		if matcher.Name == "__name__" {
			metricName = matcher.Value
		}
	}

	clickhouseQuery, args, err := client.queryToClickhouseQuery(ctx, query, metricName, false)
	if err != nil {
		return nil, err
	}

	fingerprints, err := client.getFingerprintsFromClickhouseQuery(ctx, clickhouseQuery, args)
	if err != nil {
		return nil, err
	}
	if len(fingerprints) == 0 {
		return remote.FromQueryResult(sortSeries, new(prompb.QueryResult)), nil
	}

	clickhouseSubQuery, args, err := client.queryToClickhouseQuery(ctx, query, metricName, true)
	if err != nil {
		return nil, err
	}

	res := new(prompb.QueryResult)
	timeseries, err := client.querySamples(ctx, int64(query.StartTimestampMs), int64(query.EndTimestampMs), fingerprints, metricName, clickhouseSubQuery, args)
	if err != nil {
		return nil, err
	}
	res.Timeseries = timeseries

	return remote.FromQueryResult(sortSeries, res), nil
}

func (client *client) queryToClickhouseQuery(_ context.Context, query *prompb.Query, metricName string, subQuery bool) (string, []any, error) {
	var clickHouseQuery string
	var conditions []string
	var argCount int = 0
	var selectString string = "fingerprint, any(labels)"
	if subQuery {
		argCount = 1
		selectString = "fingerprint"
	}

	start, end, tableName := getStartAndEndAndTableName(query.StartTimestampMs, query.EndTimestampMs)

	var args []any
	conditions = append(conditions, fmt.Sprintf("metric_name = $%d", argCount+1))
	conditions = append(conditions, "temporality IN ['Cumulative', 'Unspecified']")
	conditions = append(conditions, "__normalized = true")
	conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end))

	args = append(args, metricName)
	for _, m := range query.Matchers {
		switch m.Type {
		case prompb.LabelMatcher_EQ:
			conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, $%d) = $%d", argCount+2, argCount+3))
		case prompb.LabelMatcher_NEQ:
			conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, $%d) != $%d", argCount+2, argCount+3))
		case prompb.LabelMatcher_RE:
			conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, $%d), $%d)", argCount+2, argCount+3))
		case prompb.LabelMatcher_NRE:
			conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, $%d), $%d)", argCount+2, argCount+3))
		default:
			return "", nil, fmt.Errorf("unexpected matcher found in query: %s", m.Type.String())
		}
		args = append(args, m.Name, m.Value)
		argCount += 2
	}

	whereClause := strings.Join(conditions, " AND ")

	clickHouseQuery = fmt.Sprintf(`SELECT %s FROM %s.%s WHERE %s GROUP BY fingerprint`, selectString, databaseName, tableName, whereClause)

	return clickHouseQuery, args, nil
}

func (client *client) getFingerprintsFromClickhouseQuery(ctx context.Context, query string, args []any) (map[uint64][]prompb.Label, error) {
	rows, err := client.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	fingerprints := make(map[uint64][]prompb.Label)

	var fingerprint uint64
	var labelString string
	for rows.Next() {
		if err = rows.Scan(&fingerprint, &labelString); err != nil {
			return nil, err
		}

		labels, _, err := unmarshalLabels(labelString)
		if err != nil {
			return nil, err
		}

		fingerprints[fingerprint] = labels
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return fingerprints, nil
}

func (client *client) querySamples(ctx context.Context, start int64, end int64, fingerprints map[uint64][]prompb.Label, metricName string, subQuery string, args []any) ([]*prompb.TimeSeries, error) {
	argCount := len(args)

	query := fmt.Sprintf(`
		SELECT metric_name, fingerprint, unix_milli, value
		FROM %s.%s
		WHERE metric_name = $1 AND fingerprint GLOBAL IN (%s) AND unix_milli >= $%s AND unix_milli <= $%s ORDER BY fingerprint, unix_milli;`,
		databaseName, distributedSamplesV4, subQuery, strconv.Itoa(argCount+2), strconv.Itoa(argCount+3))
	query = strings.TrimSpace(query)

	allArgs := append([]any{metricName}, args...)
	allArgs = append(allArgs, start, end)

	rows, err := client.telemetryStore.ClickhouseDB().Query(ctx, query, allArgs...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var res []*prompb.TimeSeries
	var ts *prompb.TimeSeries
	var fingerprint, prevFingerprint uint64
	var timestampMs int64
	var value float64

	for rows.Next() {
		if err := rows.Scan(&metricName, &fingerprint, &timestampMs, &value); err != nil {
			return nil, err
		}

		// collect samples in time series
		if fingerprint != prevFingerprint {
			// add collected time series to result
			prevFingerprint = fingerprint
			if ts != nil {
				res = append(res, ts)
			}

			labels := fingerprints[fingerprint]
			ts = &prompb.TimeSeries{
				Labels: labels,
			}
		}

		// add samples to current time series
		ts.Samples = append(ts.Samples, prompb.Sample{
			Timestamp: timestampMs,
			Value:     value,
		})
	}

	// add last time series
	if ts != nil {
		res = append(res, ts)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

func (client *client) queryRaw(ctx context.Context, query string, ts int64) (*prompb.QueryResult, error) {
	rows, err := client.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	columns := rows.Columns()
	var res prompb.QueryResult
	targets := make([]any, len(columns))
	for i := range targets {
		targets[i] = new(scanner)
	}

	for rows.Next() {
		if err = rows.Scan(targets...); err != nil {
			return nil, err
		}

		labels := make([]prompb.Label, 0, len(columns))
		var value float64
		for i, c := range columns {
			v := targets[i].(*scanner)
			switch c {
			case "value":
				value = v.f
			default:
				labels = append(labels, prompb.Label{
					Name:  c,
					Value: v.s,
				})
			}
		}

		res.Timeseries = append(res.Timeseries, &prompb.TimeSeries{
			Labels: labels,
			Samples: []prompb.Sample{{
				Value:     value,
				Timestamp: ts,
			}},
		})
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}

	return &res, nil
}
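For context, a minimal sketch (not part of this commit; the package and helper names are assumptions) of the remote-read convention Read() short-circuits on: exactly two equality matchers, job="rawsql" plus a "query" matcher carrying raw ClickHouse SQL, with only the end timestamp used for the returned samples.

package rawsqlexample

import "github.com/prometheus/prometheus/prompb"

// rawSQLQuery builds the two-matcher query shape that Read() routes to queryRaw.
func rawSQLQuery(sql string, endMs int64) *prompb.Query {
	return &prompb.Query{
		EndTimestampMs: endMs, // queryRaw stamps every returned sample with this timestamp
		Matchers: []*prompb.LabelMatcher{
			{Type: prompb.LabelMatcher_EQ, Name: "job", Value: "rawsql"},
			{Type: prompb.LabelMatcher_EQ, Name: "query", Value: sql},
		},
	}
}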
27
pkg/prometheus/clickhouseprometheus/json.go
Normal file
@ -0,0 +1,27 @@
package clickhouseprometheus

import (
	"encoding/json"

	"github.com/prometheus/prometheus/prompb"
)

// Unmarshals JSON into Prometheus labels. It does not preserve order.
func unmarshalLabels(s string) ([]prompb.Label, string, error) {
	var metricName string
	m := make(map[string]string)
	if err := json.Unmarshal([]byte(s), &m); err != nil {
		return nil, metricName, err
	}
	res := make([]prompb.Label, 0, len(m))
	for n, v := range m {
		if n == "__name__" {
			metricName = v
		}
		res = append(res, prompb.Label{
			Name:  n,
			Value: v,
		})
	}
	return res, metricName, nil
}
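A hypothetical unit-test sketch (not part of this commit) for the helper above; it assumes it sits next to json.go inside package clickhouseprometheus.

package clickhouseprometheus

import "testing"

func TestUnmarshalLabels(t *testing.T) {
	labels, metricName, err := unmarshalLabels(`{"__name__":"http_requests_total","job":"api"}`)
	if err != nil {
		t.Fatal(err)
	}
	if metricName != "http_requests_total" {
		t.Fatalf("expected metric name http_requests_total, got %q", metricName)
	}
	if len(labels) != 2 {
		t.Fatalf("expected 2 labels, got %d", len(labels))
	}
}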
60
pkg/prometheus/clickhouseprometheus/provider.go
Normal file
@ -0,0 +1,60 @@
package clickhouseprometheus

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
)

var stCallback = func() (int64, error) {
	return int64(model.Latest), nil
}

type provider struct {
	settings       factory.ScopedProviderSettings
	telemetryStore telemetrystore.TelemetryStore
	engine         *prometheus.Engine
	queryable      storage.SampleAndChunkQueryable
}

func NewFactory(telemetryStore telemetrystore.TelemetryStore) factory.ProviderFactory[prometheus.Prometheus, prometheus.Config] {
	return factory.NewProviderFactory(factory.MustNewName("clickhouse"), func(ctx context.Context, providerSettings factory.ProviderSettings, config prometheus.Config) (prometheus.Prometheus, error) {
		return New(ctx, providerSettings, config, telemetryStore)
	})
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config prometheus.Config, telemetryStore telemetrystore.TelemetryStore) (prometheus.Prometheus, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/prometheus/clickhouseprometheus")

	readClient := NewReadClient(settings, telemetryStore)

	return &provider{
		settings:       settings,
		telemetryStore: telemetryStore,
		engine:         prometheus.NewEngine(settings.Logger(), config),
		queryable:      remote.NewSampleAndChunkQueryableClient(readClient, labels.EmptyLabels(), []*labels.Matcher{}, false, stCallback),
	}, nil
}

func (provider *provider) Engine() *prometheus.Engine {
	return provider.engine
}

func (provider *provider) Storage() storage.Queryable {
	return provider
}

func (provider *provider) Querier(mint, maxt int64) (storage.Querier, error) {
	querier, err := provider.queryable.Querier(mint, maxt)
	if err != nil {
		return nil, err
	}

	return storage.NewMergeQuerier(nil, []storage.Querier{querier}, storage.ChainedSeriesMerge), nil
}
31
pkg/prometheus/clickhouseprometheus/scanner.go
Normal file
@ -0,0 +1,31 @@
package clickhouseprometheus

import (
	"database/sql"
	"fmt"
)

var _ sql.Scanner = (*scanner)(nil)

type scanner struct {
	f float64
	s string
}

func (s *scanner) Scan(val any) error {
	s.f = 0
	s.s = ""

	s.s = fmt.Sprintf("%v", val)
	switch val := val.(type) {
	case int64:
		s.f = float64(val)
	case uint64:
		s.f = float64(val)
	case float64:
		s.f = val
	case []byte:
		s.s = string(val)
	}
	return nil
}
41
pkg/prometheus/clickhouseprometheus/table.go
Normal file
@ -0,0 +1,41 @@
package clickhouseprometheus

import "time"

const (
	databaseName                string = "signoz_metrics"
	distributedTimeSeriesV4     string = "distributed_time_series_v4"
	distributedTimeSeriesV46hrs string = "distributed_time_series_v4_6hrs"
	distributedTimeSeriesV41day string = "distributed_time_series_v4_1day"
	distributedSamplesV4        string = "distributed_samples_v4"
)

var (
	sixHoursInMilliseconds = time.Hour.Milliseconds() * 6
	oneDayInMilliseconds   = time.Hour.Milliseconds() * 24
)

// Returns the start time, end time and the table name to use for the query.
//
// If the time range is less than 6 hours, we need to use the `time_series_v4` table,
// else if the time range is less than 1 day and greater than 6 hours, we need to use the `time_series_v4_6hrs` table,
// else we need to use the `time_series_v4_1day` table.
func getStartAndEndAndTableName(start, end int64) (int64, int64, string) {
	var tableName string

	if end-start <= sixHoursInMilliseconds {
		// adjust the start time to nearest 1 hour
		start = start - (start % (time.Hour.Milliseconds() * 1))
		tableName = distributedTimeSeriesV4
	} else if end-start <= oneDayInMilliseconds {
		// adjust the start time to nearest 6 hours
		start = start - (start % (time.Hour.Milliseconds() * 6))
		tableName = distributedTimeSeriesV46hrs
	} else {
		// adjust the start time to nearest 1 day
		start = start - (start % (time.Hour.Milliseconds() * 24))
		tableName = distributedTimeSeriesV41day
	}

	return start, end, tableName
}
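A hypothetical test sketch (not in this commit) showing how the helper above rolls between tables and floors the start time; it assumes it lives inside package clickhouseprometheus.

package clickhouseprometheus

import (
	"testing"
	"time"
)

func TestGetStartAndEndAndTableName(t *testing.T) {
	end := time.Date(2024, 7, 28, 18, 29, 36, 0, time.UTC).UnixMilli()

	// A 3h window stays on the per-hour table with the start floored to the hour.
	start, _, table := getStartAndEndAndTableName(end-3*time.Hour.Milliseconds(), end)
	if table != distributedTimeSeriesV4 || start%time.Hour.Milliseconds() != 0 {
		t.Fatalf("unexpected result: %d %s", start, table)
	}

	// A 12h window moves to the 6-hour rollup table.
	if _, _, table := getStartAndEndAndTableName(end-12*time.Hour.Milliseconds(), end); table != distributedTimeSeriesV46hrs {
		t.Fatalf("expected %s, got %s", distributedTimeSeriesV46hrs, table)
	}

	// A 3-day window moves to the 1-day rollup table.
	if _, _, table := getStartAndEndAndTableName(end-72*time.Hour.Milliseconds(), end); table != distributedTimeSeriesV41day {
		t.Fatalf("expected %s, got %s", distributedTimeSeriesV41day, table)
	}
}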
35
pkg/prometheus/config.go
Normal file
@ -0,0 +1,35 @@
package prometheus

import "github.com/SigNoz/signoz/pkg/factory"

type ActiveQueryTrackerConfig struct {
	Enabled       bool   `mapstructure:"enabled"`
	Path          string `mapstructure:"path"`
	MaxConcurrent int    `mapstructure:"max_concurrent"`
}

type Config struct {
	ActiveQueryTrackerConfig ActiveQueryTrackerConfig `mapstructure:"active_query_tracker"`
}

func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("prometheus"), newConfig)
}

func newConfig() factory.Config {
	return Config{
		ActiveQueryTrackerConfig: ActiveQueryTrackerConfig{
			Enabled:       true,
			Path:          "",
			MaxConcurrent: 20,
		},
	}
}

func (c Config) Validate() error {
	return nil
}

func (c Config) Provider() string {
	return "clickhouse"
}
33
pkg/prometheus/engine.go
Normal file
@ -0,0 +1,33 @@
package prometheus

import (
	"log/slog"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/promql"
)

func NewEngine(logger *slog.Logger, cfg Config) *Engine {
	var activeQueryTracker promql.QueryTracker
	if cfg.ActiveQueryTrackerConfig.Enabled {
		activeQueryTracker = promql.NewActiveQueryTracker(
			cfg.ActiveQueryTrackerConfig.Path,
			cfg.ActiveQueryTrackerConfig.MaxConcurrent,
			logger,
		)
	}

	return promql.NewEngine(promql.EngineOpts{
		Logger:             logger,
		Reg:                nil,
		MaxSamples:         50000000,
		Timeout:            time.Duration(2 * time.Minute),
		ActiveQueryTracker: activeQueryTracker,
	})
}

// init initializes the prometheus model with UTF8 validation
func init() {
	model.NameValidationScheme = model.UTF8Validation
}
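A minimal sketch (package and function names are assumptions, not part of this commit) of constructing the engine above with the active query tracker switched off, e.g. for short-lived tooling that has no writable directory for the tracker file.

package promexample

import (
	"log/slog"

	"github.com/SigNoz/signoz/pkg/prometheus"
)

// newUntrackedEngine builds the PromQL engine with the tracker disabled.
func newUntrackedEngine() *prometheus.Engine {
	cfg := prometheus.Config{
		ActiveQueryTrackerConfig: prometheus.ActiveQueryTrackerConfig{Enabled: false},
	}
	return prometheus.NewEngine(slog.Default(), cfg)
}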
13
pkg/prometheus/prometheus.go
Normal file
@ -0,0 +1,13 @@
package prometheus

import (
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

type Engine = promql.Engine

type Prometheus interface {
	Engine() *Engine
	Storage() storage.Queryable
}
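A minimal consumer sketch (the package name and helper are assumptions) mirroring how the ClickHouse reader later in this diff calls the interface above: the engine evaluates an expression against whatever Queryable the provider exposes.

package promexample

import (
	"context"
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/prometheus"
)

// runInstantQuery evaluates a PromQL expression at the current instant.
func runInstantQuery(ctx context.Context, prom prometheus.Prometheus, expr string) error {
	qry, err := prom.Engine().NewInstantQuery(ctx, prom.Storage(), nil, expr, time.Now())
	if err != nil {
		return err
	}
	defer qry.Close()

	res := qry.Exec(ctx)
	if res.Err != nil {
		return res.Err
	}
	fmt.Println(res.Value)
	return nil
}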
69
pkg/prometheus/prometheustest/provider.go
Normal file
@ -0,0 +1,69 @@
package prometheustest

import (
	"log/slog"
	"os"
	"time"

	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

var _ prometheus.Prometheus = (*Provider)(nil)

type Provider struct {
	db     *tsdb.DB
	dir    string
	engine *prometheus.Engine
}

func New(logger *slog.Logger, cfg prometheus.Config, outOfOrderTimeWindow ...int64) *Provider {
	dir, err := os.MkdirTemp("", "test_storage")
	if err != nil {
		panic(err)
	}

	// Tests just load data for a series sequentially. Thus we
	// need a long appendable window.
	opts := tsdb.DefaultOptions()
	opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
	opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
	opts.RetentionDuration = 0
	opts.EnableNativeHistograms = true

	// Set OutOfOrderTimeWindow if provided, otherwise use default (0)
	if len(outOfOrderTimeWindow) > 0 {
		opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0]
	} else {
		opts.OutOfOrderTimeWindow = 0 // Default value is zero
	}

	db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
	if err != nil {
		panic(err)
	}

	engine := prometheus.NewEngine(logger, cfg)

	return &Provider{
		db:     db,
		dir:    dir,
		engine: engine,
	}
}

func (provider *Provider) Engine() *prometheus.Engine {
	return provider.engine
}

func (provider *Provider) Storage() storage.Queryable {
	return provider.db
}

func (provider *Provider) Close() error {
	if err := provider.db.Close(); err != nil {
		return err
	}
	return os.RemoveAll(provider.dir)
}
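A hypothetical test sketch (the test itself is not part of this commit) showing the lifecycle of the in-memory provider above; the logger construction mirrors how the query-service tests updated later in this diff build it.

package prometheustest_test

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
)

func TestProviderLifecycle(t *testing.T) {
	provider := prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{})
	defer func() {
		// Close drops the temporary TSDB directory created by New.
		if err := provider.Close(); err != nil {
			t.Fatal(err)
		}
	}()

	if provider.Engine() == nil || provider.Storage() == nil {
		t.Fatal("expected a usable engine and storage")
	}
}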
@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
@ -16,20 +15,16 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model/metrics_explorer"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"honnef.co/go/tools/config"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/oklog/oklog/pkg/group"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/promlog"
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/storage/remote"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/v2"
|
||||
@ -38,7 +33,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
promModel "github.com/prometheus/common/model"
|
||||
"go.uber.org/zap"
|
||||
|
||||
queryprogress "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader/query_progress"
|
||||
@ -120,6 +114,7 @@ var (
|
||||
// SpanWriter for reading spans from ClickHouse
|
||||
type ClickHouseReader struct {
|
||||
db clickhouse.Conn
|
||||
prometheus prometheus.Prometheus
|
||||
localDB *sqlx.DB
|
||||
TraceDB string
|
||||
operationsTable string
|
||||
@ -138,9 +133,6 @@ type ClickHouseReader struct {
|
||||
logsAttributeKeys string
|
||||
logsResourceKeys string
|
||||
logsTagAttributeTableV2 string
|
||||
queryEngine *promql.Engine
|
||||
remoteStorage *remote.Storage
|
||||
fanoutStorage *storage.Storage
|
||||
queryProgressTracker queryprogress.QueryProgressTracker
|
||||
|
||||
logsTableV2 string
|
||||
@ -175,8 +167,8 @@ type ClickHouseReader struct {
|
||||
// NewTraceReader returns a TraceReader for the database
|
||||
func NewReader(
|
||||
localDB *sqlx.DB,
|
||||
db driver.Conn,
|
||||
configFile string,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
prometheus prometheus.Prometheus,
|
||||
featureFlag interfaces.FeatureLookup,
|
||||
cluster string,
|
||||
useLogsNewSchema bool,
|
||||
@ -185,14 +177,14 @@ func NewReader(
|
||||
cache cache.Cache,
|
||||
) *ClickHouseReader {
|
||||
options := NewOptions(primaryNamespace, archiveNamespace)
|
||||
return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
|
||||
return NewReaderFromClickhouseConnection(options, localDB, telemetryStore, prometheus, featureFlag, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
|
||||
}
|
||||
|
||||
func NewReaderFromClickhouseConnection(
|
||||
db driver.Conn,
|
||||
options *Options,
|
||||
localDB *sqlx.DB,
|
||||
configFile string,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
prometheus prometheus.Prometheus,
|
||||
featureFlag interfaces.FeatureLookup,
|
||||
cluster string,
|
||||
useLogsNewSchema bool,
|
||||
@ -215,7 +207,8 @@ func NewReaderFromClickhouseConnection(
|
||||
}
|
||||
|
||||
return &ClickHouseReader{
|
||||
db: db,
|
||||
db: telemetryStore.ClickhouseDB(),
|
||||
prometheus: prometheus,
|
||||
localDB: localDB,
|
||||
TraceDB: options.primary.TraceDB,
|
||||
operationsTable: options.primary.OperationsTable,
|
||||
@ -235,7 +228,6 @@ func NewReaderFromClickhouseConnection(
|
||||
logsResourceKeys: options.primary.LogsResourceKeysTable,
|
||||
logsTagAttributeTableV2: options.primary.LogsTagAttributeTableV2,
|
||||
liveTailRefreshSeconds: options.primary.LiveTailRefreshSeconds,
|
||||
promConfigFile: configFile,
|
||||
featureFlags: featureFlag,
|
||||
cluster: cluster,
|
||||
queryProgressTracker: queryprogress.NewQueryProgressTracker(),
|
||||
@ -262,154 +254,8 @@ func NewReaderFromClickhouseConnection(
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) Start(readerReady chan bool) {
|
||||
logLevel := promlog.AllowedLevel{}
|
||||
logLevel.Set("debug")
|
||||
allowedFormat := promlog.AllowedFormat{}
|
||||
allowedFormat.Set("logfmt")
|
||||
|
||||
promlogConfig := promlog.Config{
|
||||
Level: &logLevel,
|
||||
Format: &allowedFormat,
|
||||
}
|
||||
|
||||
logger := promlog.New(&promlogConfig)
|
||||
|
||||
startTime := func() (int64, error) {
|
||||
return int64(promModel.Latest), nil
|
||||
}
|
||||
|
||||
remoteStorage := remote.NewStorage(
|
||||
log.With(logger, "component", "remote"),
|
||||
nil,
|
||||
startTime,
|
||||
"",
|
||||
time.Duration(1*time.Minute),
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
||||
cfg := struct {
|
||||
configFile string
|
||||
|
||||
localStoragePath string
|
||||
lookbackDelta promModel.Duration
|
||||
webTimeout promModel.Duration
|
||||
queryTimeout promModel.Duration
|
||||
queryConcurrency int
|
||||
queryMaxSamples int
|
||||
RemoteFlushDeadline promModel.Duration
|
||||
|
||||
prometheusURL string
|
||||
|
||||
logLevel promlog.AllowedLevel
|
||||
}{
|
||||
configFile: r.promConfigFile,
|
||||
}
|
||||
|
||||
fanoutStorage := storage.NewFanout(logger, remoteStorage)
|
||||
|
||||
opts := promql.EngineOpts{
|
||||
Logger: log.With(logger, "component", "query engine"),
|
||||
Reg: nil,
|
||||
MaxSamples: 50000000,
|
||||
Timeout: time.Duration(2 * time.Minute),
|
||||
ActiveQueryTracker: promql.NewActiveQueryTracker(
|
||||
"",
|
||||
20,
|
||||
log.With(logger, "component", "activeQueryTracker"),
|
||||
),
|
||||
}
|
||||
|
||||
queryEngine := promql.NewEngine(opts)
|
||||
|
||||
reloaders := []func(cfg *config.Config) error{
|
||||
remoteStorage.ApplyConfig,
|
||||
}
|
||||
|
||||
// sync.Once is used to make sure we can close the channel at different execution stages(SIGTERM or when the config is loaded).
|
||||
type closeOnce struct {
|
||||
C chan struct{}
|
||||
once sync.Once
|
||||
Close func()
|
||||
}
|
||||
// Wait until the server is ready to handle reloading.
|
||||
reloadReady := &closeOnce{
|
||||
C: make(chan struct{}),
|
||||
}
|
||||
reloadReady.Close = func() {
|
||||
reloadReady.once.Do(func() {
|
||||
close(reloadReady.C)
|
||||
})
|
||||
}
|
||||
|
||||
var g group.Group
|
||||
{
|
||||
// Initial configuration loading.
|
||||
cancel := make(chan struct{})
|
||||
g.Add(
|
||||
func() error {
|
||||
var err error
|
||||
r.promConfig, err = reloadConfig(cfg.configFile, logger, reloaders...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading config from %q: %s", cfg.configFile, err)
|
||||
}
|
||||
|
||||
reloadReady.Close()
|
||||
|
||||
<-cancel
|
||||
|
||||
return nil
|
||||
},
|
||||
func(err error) {
|
||||
close(cancel)
|
||||
},
|
||||
)
|
||||
}
|
||||
r.queryEngine = queryEngine
|
||||
r.remoteStorage = remoteStorage
|
||||
r.fanoutStorage = &fanoutStorage
|
||||
readerReady <- true
|
||||
|
||||
if err := g.Run(); err != nil {
|
||||
level.Error(logger).Log("err", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) GetQueryEngine() *promql.Engine {
|
||||
return r.queryEngine
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) GetFanoutStorage() *storage.Storage {
|
||||
return r.fanoutStorage
|
||||
}
|
||||
|
||||
func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (promConfig *config.Config, err error) {
|
||||
level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
|
||||
|
||||
conf, err := config.LoadFile(filename, false, false, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't load configuration (--config.file=%q): %v", filename, err)
|
||||
}
|
||||
|
||||
failed := false
|
||||
for _, rl := range rls {
|
||||
if err := rl(conf); err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
|
||||
failed = true
|
||||
}
|
||||
}
|
||||
if failed {
|
||||
return nil, fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
|
||||
}
|
||||
level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename)
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, queryParams *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
|
||||
qry, err := r.queryEngine.NewInstantQuery(ctx, r.remoteStorage, nil, queryParams.Query, queryParams.Time)
|
||||
qry, err := r.prometheus.Engine().NewInstantQuery(ctx, r.prometheus.Storage(), nil, queryParams.Query, queryParams.Time)
|
||||
if err != nil {
|
||||
return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
|
||||
}
|
||||
@ -428,7 +274,7 @@ func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, que
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
|
||||
qry, err := r.queryEngine.NewRangeQuery(ctx, r.remoteStorage, nil, query.Query, query.Start, query.End, query.Step)
|
||||
qry, err := r.prometheus.Engine().NewRangeQuery(ctx, r.prometheus.Storage(), nil, query.Query, query.Start, query.End, query.Step)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
|
||||
|
@ -24,12 +24,12 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
|
||||
"github.com/SigNoz/signoz/pkg/signoz"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/gorilla/websocket"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
|
@ -5,11 +5,14 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
|
||||
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
|
||||
@ -18,6 +21,8 @@ import (
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/querycache"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
|
||||
cmock "github.com/srikanthccv/ClickHouse-go-mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@ -1131,20 +1136,6 @@ func TestQueryRangeValueTypePromQL(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type regexMatcher struct {
|
||||
}
|
||||
|
||||
func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
|
||||
re, err := regexp.Compile(expectedSQL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !re.MatchString(actualSQL) {
|
||||
return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
params := &v3.QueryRangeParamsV3{
|
||||
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
|
||||
@ -1358,8 +1349,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Setup mock
|
||||
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, ®exMatcher{})
|
||||
require.NoError(t, err, "Failed to create ClickHouse mock")
|
||||
telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherRegexp)
|
||||
|
||||
// Configure mock responses
|
||||
for _, response := range tc.queryResponses {
|
||||
@ -1368,7 +1358,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
values = append(values, []any{&ts, &testName})
|
||||
}
|
||||
// if len(values) > 0 {
|
||||
mock.ExpectQuery(response.expectedQuery).WillReturnRows(
|
||||
telemetryStore.Mock().ExpectQuery(response.expectedQuery).WillReturnRows(
|
||||
cmock.NewRows(cols, values),
|
||||
)
|
||||
// }
|
||||
@ -1376,10 +1366,10 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
|
||||
// Create reader and querier
|
||||
reader := clickhouseReader.NewReaderFromClickhouseConnection(
|
||||
mock,
|
||||
options,
|
||||
nil,
|
||||
"",
|
||||
telemetryStore,
|
||||
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
|
||||
featureManager.StartManager(),
|
||||
"",
|
||||
true,
|
||||
@ -1429,7 +1419,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify mock expectations
|
||||
err = mock.ExpectationsWereMet()
|
||||
err = telemetryStore.Mock().ExpectationsWereMet()
|
||||
require.NoError(t, err, "Mock expectations were not met")
|
||||
})
|
||||
}
|
||||
|
@ -5,11 +5,14 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
|
||||
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
|
||||
@ -18,6 +21,8 @@ import (
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/querycache"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
|
||||
cmock "github.com/srikanthccv/ClickHouse-go-mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@ -1185,20 +1190,6 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type regexMatcher struct {
|
||||
}
|
||||
|
||||
func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
|
||||
re, err := regexp.Compile(expectedSQL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !re.MatchString(actualSQL) {
|
||||
return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
params := &v3.QueryRangeParamsV3{
|
||||
Start: 1722171576000000000, // July 28, 2024 6:29:36 PM
|
||||
@ -1412,8 +1403,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Setup mock
|
||||
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, ®exMatcher{})
|
||||
require.NoError(t, err, "Failed to create ClickHouse mock")
|
||||
telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherRegexp)
|
||||
|
||||
// Configure mock responses
|
||||
for _, response := range tc.queryResponses {
|
||||
@ -1422,7 +1412,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
values = append(values, []any{&ts, &testName})
|
||||
}
|
||||
// if len(values) > 0 {
|
||||
mock.ExpectQuery(response.expectedQuery).WillReturnRows(
|
||||
telemetryStore.Mock().ExpectQuery(response.expectedQuery).WillReturnRows(
|
||||
cmock.NewRows(cols, values),
|
||||
)
|
||||
// }
|
||||
@ -1430,10 +1420,10 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
|
||||
// Create reader and querier
|
||||
reader := clickhouseReader.NewReaderFromClickhouseConnection(
|
||||
mock,
|
||||
options,
|
||||
nil,
|
||||
"",
|
||||
telemetryStore,
|
||||
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
|
||||
featureManager.StartManager(),
|
||||
"",
|
||||
true,
|
||||
@ -1483,7 +1473,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify mock expectations
|
||||
err = mock.ExpectationsWereMet()
|
||||
err = telemetryStore.Mock().ExpectationsWereMet()
|
||||
require.NoError(t, err, "Mock expectations were not met")
|
||||
})
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager"
|
||||
"github.com/SigNoz/signoz/pkg/http/middleware"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||
@ -25,6 +26,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/preferences"
|
||||
"github.com/SigNoz/signoz/pkg/signoz"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/web"
|
||||
@ -40,7 +42,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
pqle "github.com/SigNoz/signoz/pkg/query-service/pqlEngine"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/rules"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||
@ -119,10 +120,10 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clickhouseReader := clickhouseReader.NewReader(
|
||||
reader := clickhouseReader.NewReader(
|
||||
serverOptions.SigNoz.SQLStore.SQLxDB(),
|
||||
serverOptions.SigNoz.TelemetryStore.ClickHouseDB(),
|
||||
serverOptions.PromConfigPath,
|
||||
serverOptions.SigNoz.TelemetryStore,
|
||||
serverOptions.SigNoz.Prometheus,
|
||||
fm,
|
||||
serverOptions.Cluster,
|
||||
serverOptions.UseLogsNewSchema,
|
||||
@ -130,8 +131,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
fluxIntervalForTraceDetail,
|
||||
serverOptions.SigNoz.Cache,
|
||||
)
|
||||
go clickhouseReader.Start(readerReady)
|
||||
reader := clickhouseReader
|
||||
|
||||
skipConfig := &model.SkipConfig{}
|
||||
if serverOptions.SkipTopLvlOpsPath != "" {
|
||||
@ -162,6 +161,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
|
||||
serverOptions.UseLogsNewSchema,
|
||||
serverOptions.UseTraceNewSchema,
|
||||
serverOptions.SigNoz.SQLStore,
|
||||
serverOptions.SigNoz.TelemetryStore,
|
||||
serverOptions.SigNoz.Prometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -490,17 +491,13 @@ func makeRulesManager(
|
||||
useLogsNewSchema bool,
|
||||
useTraceNewSchema bool,
|
||||
sqlstore sqlstore.SQLStore,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
prometheus prometheus.Prometheus,
|
||||
) (*rules.Manager, error) {
|
||||
|
||||
// create engine
|
||||
pqle, err := pqle.FromReader(ch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create pql engine : %v", err)
|
||||
}
|
||||
|
||||
// create manager opts
|
||||
managerOpts := &rules.ManagerOptions{
|
||||
PqlEngine: pqle,
|
||||
TelemetryStore: telemetryStore,
|
||||
Prometheus: prometheus,
|
||||
RepoURL: ruleRepoURL,
|
||||
DBConn: db,
|
||||
Context: context.Background(),
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/querycache"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
)
|
||||
|
||||
@ -86,10 +85,6 @@ type Reader interface {
|
||||
req *v3.QBFilterSuggestionsRequest,
|
||||
) (*v3.QBFilterSuggestionsResponse, *model.ApiError)
|
||||
|
||||
// Connection needed for rules, not ideal but required
|
||||
GetQueryEngine() *promql.Engine
|
||||
GetFanoutStorage() *storage.Storage
|
||||
|
||||
QueryDashboardVars(ctx context.Context, query string) (*model.DashboardVar, error)
|
||||
CheckClickHouse(ctx context.Context) error
|
||||
|
||||
|
@ -15,7 +15,6 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/signoz"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
"github.com/SigNoz/signoz/pkg/version"
|
||||
prommodel "github.com/prometheus/common/model"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
@ -29,10 +28,6 @@ func initZapLog() *zap.Logger {
|
||||
return logger
|
||||
}
|
||||
|
||||
func init() {
|
||||
prommodel.NameValidationScheme = prommodel.UTF8Validation
|
||||
}
|
||||
|
||||
func main() {
|
||||
var promConfigPath, skipTopLvlOpsPath string
|
||||
|
||||
@ -85,6 +80,7 @@ func main() {
|
||||
MaxIdleConns: maxIdleConns,
|
||||
MaxOpenConns: maxOpenConns,
|
||||
DialTimeout: dialTimeout,
|
||||
Config: promConfigPath,
|
||||
})
|
||||
if err != nil {
|
||||
zap.L().Fatal("Failed to create config", zap.Error(err))
|
||||
@ -101,7 +97,7 @@ func main() {
|
||||
signoz.NewTelemetryStoreProviderFactories(),
|
||||
)
|
||||
if err != nil {
|
||||
zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
|
||||
zap.L().Fatal("Failed to create signoz", zap.Error(err))
|
||||
}
|
||||
|
||||
// Read the jwt secret key
|
||||
|
@ -1,126 +0,0 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
"github.com/go-kit/log"
|
||||
pmodel "github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promlog"
|
||||
pconfig "github.com/prometheus/prometheus/config"
|
||||
pql "github.com/prometheus/prometheus/promql"
|
||||
pstorage "github.com/prometheus/prometheus/storage"
|
||||
premote "github.com/prometheus/prometheus/storage/remote"
|
||||
)
|
||||
|
||||
type PqlEngine struct {
|
||||
engine *pql.Engine
|
||||
fanoutStorage pstorage.Storage
|
||||
}
|
||||
|
||||
func FromConfigPath(promConfigPath string) (*PqlEngine, error) {
|
||||
// load storage path
|
||||
c, err := pconfig.LoadFile(promConfigPath, false, false, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't load configuration (--config.file=%q): %v", promConfigPath, err)
|
||||
}
|
||||
|
||||
return NewPqlEngine(c)
|
||||
}
|
||||
|
||||
func FromReader(ch interfaces.Reader) (*PqlEngine, error) {
|
||||
return &PqlEngine{
|
||||
engine: ch.GetQueryEngine(),
|
||||
fanoutStorage: *ch.GetFanoutStorage(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewPqlEngine(config *pconfig.Config) (*PqlEngine, error) {
|
||||
|
||||
logLevel := promlog.AllowedLevel{}
|
||||
_ = logLevel.Set("debug")
|
||||
|
||||
allowedFormat := promlog.AllowedFormat{}
|
||||
_ = allowedFormat.Set("logfmt")
|
||||
|
||||
promlogConfig := promlog.Config{
|
||||
Level: &logLevel,
|
||||
Format: &allowedFormat,
|
||||
}
|
||||
|
||||
logger := promlog.New(&promlogConfig)
|
||||
|
||||
opts := pql.EngineOpts{
|
||||
Logger: log.With(logger, "component", "promql evaluator"),
|
||||
Reg: nil,
|
||||
MaxSamples: 50000000,
|
||||
Timeout: time.Duration(2 * time.Minute),
|
||||
ActiveQueryTracker: pql.NewActiveQueryTracker(
|
||||
"",
|
||||
20,
|
||||
logger,
|
||||
),
|
||||
}
|
||||
|
||||
e := pql.NewEngine(opts)
|
||||
startTime := func() (int64, error) {
|
||||
return int64(pmodel.Latest), nil
|
||||
}
|
||||
|
||||
remoteStorage := premote.NewStorage(
|
||||
log.With(logger, "component", "remote"),
|
||||
nil,
|
||||
startTime,
|
||||
"",
|
||||
time.Duration(1*time.Minute),
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
fanoutStorage := pstorage.NewFanout(logger, remoteStorage)
|
||||
|
||||
_ = remoteStorage.ApplyConfig(config)
|
||||
|
||||
return &PqlEngine{
|
||||
engine: e,
|
||||
fanoutStorage: fanoutStorage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *PqlEngine) RunAlertQuery(ctx context.Context, qs string, start, end time.Time, interval time.Duration) (pql.Matrix, error) {
|
||||
q, err := p.engine.NewRangeQuery(ctx, p.fanoutStorage, nil, qs, start, end, interval)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := q.Exec(ctx)
|
||||
|
||||
if res.Err != nil {
|
||||
return nil, res.Err
|
||||
}
|
||||
|
||||
switch typ := res.Value.(type) {
|
||||
case pql.Vector:
|
||||
series := make([]pql.Series, 0, len(typ))
|
||||
value := res.Value.(pql.Vector)
|
||||
for _, smpl := range value {
|
||||
series = append(series, pql.Series{
|
||||
Metric: smpl.Metric,
|
||||
Floats: []pql.FPoint{{T: smpl.T, F: smpl.F}},
|
||||
})
|
||||
}
|
||||
return series, nil
|
||||
case pql.Scalar:
|
||||
value := res.Value.(pql.Scalar)
|
||||
series := make([]pql.Series, 0, 1)
|
||||
series = append(series, pql.Series{
|
||||
Floats: []pql.FPoint{{T: value.T, F: value.V}},
|
||||
})
|
||||
return series, nil
|
||||
case pql.Matrix:
|
||||
return res.Value.(pql.Matrix), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("rule result is not a vector or scalar")
|
||||
}
|
||||
}
|
@ -18,12 +18,13 @@ import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/alertmanager"
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/cache"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
pqle "github.com/SigNoz/signoz/pkg/query-service/pqlEngine"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/telemetry"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||
)
|
||||
@ -76,8 +77,8 @@ func prepareTaskName(ruleId interface{}) string {
|
||||
|
||||
// ManagerOptions bundles options for the Manager.
|
||||
type ManagerOptions struct {
|
||||
PqlEngine *pqle.PqlEngine
|
||||
|
||||
TelemetryStore telemetrystore.TelemetryStore
|
||||
Prometheus prometheus.Prometheus
|
||||
// RepoURL is used to generate a backlink in sent alert messages
|
||||
RepoURL string
|
||||
|
||||
@ -180,7 +181,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
|
||||
opts.Rule,
|
||||
opts.Logger,
|
||||
opts.Reader,
|
||||
opts.ManagerOpts.PqlEngine,
|
||||
opts.ManagerOpts.Prometheus,
|
||||
WithSQLStore(opts.SQLStore),
|
||||
)
|
||||
|
||||
|
@ -8,11 +8,11 @@ import (
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/prometheus"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/formatter"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
pqle "github.com/SigNoz/signoz/pkg/query-service/pqlEngine"
|
||||
qslabels "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/times"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
|
||||
@ -22,7 +22,7 @@ import (
|
||||
|
||||
type PromRule struct {
|
||||
*BaseRule
|
||||
pqlEngine *pqle.PqlEngine
|
||||
prometheus prometheus.Prometheus
|
||||
}
|
||||
|
||||
func NewPromRule(
|
||||
@ -30,7 +30,7 @@ func NewPromRule(
|
||||
postableRule *PostableRule,
|
||||
logger *zap.Logger,
|
||||
reader interfaces.Reader,
|
||||
pqlEngine *pqle.PqlEngine,
|
||||
prometheus prometheus.Prometheus,
|
||||
opts ...RuleOption,
|
||||
) (*PromRule, error) {
|
||||
|
||||
@ -41,7 +41,7 @@ func NewPromRule(
|
||||
|
||||
p := PromRule{
|
||||
BaseRule: baseRule,
|
||||
pqlEngine: pqlEngine,
|
||||
prometheus: prometheus,
|
||||
}
|
||||
p.logger = logger
|
||||
|
||||
@ -108,7 +108,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
|
||||
return nil, err
|
||||
}
|
||||
zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q))
|
||||
res, err := r.pqlEngine.RunAlertQuery(ctx, q, start, end, interval)
|
||||
res, err := r.RunAlertQuery(ctx, q, start, end, interval)
|
||||
if err != nil {
|
||||
r.SetHealth(HealthBad)
|
||||
r.SetLastError(err)
|
||||
@ -306,6 +306,43 @@ func (r *PromRule) String() string {
return string(byt)
}

func (r *PromRule) RunAlertQuery(ctx context.Context, qs string, start, end time.Time, interval time.Duration) (promql.Matrix, error) {
q, err := r.prometheus.Engine().NewRangeQuery(ctx, r.prometheus.Storage(), nil, qs, start, end, interval)
if err != nil {
return nil, err
}

res := q.Exec(ctx)

if res.Err != nil {
return nil, res.Err
}

switch typ := res.Value.(type) {
case promql.Vector:
series := make([]promql.Series, 0, len(typ))
value := res.Value.(promql.Vector)
for _, smpl := range value {
series = append(series, promql.Series{
Metric: smpl.Metric,
Floats: []promql.FPoint{{T: smpl.T, F: smpl.F}},
})
}
return series, nil
case promql.Scalar:
value := res.Value.(promql.Scalar)
series := make([]promql.Series, 0, 1)
series = append(series, promql.Series{
Floats: []promql.FPoint{{T: value.T, F: value.V}},
})
return series, nil
case promql.Matrix:
return res.Value.(promql.Matrix), nil
default:
return nil, fmt.Errorf("rule result is not a vector or scalar")
}
}

func toCommonSeries(series promql.Series) v3.Series {
commonSeries := v3.Series{
Labels: make(map[string]string),
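The new RunAlertQuery needs only two things from the injected prometheus.Prometheus: a promql engine to build the range query and a queryable storage for it to read from; the rule itself flattens Vector and Scalar results into a single-point Matrix. A minimal sketch of that surface, inferred from the calls above using the upstream promql and storage types; this is not the actual definition in pkg/prometheus, which may expose more:

// Minimal sketch of the surface RunAlertQuery relies on, inferred from
// r.prometheus.Engine() and r.prometheus.Storage() being handed to
// promql.Engine.NewRangeQuery. Not the real pkg/prometheus interface.
package prometheussketch

import (
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

type Prometheus interface {
	Engine() *promql.Engine     // builds the range query
	Storage() storage.Queryable // backed by ClickHouse in this commit
}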
@ -68,7 +68,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
parsedRule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
opts.ManagerOpts.Prometheus,
WithSendAlways(),
WithSendUnmatched(),
WithSQLStore(opts.SQLStore),
@ -3,19 +3,26 @@ package rules
import (
"context"
"fmt"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"strings"
"testing"
"time"

"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"

"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/featureManager"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

cmock "github.com/srikanthccv/ClickHouse-go-mock"
)
@ -1152,10 +1159,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &queryMatcherAny{})

cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})

@ -1227,11 +1231,11 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {

for idx, c := range cases {
rows := cmock.NewRows(cols, c.values)
mock.ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))
telemetryStore.Mock().ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
telemetryStore.Mock().
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)

@ -1246,7 +1250,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {

options := clickhouseReader.NewOptions("", "", "archiveNamespace")
readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true, time.Duration(time.Second), readerCache)
require.NoError(t, err)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), fm, "", true, true, time.Duration(time.Second), readerCache)
rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
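The same replacement runs through all of the threshold-rule tests below: the hand-rolled cmock connection disappears, query expectations move to telemetryStore.Mock(), and the reader takes a telemetrystore plus a prometheustest provider instead of a raw connection and a prometheus config path. A condensed sketch of that wiring, assuming the helpers of this test file (queryMatcherAny, fm, readerCache, DefaultFrequency) and using argument values that simply mirror the tests here, not a canonical setup:

// Condensed sketch of the new test wiring; values are illustrative.
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &queryMatcherAny{})
telemetryStore.Mock().ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))

reader := clickhouseReader.NewReaderFromClickhouseConnection(
	clickhouseReader.NewOptions("", "", "archiveNamespace"),
	nil,            // local DB, nil as in the tests above
	telemetryStore, // the mock ClickHouse connection now comes from here
	prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
	fm, "", true, true, time.Duration(time.Second), readerCache,
)
_ = reader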
@ -1305,10 +1310,7 @@ func TestThresholdRuleNoData(t *testing.T) {
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &queryMatcherAny{})

cols := make([]cmock.ColumnType, 0)
cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})

@ -1328,12 +1330,12 @@ func TestThresholdRuleNoData(t *testing.T) {
for idx, c := range cases {
rows := cmock.NewRows(cols, c.values)

mock.ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))
telemetryStore.Mock().ExpectQuery(".*").WillReturnError(fmt.Errorf("error"))

// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
telemetryStore.Mock().
ExpectQuery(queryString).
WillReturnRows(rows)
var target float64 = 0

@ -1346,7 +1348,7 @@ func TestThresholdRuleNoData(t *testing.T) {
}
readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true, time.Duration(time.Second), readerCache)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), fm, "", true, true, time.Duration(time.Second), readerCache)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@ -1410,10 +1412,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &queryMatcherAny{})

metaCols := make([]cmock.ColumnType, 0)
metaCols = append(metaCols, cmock.ColumnType{Name: "DISTINCT(tagKey)", Type: "String"})

@ -1428,11 +1427,11 @@ func TestThresholdRuleTracesLink(t *testing.T) {

for idx, c := range testCases {
metaRows := cmock.NewRows(metaCols, c.metaValues)
mock.
telemetryStore.Mock().
ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType FROM archiveNamespace.span_attributes_keys").
WillReturnRows(metaRows)

mock.
telemetryStore.Mock().
ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").WillReturnRows(&cmock.Rows{})

rows := cmock.NewRows(cols, c.values)

@ -1440,7 +1439,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
telemetryStore.Mock().
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)

@ -1454,7 +1453,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
}

options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true, time.Duration(time.Second), nil)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), fm, "", true, true, time.Duration(time.Second), nil)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@ -1523,10 +1522,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
},
}
fm := featureManager.StartManager()
mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &queryMatcherAny{})

attrMetaCols := make([]cmock.ColumnType, 0)
attrMetaCols = append(attrMetaCols, cmock.ColumnType{Name: "name", Type: "String"})

@ -1546,17 +1542,17 @@ func TestThresholdRuleLogsLink(t *testing.T) {

for idx, c := range testCases {
attrMetaRows := cmock.NewRows(attrMetaCols, c.attrMetaValues)
mock.
telemetryStore.Mock().
ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_attribute_keys group by name, datatype").
WillReturnRows(attrMetaRows)

resourceMetaRows := cmock.NewRows(resourceMetaCols, c.resourceMetaValues)
mock.
telemetryStore.Mock().
ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_resource_keys group by name, datatype").
WillReturnRows(resourceMetaRows)

createTableRows := cmock.NewRows(createTableCols, c.createTableValues)
mock.
telemetryStore.Mock().
ExpectSelect("SHOW CREATE TABLE signoz_logs.logs").
WillReturnRows(createTableRows)

@ -1565,7 +1561,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
// We are testing the eval logic after the query is run
// so we don't care about the query string here
queryString := "SELECT any"
mock.
telemetryStore.Mock().
ExpectQuery(queryString).
WillReturnRows(rows)
postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp)

@ -1579,7 +1575,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
}

options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true, true, time.Duration(time.Second), nil)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), fm, "", true, true, time.Duration(time.Second), nil)

rule, err := NewThresholdRule("69", &postableRule, fm, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@ -13,6 +13,9 @@ import (
"time"

"github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
"github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/auth"

@ -20,6 +23,8 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/dao"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/google/uuid"

@ -39,14 +44,12 @@ func NewMockClickhouseReader(
) {
require.NotNil(t, testDB)

mockDB, err := mockhouse.NewClickHouseWithQueryMatcher(nil, sqlmock.QueryMatcherRegexp)

require.Nil(t, err, "could not init mock clickhouse")
telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherRegexp)
reader := clickhouseReader.NewReaderFromClickhouseConnection(
mockDB,
clickhouseReader.NewOptions("", ""),
testDB,
"",
telemetryStore,
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
featureFlags,
"",
true,

@ -55,7 +58,7 @@ func NewMockClickhouseReader(
nil,
)

return reader, mockDB
return reader, telemetryStore.Mock()
}

func addLogsQueryExpectation(
@ -14,6 +14,7 @@ import (
"github.com/SigNoz/signoz/pkg/config"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigrator"
"github.com/SigNoz/signoz/pkg/sqlstore"

@ -51,6 +52,9 @@ type Config struct {
// TelemetryStore config
TelemetryStore telemetrystore.Config `mapstructure:"telemetrystore"`

// Prometheus config
Prometheus prometheus.Config `mapstructure:"prometheus"`

// Alertmanager config
Alertmanager alertmanager.Config `mapstructure:"alertmanager" yaml:"alertmanager"`
}

@ -61,6 +65,7 @@ type DeprecatedFlags struct {
MaxIdleConns int
MaxOpenConns int
DialTimeout time.Duration
Config string
}

func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, deprecatedFlags DeprecatedFlags) (Config, error) {

@ -73,6 +78,7 @@ func NewConfig(ctx context.Context, resolverConfig config.ResolverConfig, deprec
sqlmigrator.NewConfigFactory(),
apiserver.NewConfigFactory(),
telemetrystore.NewConfigFactory(),
prometheus.NewConfigFactory(),
alertmanager.NewConfigFactory(),
}

@ -145,7 +151,7 @@ func mergeAndEnsureBackwardCompatibility(config *Config, deprecatedFlags Depreca

if os.Getenv("ClickHouseUrl") != "" {
fmt.Println("[Deprecated] env ClickHouseUrl is deprecated and scheduled for removal. Please use SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN instead.")
config.TelemetryStore.ClickHouse.DSN = os.Getenv("ClickHouseUrl")
config.TelemetryStore.Clickhouse.DSN = os.Getenv("ClickHouseUrl")
}

if deprecatedFlags.MaxIdleConns != 50 {

@ -176,4 +182,8 @@ func mergeAndEnsureBackwardCompatibility(config *Config, deprecatedFlags Depreca
if os.Getenv("ALERTMANAGER_API_CHANNEL_PATH") != "" {
fmt.Println("[Deprecated] env ALERTMANAGER_API_CHANNEL_PATH is deprecated and scheduled for complete removal.")
}

if deprecatedFlags.Config != "" {
fmt.Println("[Deprecated] flag --config is deprecated for passing prometheus config. The flag will be used for passing the entire SigNoz config. More details can be found at https://github.com/SigNoz/signoz/issues/6805.")
}
}
@ -8,6 +8,8 @@ import (
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/cache/rediscache"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/prometheus/clickhouseprometheus"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlitesqlstore"

@ -67,7 +69,13 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM

func NewTelemetryStoreProviderFactories() factory.NamedMap[factory.ProviderFactory[telemetrystore.TelemetryStore, telemetrystore.Config]] {
return factory.MustNewNamedMap(
clickhousetelemetrystore.NewFactory(telemetrystorehook.NewFactory()),
clickhousetelemetrystore.NewFactory(telemetrystorehook.NewSettingsFactory(), telemetrystorehook.NewLoggingFactory()),
)
}

func NewPrometheusProviderFactories(telemetryStore telemetrystore.TelemetryStore) factory.NamedMap[factory.ProviderFactory[prometheus.Prometheus, prometheus.Config]] {
return factory.MustNewNamedMap(
clickhouseprometheus.NewFactory(telemetryStore),
)
}
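NewPrometheusProviderFactories currently registers a single provider, the ClickHouse-backed one, and pkg/signoz/signoz.go (further down in this commit) resolves it through the generic factory machinery. Condensed from that hunk, for orientation only:

prometheus, err := factory.NewProviderFromNamedMap(
	ctx,
	providerSettings,
	config.Prometheus,
	NewPrometheusProviderFactories(telemetrystore),
	config.Prometheus.Provider(),
)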
@ -6,6 +6,8 @@ import (
"github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/sqlstore/sqlstoretest"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
"github.com/stretchr/testify/assert"
)

@ -33,6 +35,10 @@ func TestNewProviderFactories(t *testing.T) {
NewSQLMigrationProviderFactories(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual))
})

assert.NotPanics(t, func() {
NewPrometheusProviderFactories(telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherEqual))
})

assert.NotPanics(t, func() {
NewAlertmanagerProviderFactories(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual))
})
@ -7,6 +7,7 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/sqlmigration"
"github.com/SigNoz/signoz/pkg/sqlmigrator"
"github.com/SigNoz/signoz/pkg/sqlstore"

@ -22,6 +23,7 @@ type SigNoz struct {
Web web.Web
SQLStore sqlstore.SQLStore
TelemetryStore telemetrystore.TelemetryStore
Prometheus prometheus.Prometheus
Alertmanager alertmanager.Alertmanager
}

@ -93,6 +95,18 @@ func New(
return nil, err
}

// Initialize prometheus from the available prometheus provider factories
prometheus, err := factory.NewProviderFromNamedMap(
ctx,
providerSettings,
config.Prometheus,
NewPrometheusProviderFactories(telemetrystore),
config.Prometheus.Provider(),
)
if err != nil {
return nil, err
}

// Run migrations on the sqlstore
sqlmigrations, err := sqlmigration.New(
ctx,

@ -135,6 +149,7 @@ func New(
Web: web,
SQLStore: sqlstore,
TelemetryStore: telemetrystore,
Prometheus: prometheus,
Alertmanager: alertmanager,
}, nil
}
@ -33,7 +33,7 @@ func NewFactory(hookFactories ...factory.ProviderFactory[telemetrystore.Telemetr
func New(ctx context.Context, providerSettings factory.ProviderSettings, config telemetrystore.Config, hooks ...telemetrystore.TelemetryStoreHook) (telemetrystore.TelemetryStore, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/telemetrystore/clickhousetelemetrystore")

options, err := clickhouse.ParseDSN(config.ClickHouse.DSN)
options, err := clickhouse.ParseDSN(config.Clickhouse.DSN)
if err != nil {
return nil, err
}

@ -53,68 +53,98 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
}, nil
}

func (p *provider) ClickHouseDB() clickhouse.Conn {
func (p *provider) ClickhouseDB() clickhouse.Conn {
return p
}

func (p provider) Close() error {
func (p *provider) Close() error {
return p.clickHouseConn.Close()
}

func (p provider) Ping(ctx context.Context) error {
func (p *provider) Ping(ctx context.Context) error {
return p.clickHouseConn.Ping(ctx)
}

func (p provider) Stats() driver.Stats {
func (p *provider) Stats() driver.Stats {
return p.clickHouseConn.Stats()
}

func (p provider) Query(ctx context.Context, query string, args ...interface{}) (driver.Rows, error) {
ctx, query, args = telemetrystore.WrapBeforeQuery(p.hooks, ctx, query, args...)
func (p *provider) Query(ctx context.Context, query string, args ...interface{}) (driver.Rows, error) {
event := telemetrystore.NewQueryEvent(query, args)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
rows, err := p.clickHouseConn.Query(ctx, query, args...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, rows, err)

event.Err = err
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return rows, err
}

func (p provider) QueryRow(ctx context.Context, query string, args ...interface{}) driver.Row {
ctx, query, args = telemetrystore.WrapBeforeQuery(p.hooks, ctx, query, args...)
func (p *provider) QueryRow(ctx context.Context, query string, args ...interface{}) driver.Row {
event := telemetrystore.NewQueryEvent(query, args)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
row := p.clickHouseConn.QueryRow(ctx, query, args...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, nil, nil)

event.Err = row.Err()
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return row
}

func (p provider) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
ctx, query, args = telemetrystore.WrapBeforeQuery(p.hooks, ctx, query, args...)
func (p *provider) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
event := telemetrystore.NewQueryEvent(query, args)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
err := p.clickHouseConn.Select(ctx, dest, query, args...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, nil, err)

event.Err = err
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return err
}

func (p provider) Exec(ctx context.Context, query string, args ...interface{}) error {
ctx, query, args = telemetrystore.WrapBeforeQuery(p.hooks, ctx, query, args...)
func (p *provider) Exec(ctx context.Context, query string, args ...interface{}) error {
event := telemetrystore.NewQueryEvent(query, args)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
err := p.clickHouseConn.Exec(ctx, query, args...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, nil, err)

event.Err = err
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return err
}

func (p provider) AsyncInsert(ctx context.Context, query string, wait bool, args ...interface{}) error {
ctx, query, args = telemetrystore.WrapBeforeQuery(p.hooks, ctx, query, args...)
func (p *provider) AsyncInsert(ctx context.Context, query string, wait bool, args ...interface{}) error {
event := telemetrystore.NewQueryEvent(query, args)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
err := p.clickHouseConn.AsyncInsert(ctx, query, wait, args...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, nil, err)

event.Err = err
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return err
}

func (p provider) PrepareBatch(ctx context.Context, query string, opts ...driver.PrepareBatchOption) (driver.Batch, error) {
ctx, query, args := telemetrystore.WrapBeforeQuery(p.hooks, ctx, query)
func (p *provider) PrepareBatch(ctx context.Context, query string, opts ...driver.PrepareBatchOption) (driver.Batch, error) {
event := telemetrystore.NewQueryEvent(query, nil)

ctx = telemetrystore.WrapBeforeQuery(p.hooks, ctx, event)
batch, err := p.clickHouseConn.PrepareBatch(ctx, query, opts...)
telemetrystore.WrapAfterQuery(p.hooks, ctx, query, args, nil, err)

event.Err = err
telemetrystore.WrapAfterQuery(p.hooks, ctx, event)

return batch, err
}

func (p provider) ServerVersion() (*driver.ServerVersion, error) {
func (p *provider) ServerVersion() (*driver.ServerVersion, error) {
return p.clickHouseConn.ServerVersion()
}

func (p provider) Contributors() []string {
func (p *provider) Contributors() []string {
return p.clickHouseConn.Contributors()
}
@ -1,7 +1,6 @@
package telemetrystore

import (
"fmt"
"time"

"github.com/SigNoz/signoz/pkg/factory"

@ -10,20 +9,26 @@ import (
type Config struct {
// Provider is the provider to use
Provider string `mapstructure:"provider"`

// Connection is the connection configuration
Connection ConnectionConfig `mapstructure:",squash"`

// Clickhouse is the clickhouse configuration
ClickHouse ClickHouseConfig `mapstructure:"clickhouse"`
Clickhouse ClickhouseConfig `mapstructure:"clickhouse"`
}

type ConnectionConfig struct {
// MaxOpenConns is the maximum number of open connections to the database.
MaxOpenConns int `mapstructure:"max_open_conns"`

// MaxIdleConns is the maximum number of connections in the idle connection pool.
MaxIdleConns int `mapstructure:"max_idle_conns"`

// DialTimeout is the timeout for dialing a new connection.
DialTimeout time.Duration `mapstructure:"dial_timeout"`
}

type ClickHouseQuerySettings struct {
type QuerySettings struct {
MaxExecutionTime int `mapstructure:"max_execution_time"`
MaxExecutionTimeLeaf int `mapstructure:"max_execution_time_leaf"`
TimeoutBeforeCheckingExecutionSpeed int `mapstructure:"timeout_before_checking_execution_speed"`

@ -31,15 +36,16 @@ type ClickHouseQuerySettings struct {
MaxResultRowsForCHQuery int `mapstructure:"max_result_rows_for_ch_query"`
}

type ClickHouseConfig struct {
type ClickhouseConfig struct {
// DSN is the database source name.
DSN string `mapstructure:"dsn"`

QuerySettings ClickHouseQuerySettings `mapstructure:"settings"`
// QuerySettings is the query settings for clickhouse.
QuerySettings QuerySettings `mapstructure:"settings"`
}

func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("telemetrystore"), newConfig)

}

func newConfig() factory.Config {

@ -50,16 +56,13 @@ func newConfig() factory.Config {
MaxIdleConns: 50,
DialTimeout: 5 * time.Second,
},
ClickHouse: ClickHouseConfig{
Clickhouse: ClickhouseConfig{
DSN: "tcp://localhost:9000",
},
}

}

func (c Config) Validate() error {
if c.Provider != "clickhouse" {
return fmt.Errorf("provider: %q is not supported", c.Provider)
}

return nil
}
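The renames in this file are Go identifiers only (ClickHouse to Clickhouse, ClickHouseConfig to ClickhouseConfig, ClickHouseQuerySettings to QuerySettings); the mapstructure keys stay the same, so existing YAML and SIGNOZ_TELEMETRYSTORE_* environment variables keep working. A minimal construction with the renamed types; the values here are illustrative, echoing the defaults visible in newConfig above:

cfg := telemetrystore.Config{
	Provider: "clickhouse",
	Connection: telemetrystore.ConnectionConfig{
		MaxIdleConns: 50,
		DialTimeout:  5 * time.Second,
	},
	Clickhouse: telemetrystore.ClickhouseConfig{
		DSN: "tcp://localhost:9000",
	},
}
// cfg.Validate() only accepts the "clickhouse" provider.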
@ -13,7 +13,7 @@ import (
)

func TestNewWithEnvProvider(t *testing.T) {
t.Setenv("SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN", "http://localhost:9000")
t.Setenv("SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN", "tcp://localhost:9000")
t.Setenv("SIGNOZ_TELEMETRYSTORE_MAX__IDLE__CONNS", "60")
t.Setenv("SIGNOZ_TELEMETRYSTORE_MAX__OPEN__CONNS", "150")
t.Setenv("SIGNOZ_TELEMETRYSTORE_DIAL__TIMEOUT", "5s")

@ -33,22 +33,18 @@ func TestNewWithEnvProvider(t *testing.T) {
)
require.NoError(t, err)

actual := &Config{}
err = conf.Unmarshal("telemetrystore", actual)

actual := Config{}
err = conf.Unmarshal("telemetrystore", &actual)
require.NoError(t, err)

expected := &Config{
Provider: "clickhouse",
Connection: ConnectionConfig{
MaxOpenConns: 150,
MaxIdleConns: 60,
DialTimeout: 5 * time.Second,
},
ClickHouse: ClickHouseConfig{
DSN: "http://localhost:9000",
},
}
assert.NoError(t, actual.Validate())

expected := NewConfigFactory().New().(Config)
expected.Provider = "clickhouse"
expected.Connection.MaxOpenConns = 150
expected.Connection.MaxIdleConns = 60
expected.Connection.DialTimeout = 5 * time.Second
expected.Clickhouse.DSN = "tcp://localhost:9000"

assert.Equal(t, expected, actual)
}

@ -74,14 +70,14 @@ func TestNewWithEnvProviderWithQuerySettings(t *testing.T) {
)
require.NoError(t, err)

actual := &Config{}
err = conf.Unmarshal("telemetrystore", actual)
actual := Config{}
err = conf.Unmarshal("telemetrystore", &actual)

require.NoError(t, err)

expected := &Config{
ClickHouse: ClickHouseConfig{
QuerySettings: ClickHouseQuerySettings{
expected := Config{
Clickhouse: ClickhouseConfig{
QuerySettings: QuerySettings{
MaxExecutionTime: 10,
MaxExecutionTimeLeaf: 10,
TimeoutBeforeCheckingExecutionSpeed: 10,

@ -91,5 +87,5 @@ func TestNewWithEnvProviderWithQuerySettings(t *testing.T) {
},
}

assert.Equal(t, expected.ClickHouse.QuerySettings, actual.ClickHouse.QuerySettings)
assert.Equal(t, expected.Clickhouse.QuerySettings, actual.Clickhouse.QuerySettings)
}
20
pkg/telemetrystore/event.go
Normal file
@ -0,0 +1,20 @@
package telemetrystore

import (
"time"
)

type QueryEvent struct {
Query string
QueryArgs []any
StartTime time.Time
Err error
}

func NewQueryEvent(query string, args []any) *QueryEvent {
return &QueryEvent{
Query: query,
QueryArgs: args,
StartTime: time.Now(),
}
}
@ -4,28 +4,26 @@ import (
"context"

"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

type TelemetryStore interface {
ClickHouseDB() clickhouse.Conn
ClickhouseDB() clickhouse.Conn
}

type TelemetryStoreHook interface {
BeforeQuery(ctx context.Context, query string, args ...interface{}) (context.Context, string, []interface{})
AfterQuery(ctx context.Context, query string, args []interface{}, rows driver.Rows, err error)
BeforeQuery(ctx context.Context, event *QueryEvent) context.Context
AfterQuery(ctx context.Context, event *QueryEvent)
}

func WrapBeforeQuery(hooks []TelemetryStoreHook, ctx context.Context, query string, args ...interface{}) (context.Context, string, []interface{}) {
func WrapBeforeQuery(hooks []TelemetryStoreHook, ctx context.Context, event *QueryEvent) context.Context {
for _, hook := range hooks {
ctx, query, args = hook.BeforeQuery(ctx, query, args...)
ctx = hook.BeforeQuery(ctx, event)
}
return ctx, query, args
return ctx
}

// runAfterHooks executes all after hooks in order
func WrapAfterQuery(hooks []TelemetryStoreHook, ctx context.Context, query string, args []interface{}, rows driver.Rows, err error) {
for _, hook := range hooks {
hook.AfterQuery(ctx, query, args, rows, err)
func WrapAfterQuery(hooks []TelemetryStoreHook, ctx context.Context, event *QueryEvent) {
for i := len(hooks) - 1; i >= 0; i-- {
hooks[i].AfterQuery(ctx, event)
}
}
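The hook interface now passes a single *QueryEvent through both callbacks instead of separate query, args, rows, and error parameters, and WrapAfterQuery unwinds the hooks in reverse registration order. A minimal custom hook against the new interface; slowQueryHook is purely illustrative and is not part of this commit:

// Illustrative sketch only: a hook that warns about slow ClickHouse queries
// using the QueryEvent introduced in this commit.
package telemetrystorehooksketch

import (
	"context"
	"log/slog"
	"time"

	"github.com/SigNoz/signoz/pkg/telemetrystore"
)

type slowQueryHook struct {
	threshold time.Duration
	logger    *slog.Logger
}

func (h *slowQueryHook) BeforeQuery(ctx context.Context, _ *telemetrystore.QueryEvent) context.Context {
	return ctx
}

func (h *slowQueryHook) AfterQuery(ctx context.Context, event *telemetrystore.QueryEvent) {
	if elapsed := time.Since(event.StartTime); elapsed > h.threshold {
		h.logger.WarnContext(ctx, "slow clickhouse query",
			"db.query.text", event.Query,
			"db.duration", elapsed.String(),
			"db.query.error", event.Err,
		)
	}
}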
41
pkg/telemetrystore/telemetrystorehook/logging.go
Normal file
@ -0,0 +1,41 @@
package telemetrystorehook

import (
"context"
"log/slog"
"time"

"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/telemetrystore"
)

type logging struct {
logger *slog.Logger
level slog.Level
}

func NewLoggingFactory() factory.ProviderFactory[telemetrystore.TelemetryStoreHook, telemetrystore.Config] {
return factory.NewProviderFactory(factory.MustNewName("logging"), NewLogging)
}

func NewLogging(ctx context.Context, providerSettings factory.ProviderSettings, config telemetrystore.Config) (telemetrystore.TelemetryStoreHook, error) {
return &logging{
logger: factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystorehook").Logger(),
level: slog.LevelDebug,
}, nil
}

func (logging) BeforeQuery(ctx context.Context, event *telemetrystore.QueryEvent) context.Context {
return ctx
}

func (hook *logging) AfterQuery(ctx context.Context, event *telemetrystore.QueryEvent) {
hook.logger.Log(
ctx,
hook.level,
"::TELEMETRYSTORE-QUERY::",
"db.query.text", event.Query,
"db.query.args", event.QueryArgs,
"db.duration", time.Since(event.StartTime).String(),
)
}
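The logging hook never mutates the context; it only reports the query, its arguments, and the elapsed time after the fact. It is registered alongside the settings hook through the factory wiring shown earlier in this commit:

clickhousetelemetrystore.NewFactory(
	telemetrystorehook.NewSettingsFactory(),
	telemetrystorehook.NewLoggingFactory(),
)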
@ -5,36 +5,26 @@ import (
"encoding/json"

"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/telemetrystore"
)

type provider struct {
settings telemetrystore.ClickHouseQuerySettings
settings telemetrystore.QuerySettings
}

func NewFactory() factory.ProviderFactory[telemetrystore.TelemetryStoreHook, telemetrystore.Config] {
return factory.NewProviderFactory(factory.MustNewName("clickhousesettings"), New)
func NewSettingsFactory() factory.ProviderFactory[telemetrystore.TelemetryStoreHook, telemetrystore.Config] {
return factory.NewProviderFactory(factory.MustNewName("settings"), NewSettings)
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config telemetrystore.Config) (telemetrystore.TelemetryStoreHook, error) {
func NewSettings(ctx context.Context, providerSettings factory.ProviderSettings, config telemetrystore.Config) (telemetrystore.TelemetryStoreHook, error) {
return &provider{
settings: config.ClickHouse.QuerySettings,
settings: config.Clickhouse.QuerySettings,
}, nil
}

func (h *provider) BeforeQuery(ctx context.Context, query string, args ...interface{}) (context.Context, string, []interface{}) {
return h.clickHouseSettings(ctx, query, args...)
}

func (h *provider) AfterQuery(ctx context.Context, query string, args []interface{}, rows driver.Rows, err error) {
return
}

// clickHouseSettings adds clickhouse settings to queries
func (h *provider) clickHouseSettings(ctx context.Context, query string, args ...interface{}) (context.Context, string, []interface{}) {
func (h *provider) BeforeQuery(ctx context.Context, _ *telemetrystore.QueryEvent) context.Context {
settings := clickhouse.Settings{}

// Apply default settings

@ -69,8 +59,21 @@ func (h *provider) clickHouseSettings(ctx context.Context, query string, args ..
}
}

if ctx.Value("max_result_rows") != nil && ctx.Value("result_overflow_mode") != nil {
if maxResultRows, ok := ctx.Value("max_result_rows").(int); ok {
settings["max_result_rows"] = maxResultRows
}
settings["result_overflow_mode"] = ctx.Value("result_overflow_mode")
}

if ctx.Value("max_rows_to_group_by") != nil && ctx.Value("result_overflow_mode") != nil {
settings["max_rows_to_group_by"] = ctx.Value("max_rows_to_group_by").(int)
settings["result_overflow_mode"] = ctx.Value("result_overflow_mode")
}

ctx = clickhouse.Context(ctx, clickhouse.WithSettings(settings))
return ctx, query, args
return ctx
}

func (h *provider) AfterQuery(ctx context.Context, event *telemetrystore.QueryEvent) {
}

func (h *provider) getLogComment(ctx context.Context) string {
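The settings hook now does all of its work in BeforeQuery and reads optional row limits straight from the context. How a caller would supply those keys is illustrative only; the key names are taken from the hook above, and any helpers that set them elsewhere are outside this hunk. telemetryStore below stands for any telemetrystore.TelemetryStore instance:

// Illustrative only: untyped string keys, exactly as read by the hook above.
ctx := context.WithValue(context.Background(), "max_result_rows", 10000)
ctx = context.WithValue(ctx, "result_overflow_mode", "break")

// Queries issued with this ctx through the telemetrystore provider are sent
// to ClickHouse with max_result_rows and result_overflow_mode applied.
rows, err := telemetryStore.ClickhouseDB().Query(ctx, "SELECT 1")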
@ -2,6 +2,7 @@ package telemetrystoretest

import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/telemetrystore"
cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

@ -10,28 +11,27 @@ var _ telemetrystore.TelemetryStore = (*Provider)(nil)

// Provider represents a mock telemetry store provider for testing
type Provider struct {
mock cmock.ClickConnMockCommon
clickhouseDB cmock.ClickConnMockCommon
}

// New creates a new mock telemetry store provider
func New() (*Provider, error) {
options := &clickhouse.Options{} // Default options
mock, err := cmock.NewClickHouseNative(options)
func New(_ telemetrystore.Config, matcher sqlmock.QueryMatcher) *Provider {
clickhouseDB, err := cmock.NewClickHouseWithQueryMatcher(&clickhouse.Options{}, matcher)
if err != nil {
return nil, err
panic(err)
}

return &Provider{
mock: mock,
}, nil
clickhouseDB: clickhouseDB,
}
}

// ClickhouseDB returns the mock Clickhouse connection
func (p *Provider) ClickHouseDB() clickhouse.Conn {
return p.mock.(clickhouse.Conn)
func (p *Provider) ClickhouseDB() clickhouse.Conn {
return p.clickhouseDB.(clickhouse.Conn)
}

// Mock returns the underlying Clickhouse mock instance for setting expectations
func (p *Provider) Mock() cmock.ClickConnMockCommon {
return p.mock
return p.clickhouseDB
}
@ -3,33 +3,14 @@ package telemetrystoretest
import (
"testing"

"github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/stretchr/testify/assert"
)

func TestNew(t *testing.T) {
tests := []struct {
name string
wantErr bool
}{
{
name: "should create new provider successfully",
wantErr: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
provider, err := New()
if tt.wantErr {
assert.Error(t, err)
assert.Nil(t, provider)
return
}

assert.NoError(t, err)
provider := New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherRegexp)
assert.NotNil(t, provider)
assert.NotNil(t, provider.Mock())
assert.NotNil(t, provider.ClickHouseDB())
})
}
assert.NotNil(t, provider.ClickhouseDB())
}