Feat: enables metrics ingestion to signoz (#271)

* WIP promql support

* forked prometheus and promhouse integrated

* removing __debug_bin from git

* feat: prometheus config file to load

* feat: read prometheus config from args

* fix: WIP fixing errors in docker build

* feat: added clickhousemetricswrite exporter in metrics

* feat: changing otelcol image tag

* fix: read prometheus.yml from config flag in docker-compose

* fix: WIP clickhouse connection error

* fix: used signoz/prometheus tag v1.9.4

* chore: response format as in prometheus

* chore: query_range works with clickhouse reader and throws not implemented error for druid

* chore: moved ApiError struct to model

* feat: enabled instant query api for metrics

* chore: parser for instant query api params
This commit is contained in:
Ankit Nayan 2021-08-29 10:28:40 +05:30 committed by GitHub
parent 66b423588e
commit 32ad4ef571
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 1139 additions and 561 deletions

1
.gitignore vendored
View File

@ -36,5 +36,6 @@ frontend/cypress.env.json
**/build
**/storage
**/locust-scripts/__pycache__/
**/__debug_bin
frontend/*.env

View File

@ -22,9 +22,11 @@ services:
query-service:
image: signoz/query-service:0.3.6
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
ports:
- "8080:8080"
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
environment:
- ClickHouseUrl=tcp://clickhouse:9000

View File

@ -7,6 +7,11 @@ receivers:
protocols:
grpc:
thrift_http:
hostmetrics:
collection_interval: 10s
scrapers:
load:
memory:
processors:
batch:
send_batch_size: 1000
@ -29,6 +34,10 @@ extensions:
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/?database=signoz_metrics
resource_to_telemetry_conversion:
enabled: true
service:
extensions: [health_check, zpages]
@ -36,4 +45,8 @@ service:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
exporters: [clickhouse]
metrics:
receivers: [otlp, hostmetrics]
processors: [batch]
exporters: [clickhousemetricswrite]

View File

@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
remote_read:
- url: tcp://clickhouse:9000/?database=signoz_metrics

View File

@ -32,6 +32,12 @@ RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
WORKDIR /root
# copy the binary from builder
COPY --from=builder /go/src/github.com/signoz/signoz/pkg/query-service/bin/query-service .
# run the binary
CMD ["./query-service"]
COPY config/prometheus.yml /root/config/prometheus.yml
# run the binary
ENTRYPOINT ["./query-service"]
CMD ["-config", "/root/config/prometheus.yml"]
# CMD ["./query-service -config /root/config/prometheus.yml"]
EXPOSE 8080

View File

@ -3,13 +3,21 @@ package clickhouseReader
import (
"context"
"errors"
"flag"
"fmt"
"os"
"strconv"
"time"
_ "github.com/ClickHouse/clickhouse-go"
"github.com/go-kit/log"
"github.com/jmoiron/sqlx"
promModel "github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/util/stats"
"go.signoz.io/query-service/model"
"go.uber.org/zap"
@ -36,6 +44,8 @@ type ClickHouseReader struct {
operationsTable string
indexTable string
spansTable string
queryEngine *promql.Engine
remoteStorage *remote.Storage
}
// NewTraceReader returns a TraceReader for the database
@ -48,11 +58,55 @@ func NewReader() *ClickHouseReader {
if err != nil {
zap.S().Error(err)
}
logLevel := promlog.AllowedLevel{}
logLevel.Set("debug")
// allowedFormat := promlog.AllowedFormat{}
// allowedFormat.Set("logfmt")
// promlogConfig := promlog.Config{
// Level: &logLevel,
// Format: &allowedFormat,
// }
logger := promlog.New(logLevel)
opts := promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"),
Reg: nil,
MaxConcurrent: 20,
MaxSamples: 50000000,
Timeout: time.Duration(2 * time.Minute),
}
queryEngine := promql.NewEngine(opts)
startTime := func() (int64, error) {
return int64(promModel.Latest), nil
}
remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), startTime, time.Duration(1*time.Minute))
filename := flag.String("config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.Parse()
conf, err := config.LoadFile(*filename)
if err != nil {
zap.S().Error("couldn't load configuration (--config.file=%q): %v", filename, err)
}
err = remoteStorage.ApplyConfig(conf)
if err != nil {
zap.S().Error("Error in remoteStorage.ApplyConfig: ", err)
}
return &ClickHouseReader{
db: db,
operationsTable: options.primary.OperationsTable,
indexTable: options.primary.IndexTable,
spansTable: options.primary.SpansTable,
queryEngine: queryEngine,
remoteStorage: remoteStorage,
}
}
@ -74,6 +128,45 @@ func connect(cfg *namespaceConfig) (*sqlx.DB, error) {
return cfg.Connector(cfg)
}
// GetInstantQueryMetricsResult evaluates an instant PromQL query at the
// timestamp in queryParams against the remote (ClickHouse-backed) storage.
// It returns the query result, optional execution statistics (only when
// queryParams.Stats is non-empty), and an ApiError on failure.
func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, queryParams *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewInstantQuery(r.remoteStorage, queryParams.Query, queryParams.Time)
	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}
	// Release engine resources on every exit path.
	defer qry.Close()

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs *stats.QueryStats
	if queryParams.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}
	return res, qs, nil
}
// GetQueryRangeResult evaluates a PromQL range query (start/end/step)
// against the remote (ClickHouse-backed) storage. It returns the query
// result, optional execution statistics (only when query.Stats is
// non-empty), and an ApiError on failure.
func (r *ClickHouseReader) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewRangeQuery(r.remoteStorage, query.Query, query.Start, query.End, query.Step)
	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}
	// Release engine resources on every exit path.
	defer qry.Close()

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs *stats.QueryStats
	if query.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}
	return res, qs, nil
}
func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, error) {
if r.indexTable == "" {

View File

@ -2,8 +2,11 @@ package druidReader
import (
"context"
"fmt"
"os"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
"go.signoz.io/query-service/druidQuery"
"go.signoz.io/query-service/godruid"
"go.signoz.io/query-service/model"
@ -39,6 +42,16 @@ func initialize() {
}
// GetQueryRangeResult is part of the Reader interface; the Druid backend
// has no metrics support, so it always returns ErrorNotImplemented.
func (druid *DruidReader) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	return nil, nil, &model.ApiError{Typ: model.ErrorNotImplemented, Err: fmt.Errorf("Druid does not support metrics")}
}
// GetInstantQueryMetricsResult is part of the Reader interface; the Druid
// backend has no metrics support, so it always returns ErrorNotImplemented.
func (druid *DruidReader) GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	return nil, nil, &model.ApiError{Typ: model.ErrorNotImplemented, Err: fmt.Errorf("Druid does not support metrics")}
}
// GetServiceOverview delegates to the druidQuery package using this
// reader's SQL client.
func (druid *DruidReader) GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {
	return druidQuery.GetServiceOverview(druid.SqlClient, query)
}

View File

@ -7,10 +7,20 @@ import (
"net/http"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"github.com/posthog/posthog-go"
"github.com/prometheus/prometheus/promql"
"go.signoz.io/query-service/model"
"go.uber.org/zap"
)
// status is the top-level "status" field of an API response envelope,
// mirroring the Prometheus HTTP API ("success" or "error").
type status string

const (
	statusSuccess status = "success"
	statusError   status = "error"
)
// NewRouter creates and configures a Gorilla Router.
func NewRouter() *mux.Router {
return mux.NewRouter().UseEncodedPath()
@ -25,16 +35,18 @@ type APIHandler struct {
reader *Reader
pc *posthog.Client
distinctId string
ready func(http.HandlerFunc) http.HandlerFunc
}
// NewAPIHandler returns an APIHandler wired to the given reader and
// PostHog client, with its readiness wrapper initialized.
func NewAPIHandler(reader *Reader, pc *posthog.Client, distinctId string) *APIHandler {
	handler := &APIHandler{
		reader:     reader,
		pc:         pc,
		distinctId: distinctId,
	}
	handler.ready = handler.testReady
	return handler
}
@ -52,8 +64,100 @@ type structuredError struct {
// TraceID ui.TraceID `json:"traceID,omitempty"`
}
var corsHeaders = map[string]string{
"Access-Control-Allow-Headers": "Accept, Authorization, Content-Type, Origin",
"Access-Control-Allow-Methods": "GET, OPTIONS",
"Access-Control-Allow-Origin": "*",
"Access-Control-Expose-Headers": "Date",
}
// Enables cross-site script calls.
func setCORS(w http.ResponseWriter) {
for h, v := range corsHeaders {
w.Header().Set(h, v)
}
}
// apiFunc is the signature for handlers that produce a payload, an
// optional ApiError, and a finalizer to run after responding.
type apiFunc func(r *http.Request) (interface{}, *model.ApiError, func())

// Checks if server is ready, calls f if it is, returns 503 if it is not.
// NOTE(review): currently a pass-through stub — it always invokes f and
// never returns 503; actual readiness checking is not implemented yet.
func (aH *APIHandler) testReady(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		f(w, r)
	}
}
// response is the JSON envelope for all API responses, matching the
// Prometheus HTTP API format:
// {"status": ..., "data": ..., "errorType": ..., "error": ...}.
type response struct {
	Status    status          `json:"status"`
	Data      interface{}     `json:"data,omitempty"`
	ErrorType model.ErrorType `json:"errorType,omitempty"`
	Error     string          `json:"error,omitempty"`
}
// respondError serializes apiErr (plus optional partial data) into the
// standard error envelope and writes it with the HTTP status code mapped
// from the error category.
func (aH *APIHandler) respondError(w http.ResponseWriter, apiErr *model.ApiError, data interface{}) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	b, err := json.Marshal(&response{
		Status:    statusError,
		ErrorType: apiErr.Typ,
		Error:     apiErr.Err.Error(),
		Data:      data,
	})
	if err != nil {
		zap.S().Error("msg", "error marshalling json response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Map the error category onto an HTTP status code; unknown categories
	// fall back to 500.
	var code int
	switch apiErr.Typ {
	case model.ErrorBadData:
		code = http.StatusBadRequest
	case model.ErrorExec:
		code = http.StatusUnprocessableEntity
	case model.ErrorCanceled, model.ErrorTimeout:
		code = http.StatusServiceUnavailable
	case model.ErrorInternal:
		code = http.StatusInternalServerError
	case model.ErrorNotFound:
		code = http.StatusNotFound
	case model.ErrorNotImplemented:
		code = http.StatusNotImplemented
	default:
		code = http.StatusInternalServerError
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	if n, err := w.Write(b); err != nil {
		zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", err)
	}
}
// respond writes data as a successful JSON response envelope with a
// 200 status code.
func (aH *APIHandler) respond(w http.ResponseWriter, data interface{}) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	body, err := json.Marshal(&response{
		Status: statusSuccess,
		Data:   data,
	})
	if err != nil {
		zap.S().Error("msg", "error marshalling json response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	n, writeErr := w.Write(body)
	if writeErr != nil {
		zap.S().Error("msg", "error writing response", "bytesWritten", n, "err", writeErr)
	}
}
// RegisterRoutes registers routes for this handler on the given router
func (aH *APIHandler) RegisterRoutes(router *mux.Router) {
router.HandleFunc("/api/v1/query_range", aH.queryRangeMetrics).Methods(http.MethodGet)
router.HandleFunc("/api/v1/query", aH.queryMetrics).Methods(http.MethodGet)
router.HandleFunc("/api/v1/user", aH.user).Methods(http.MethodPost)
// router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
@ -74,6 +178,114 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router) {
router.HandleFunc("/api/v1/serviceMapDependencies", aH.serviceMapDependencies).Methods(http.MethodGet)
}
// queryRangeMetrics handles GET /api/v1/query_range: it evaluates a PromQL
// range query and writes the result in the Prometheus-compatible envelope.
func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request) {
	query, apiErrorObj := parseQueryRangeRequest(r)
	if apiErrorObj != nil {
		aH.respondError(w, apiErrorObj, nil)
		return
	}

	ctx := r.Context()
	// Honour an optional per-request timeout supplied via ?timeout=.
	if to := r.FormValue("timeout"); to != "" {
		var cancel context.CancelFunc
		timeout, err := parseMetricsDuration(to)
		if aH.handleError(w, err, http.StatusBadRequest) {
			return
		}
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	res, qs, apiError := (*aH.reader).GetQueryRangeResult(ctx, query)
	if apiError != nil {
		aH.respondError(w, apiError, nil)
		return
	}

	// Map PromQL execution errors onto the proper category and stop here;
	// previously the handler fell through after responding and wrote a
	// second (duplicate) response body and status.
	if res.Err != nil {
		zap.S().Error(res.Err)
		switch res.Err.(type) {
		case promql.ErrQueryCanceled:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorCanceled, Err: res.Err}, nil)
		case promql.ErrQueryTimeout:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorTimeout, Err: res.Err}, nil)
		default:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorExec, Err: res.Err}, nil)
		}
		return
	}

	responseData := &model.QueryData{
		ResultType: res.Value.Type(),
		Result:     res.Value,
		Stats:      qs,
	}
	aH.respond(w, responseData)
}
// queryMetrics handles GET /api/v1/query: it evaluates an instant PromQL
// query at a single timestamp and writes the result in the
// Prometheus-compatible envelope.
func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
	queryParams, apiErrorObj := parseInstantQueryMetricsRequest(r)
	if apiErrorObj != nil {
		aH.respondError(w, apiErrorObj, nil)
		return
	}

	ctx := r.Context()
	// Honour an optional per-request timeout supplied via ?timeout=.
	if to := r.FormValue("timeout"); to != "" {
		var cancel context.CancelFunc
		timeout, err := parseMetricsDuration(to)
		if aH.handleError(w, err, http.StatusBadRequest) {
			return
		}
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	res, qs, apiError := (*aH.reader).GetInstantQueryMetricsResult(ctx, queryParams)
	if apiError != nil {
		aH.respondError(w, apiError, nil)
		return
	}

	// Map PromQL execution errors onto the proper category and stop here;
	// previously the handler fell through after responding and wrote a
	// second (duplicate) response body and status.
	if res.Err != nil {
		zap.S().Error(res.Err)
		switch res.Err.(type) {
		case promql.ErrQueryCanceled:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorCanceled, Err: res.Err}, nil)
		case promql.ErrQueryTimeout:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorTimeout, Err: res.Err}, nil)
		default:
			aH.respondError(w, &model.ApiError{Typ: model.ErrorExec, Err: res.Err}, nil)
		}
		return
	}

	responseData := &model.QueryData{
		ResultType: res.Value.Type(),
		Result:     res.Value,
		Stats:      qs,
	}
	aH.respond(w, responseData)
}
func (aH *APIHandler) user(w http.ResponseWriter, r *http.Request) {
email := r.URL.Query().Get("email")

View File

@ -3,10 +3,14 @@ package app
import (
"context"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
"go.signoz.io/query-service/model"
)
type Reader interface {
GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error)
GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, error)
// GetApplicationPercentiles(ctx context.Context, query *model.ApplicationPercentileParams) ([]godruid.Timeseries, error)

View File

@ -4,10 +4,13 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"strconv"
"time"
promModel "github.com/prometheus/common/model"
"go.signoz.io/query-service/model"
"go.uber.org/zap"
)
@ -46,6 +49,95 @@ func parseGetTopEndpointsRequest(r *http.Request) (*model.GetTopEndpointsParams,
}
func parseMetricsTime(s string) (time.Time, error) {
if t, err := strconv.ParseFloat(s, 64); err == nil {
s, ns := math.Modf(t)
return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
// return time.Unix(0, t), nil
}
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
return t, nil
}
return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
}
// parseMetricsDuration converts a duration parameter into a time.Duration.
// It accepts either a float number of seconds (e.g. "30", "0.5") or a
// Prometheus-style duration string (e.g. "5m", "1h30m").
func parseMetricsDuration(s string) (time.Duration, error) {
	if seconds, err := strconv.ParseFloat(s, 64); err == nil {
		ns := seconds * float64(time.Second)
		// Reject float values that cannot be represented as an int64.
		if ns > float64(math.MaxInt64) || ns < float64(math.MinInt64) {
			return 0, fmt.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
		}
		return time.Duration(ns), nil
	}
	if d, err := promModel.ParseDuration(s); err == nil {
		return time.Duration(d), nil
	}
	return 0, fmt.Errorf("cannot parse %q to a valid duration", s)
}
// parseInstantQueryMetricsRequest extracts the parameters of an instant
// query (/api/v1/query) from the request form values. The evaluation time
// defaults to "now" when the "time" value is absent.
func parseInstantQueryMetricsRequest(r *http.Request) (*model.InstantQueryMetricsParams, *model.ApiError) {
	var ts time.Time
	if t := r.FormValue("time"); t != "" {
		var err error
		ts, err = parseMetricsTime(t)
		if err != nil {
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
		}
	} else {
		ts = time.Now()
	}

	return &model.InstantQueryMetricsParams{
		Time:  ts,
		Query: r.FormValue("query"),
		Stats: r.FormValue("stats"),
	}, nil
}
// parseQueryRangeRequest extracts and validates the parameters of a range
// query (/api/v1/query_range): start, end, step, query and stats.
func parseQueryRangeRequest(r *http.Request) (*model.QueryRangeParams, *model.ApiError) {
	start, err := parseMetricsTime(r.FormValue("start"))
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}
	end, err := parseMetricsTime(r.FormValue("end"))
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}
	if end.Before(start) {
		err := errors.New("end timestamp must not be before start time")
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	step, err := parseMetricsDuration(r.FormValue("step"))
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}
	if step <= 0 {
		err := errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer")
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	// For safety, limit the number of returned points per timeseries.
	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
	if end.Sub(start)/step > 11000 {
		err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	queryRangeParams := model.QueryRangeParams{
		Start: start,
		End:   end,
		Step:  step,
		Query: r.FormValue("query"),
		Stats: r.FormValue("stats"),
	}
	return &queryRangeParams, nil
}
func parseGetUsageRequest(r *http.Request) (*model.GetUsageParams, error) {
startTime, err := parseTime("start", r)
if err != nil {

View File

@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
remote_read:
- url: tcp://localhost:9000/?database=signoz_metrics

View File

@ -4,24 +4,57 @@ go 1.14
require (
github.com/ClickHouse/clickhouse-go v1.4.5
github.com/go-delve/delve v1.7.1-0.20210804080032-f95340ae1bf9 // indirect
github.com/gogo/protobuf v1.2.1
github.com/google/uuid v1.1.1
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
github.com/aws/aws-sdk-go v1.27.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/frankban/quicktest v1.13.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-kit/log v0.1.0
github.com/gogo/protobuf v1.2.1 // indirect
github.com/google/uuid v1.1.2
github.com/googleapis/gnostic v0.2.3-0.20180520015035-48a0ecefe2e4 // indirect
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
github.com/jaegertracing/jaeger v1.21.0
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/hashicorp/go-cleanhttp v0.5.0 // indirect
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/go-multierror v1.0.0 // indirect
github.com/hashicorp/go-sockaddr v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/memberlist v0.1.0 // indirect
github.com/jmoiron/sqlx v1.3.4
github.com/opentracing/opentracing-go v1.1.0
github.com/ory/viper v1.7.5
github.com/json-iterator/go v1.1.10
github.com/kr/text v0.2.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.0.4 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/onsi/gomega v1.14.0 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pascaldekloe/goe v0.1.0 // indirect
github.com/pierrec/lz4 v2.4.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/posthog/posthog-go v0.0.0-20200525173953-e46dc8e6b89b
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1
github.com/prometheus/procfs v0.0.8 // indirect
github.com/prometheus/prometheus v2.5.0+incompatible
github.com/rs/cors v1.7.0
github.com/shunfei/godruid v0.0.0-20171207111340-296a59dd69bd
github.com/sirupsen/logrus v1.7.0
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/smartystreets/goconvey v1.6.4
github.com/soheilhy/cmux v0.1.4
github.com/spf13/viper v1.7.0
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
go.uber.org/zap v1.16.0
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
google.golang.org/api v0.51.0 // indirect
google.golang.org/grpc/examples v0.0.0-20210803221256-6ba56c814be7 // indirect
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
)
replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.9.4

File diff suppressed because it is too large Load Diff

View File

@ -5,6 +5,38 @@ import (
"time"
)
// ApiError pairs an error with a coarse classification that the HTTP
// layer maps onto a status code.
type ApiError struct {
	Typ ErrorType
	Err error
}

// ErrorType enumerates the failure categories returned by readers and
// request parsers, mirroring the Prometheus API error taxonomy.
type ErrorType string

const (
	ErrorNone            ErrorType = ""
	ErrorTimeout         ErrorType = "timeout"
	ErrorCanceled        ErrorType = "canceled"
	ErrorExec            ErrorType = "execution"
	ErrorBadData         ErrorType = "bad_data"
	ErrorInternal        ErrorType = "internal"
	ErrorUnavailable     ErrorType = "unavailable"
	ErrorNotFound        ErrorType = "not_found"
	ErrorNotImplemented  ErrorType = "not_implemented"
)
// InstantQueryMetricsParams holds the parsed parameters of an instant
// PromQL query (/api/v1/query).
type InstantQueryMetricsParams struct {
	Time  time.Time // evaluation timestamp; defaults to "now" when absent from the request
	Query string    // the PromQL expression to evaluate
	Stats string    // when non-empty, query statistics are included in the response
}
// QueryRangeParams holds the parsed parameters of a PromQL range query
// (/api/v1/query_range).
type QueryRangeParams struct {
	Start time.Time     // inclusive start of the evaluated window
	End   time.Time     // inclusive end of the evaluated window; must not precede Start
	Step  time.Duration // resolution step between evaluated points; must be positive
	Query string        // the PromQL expression to evaluate
	Stats string        // when non-empty, query statistics are included in the response
}
type GetTopEndpointsParams struct {
StartTime string
EndTime string

View File

@ -5,8 +5,17 @@ import (
"fmt"
"strconv"
"time"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
)
// QueryData is the "data" payload of a metrics query response, matching
// the Prometheus HTTP API shape (resultType/result/stats).
type QueryData struct {
	ResultType promql.ValueType  `json:"resultType"`
	Result     promql.Value      `json:"result"`
	Stats      *stats.QueryStats `json:"stats,omitempty"`
}
type ServiceItem struct {
ServiceName string `json:"serviceName" db:"serviceName"`
Percentile99 float32 `json:"p99" db:"p99"`