Merge pull request #6452 from SigNoz/release/v0.58.x

Release/v0.58.1
Prashant Shahi 2024-11-15 22:36:10 +05:30 committed by GitHub
commit 07c24bcdf3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
88 changed files with 7459 additions and 944 deletions

View File

@ -146,7 +146,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.58.0
image: signoz/query-service:0.58.1
command:
[
"-config=/root/config/prometheus.yml",
@ -186,7 +186,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:0.58.0
image: signoz/frontend:0.58.1
deploy:
restart_policy:
condition: on-failure

View File

@ -66,28 +66,6 @@ processors:
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
timeout: 2s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@ -138,6 +116,8 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
# logging: {}
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
@ -161,20 +141,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@ -162,7 +162,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.58.0}
image: signoz/query-service:${DOCKER_TAG:-0.58.1}
container_name: signoz-query-service
command:
[
@ -201,7 +201,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.58.0}
image: signoz/frontend:${DOCKER_TAG:-0.58.1}
container_name: signoz-frontend
restart: on-failure
depends_on:

View File

@ -167,7 +167,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.58.0}
image: signoz/query-service:${DOCKER_TAG:-0.58.1}
container_name: signoz-query-service
command:
[
@ -208,7 +208,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.58.0}
image: signoz/frontend:${DOCKER_TAG:-0.58.1}
container_name: signoz-frontend
restart: on-failure
depends_on:

View File

@ -57,35 +57,11 @@ receivers:
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
metrics_flush_interval: 60s
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# This is added to ensure the uniqueness of the timeseries
# Otherwise, identical timeseries produced by multiple replicas of
# collectors result in incorrect APM metrics
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
@ -149,6 +125,8 @@ exporters:
enabled: true
clickhousemetricswrite/prometheus:
endpoint: tcp://clickhouse:9000/signoz_metrics
clickhousemetricswritev2:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
@ -168,20 +146,20 @@ service:
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/hostmetrics:
receivers: [hostmetrics]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
exporters: [clickhousemetricswrite, clickhousemetricswritev2]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [clickhousemetricswrite/prometheus]
exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]

View File

@ -40,6 +40,7 @@ type APIHandlerOptions struct {
// Querier Influx Interval
FluxInterval time.Duration
UseLogsNewSchema bool
UseLicensesV3 bool
}
type APIHandler struct {
@ -65,6 +66,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseLicensesV3: opts.UseLicensesV3,
})
if err != nil {
@ -173,10 +175,25 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew
router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut)
// v2
router.HandleFunc("/api/v2/licenses",
am.ViewAccess(ah.listLicensesV2)).
Methods(http.MethodGet)
// v3
router.HandleFunc("/api/v3/licenses",
am.ViewAccess(ah.listLicensesV3)).
Methods(http.MethodGet)
router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.applyLicenseV3)).
Methods(http.MethodPost)
router.HandleFunc("/api/v3/licenses",
am.AdminAccess(ah.refreshLicensesV3)).
Methods(http.MethodPut)
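Taken together, the three registrations above expose list (GET, view access), apply (POST, admin), and refresh (PUT, admin) on a single path. A minimal sketch of a client driving the apply endpoint — the base URL and the bearer-token auth header are assumptions for illustration, not taken from this diff:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"net/http"
	)

	// applyLicense posts a license key to a hypothetical SigNoz instance at baseURL.
	// The body shape matches ApplyLicenseRequest ({"key": "..."}) defined later in this diff.
	func applyLicense(baseURL, token, licenseKey string) error {
		payload, err := json.Marshal(map[string]string{"key": licenseKey})
		if err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v3/licenses", bytes.NewReader(payload))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+token) // admin access required; header is an assumption
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		// the applyLicenseV3 handler responds 202 Accepted on success
		if resp.StatusCode != http.StatusAccepted {
			return fmt.Errorf("unexpected status: %s", resp.Status)
		}
		return nil
	}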
// v4
router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost)
// Gateway

View File

@ -9,6 +9,7 @@ import (
"go.signoz.io/signoz/ee/query-service/constants"
"go.signoz.io/signoz/ee/query-service/model"
"go.signoz.io/signoz/pkg/http/render"
"go.uber.org/zap"
)
@ -59,6 +60,21 @@ type billingDetails struct {
} `json:"data"`
}
type ApplyLicenseRequest struct {
LicenseKey string `json:"key"`
}
type ListLicenseResponse map[string]interface{}
func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse {
listLicenses := []ListLicenseResponse{}
for _, license := range licensesV3 {
listLicenses = append(listLicenses, license.Data)
}
return listLicenses
}
func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
@ -88,6 +104,51 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, license)
}
func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicensesV3(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses))
}
// this function is called by zeus when inserting licenses in the query-service
func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) {
var licenseKey ApplyLicenseRequest
if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
if licenseKey.LicenseKey == "" {
RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
return
}
_, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey)
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusAccepted, nil)
}
func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) {
apiError := ah.LM().RefreshLicense(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) {
type checkoutResponse struct {
@ -154,11 +215,45 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) {
ah.Respond(w, billingResponse.Data)
}
func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License {
licensesV2 := []model.License{}
for _, l := range licenses {
licenseV2 := model.License{
Key: l.Key,
ActivationId: "",
PlanDetails: "",
FeatureSet: l.Features,
ValidationMessage: "",
IsCurrent: l.IsCurrent,
LicensePlan: model.LicensePlan{
PlanKey: l.PlanName,
ValidFrom: l.ValidFrom,
ValidUntil: l.ValidUntil,
Status: l.Status},
}
licensesV2 = append(licensesV2, licenseV2)
}
return licensesV2
}
func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) {
licenses, apiError := ah.LM().GetLicenses(context.Background())
if apiError != nil {
RespondError(w, apiError, nil)
var licenses []model.License
if ah.UseLicensesV3 {
licensesV3, err := ah.LM().GetLicensesV3(r.Context())
if err != nil {
RespondError(w, err, nil)
return
}
licenses = convertLicenseV3ToLicenseV2(licensesV3)
} else {
_licenses, apiError := ah.LM().GetLicenses(r.Context())
if apiError != nil {
RespondError(w, apiError, nil)
return
}
licenses = _licenses
}
resp := model.Licenses{

View File

@ -31,7 +31,6 @@ import (
"go.signoz.io/signoz/ee/query-service/rules"
baseauth "go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/migrate"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
licensepkg "go.signoz.io/signoz/ee/query-service/license"
@ -78,6 +77,7 @@ type ServerOptions struct {
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseLicensesV3 bool
}
// Server runs HTTP api service
@ -134,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
// initiate license manager
lm, err := licensepkg.StartManager("sqlite", localDB)
lm, err := licensepkg.StartManager("sqlite", localDB, serverOptions.UseLicensesV3)
if err != nil {
return nil, err
}
@ -270,6 +270,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseLicensesV3: serverOptions.UseLicensesV3,
}
apiHandler, err := api.NewAPIHandler(apiOpts)
@ -348,7 +349,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e
}
if user.User.OrgId == "" {
return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims"))
return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims"))
}
return user, nil
@ -765,8 +766,9 @@ func makeRulesManager(
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
PrepareTaskFunc: rules.PrepareTaskFunc,
PrepareTestRuleFunc: rules.TestNotification,
UseLogsNewSchema: useLogsNewSchema,
}
// create Manager

View File

@ -13,6 +13,7 @@ var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
var ZeusURL = GetOrDefaultEnv("ZEUS_URL", "ZeusURL")
func GetOrDefaultEnv(key string, fallback string) string {
v := os.Getenv(key)

View File

@ -13,3 +13,8 @@ type ActivationResponse struct {
ActivationId string `json:"ActivationId"`
PlanDetails string `json:"PlanDetails"`
}
type ValidateLicenseResponse struct {
Status status `json:"status"`
Data map[string]interface{} `json:"data"`
}

View File

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
@ -23,12 +24,14 @@ const (
)
type Client struct {
Prefix string
Prefix string
GatewayUrl string
}
func New() *Client {
return &Client{
Prefix: constants.LicenseSignozIo,
Prefix: constants.LicenseSignozIo,
GatewayUrl: constants.ZeusURL,
}
}
@ -116,6 +119,60 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
}
func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
// Creating an HTTP client with a timeout for better control
client := &http.Client{
Timeout: 10 * time.Second,
}
req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %w", err)))
}
// Setting the custom header
req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
response, err := client.Do(req)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make post request: %w", err)))
}
defer response.Body.Close()

body, err := io.ReadAll(response.Body)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
}
switch response.StatusCode {
case 200:
a := ValidateLicenseResponse{}
err = json.Unmarshal(body, &a)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to marshal license validation response"))
}
license, err := model.NewLicenseV3(a.Data)
if err != nil {
return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
}
return license, nil
case 400:
return nil, model.BadRequest(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
case 401:
return nil, model.Unauthorized(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
default:
return nil, model.InternalError(errors.Wrap(errors.New(string(body)),
fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
}
}
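For reference, the 200 branch above decodes a body shaped roughly as below before handing data to model.NewLicenseV3. This is a fragment in the context of this package, reusing its imports; the field names under data match the LicenseV3 model and its tests later in this diff, while the concrete values and the top-level status string are illustrative only:

	// Illustrative Zeus response body; values are made up for this sketch.
	raw := []byte(`{
		"status": "success",
		"data": {
			"id": "lic-123",
			"key": "signoz-license-key",
			"status": "ACTIVE",
			"plan": {"name": "TEAMS"},
			"valid_from": 1730899309,
			"valid_until": -1
		}
	}`)
	var a ValidateLicenseResponse
	if err := json.Unmarshal(raw, &a); err != nil {
		// handle decode failure
	}
	license, err := model.NewLicenseV3(a.Data) // plan.name drives the default feature set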
func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, POST, url, body)
if err != nil {

View File

@ -3,10 +3,12 @@ package license
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"go.signoz.io/signoz/ee/query-service/license/sqlite"
"go.signoz.io/signoz/ee/query-service/model"
@ -48,6 +50,34 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
return licenses, nil
}
func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
licensesData := []model.LicenseDB{}
licenseV3Data := []*model.LicenseV3{}
query := "SELECT id,key,data FROM licenses_v3"
err := r.db.Select(&licensesData, query)
if err != nil {
return nil, fmt.Errorf("failed to get licenses from db: %v", err)
}
for _, l := range licensesData {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
}
licenseV3Data = append(licenseV3Data, license)
}
return licenseV3Data, nil
}
// GetActiveLicense fetches the latest active license from DB.
// If the license is not present, expect a nil license and a nil error in the output.
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
@ -79,6 +109,45 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
return active, nil
}
func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
var err error
licenses := []model.LicenseDB{}
query := "SELECT id,key,data FROM licenses_v3"
err = r.db.Select(&licenses, query)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
}
var active *model.LicenseV3
for _, l := range licenses {
var licenseData map[string]interface{}
err := json.Unmarshal([]byte(l.Data), &licenseData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
}
license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
if err != nil {
return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
}
if active == nil &&
(license.ValidFrom != 0) &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
if active != nil &&
license.ValidFrom > active.ValidFrom &&
(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
active = license
}
}
return active, nil
}
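In effect the loop keeps the currently-valid license with the latest ValidFrom, treating ValidUntil == -1 as "never expires". A condensed restatement of the same selection rule over the already-parsed licenses (a clarifying sketch, not code from the PR; parsedLicenses stands in for the []*model.LicenseV3 values built in the loop above):

	now := time.Now().Unix()
	var active *model.LicenseV3
	for _, license := range parsedLicenses {
		stillValid := license.ValidUntil == -1 || license.ValidUntil > now
		if license.ValidFrom != 0 && stillValid &&
			(active == nil || license.ValidFrom > active.ValidFrom) {
			active = license
		}
	}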
// InsertLicense inserts a new license in db
func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
@ -204,3 +273,59 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
}
return nil
}
// InsertLicenseV3 inserts a new license v3 in db
func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
// the license is a Zeus entity, so the entire payload is stored here without defining a schema
licenseData, err := json.Marshal(l.Data)
if err != nil {
return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
}
_, err = r.db.ExecContext(ctx,
query,
l.ID,
l.Key,
string(licenseData),
)
if err != nil {
if sqliteErr, ok := err.(sqlite3.Error); ok {
if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr))
return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr}
}
}
zap.L().Error("error in inserting license data: ", zap.Error(err))
return &model.ApiError{Typ: basemodel.ErrorExec, Err: err}
}
return nil
}
// UpdateLicenseV3 updates an existing license v3 in db
func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
// the key and id for the license can't change so only update the data here!
query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
license, err := json.Marshal(l.Data)
if err != nil {
return fmt.Errorf("insert license failed: license marshal error")
}
_, err = r.db.ExecContext(ctx,
query,
license,
l.ID,
)
if err != nil {
zap.L().Error("error in updating license data: ", zap.Error(err))
return fmt.Errorf("failed to update license in db: %v", err)
}
return nil
}

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"sync"
@ -45,11 +46,12 @@ type Manager struct {
failedAttempts uint64
// keep track of active license and features
activeLicense *model.License
activeFeatures basemodel.FeatureSet
activeLicense *model.License
activeLicenseV3 *model.LicenseV3
activeFeatures basemodel.FeatureSet
}
func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...basemodel.Feature) (*Manager, error) {
if LM != nil {
return LM, nil
}
@ -65,7 +67,7 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
repo: &repo,
}
if err := m.start(features...); err != nil {
if err := m.start(useLicensesV3, features...); err != nil {
return m, err
}
LM = m
@ -73,8 +75,14 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
}
// start loads active license in memory and initiates validator
func (lm *Manager) start(features ...basemodel.Feature) error {
err := lm.LoadActiveLicense(features...)
func (lm *Manager) start(useLicensesV3 bool, features ...basemodel.Feature) error {
var err error
if useLicensesV3 {
err = lm.LoadActiveLicenseV3(features...)
} else {
err = lm.LoadActiveLicense(features...)
}
return err
}
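The new boolean travels from the --use-licenses-v3 CLI flag (added to main.go later in this diff) through ServerOptions into StartManager. A condensed sketch of that wiring, with unrelated options and error handling trimmed; localDB stands for the already-opened sqlite handle:

	useLicensesV3 := flag.Bool("use-licenses-v3", false, "use licenses_v3 schema for licenses")
	flag.Parse()

	// the third argument decides whether start() loads v2 or v3 licenses
	lm, err := license.StartManager("sqlite", localDB, *useLicensesV3)
	if err != nil {
		log.Fatal(err)
	}
	_ = lm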
@ -108,6 +116,31 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
go lm.Validator(context.Background())
}
}
func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
lm.mutex.Lock()
defer lm.mutex.Unlock()
if l == nil {
return
}
lm.activeLicenseV3 = l
lm.activeFeatures = append(l.Features, features...)
// set default features
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Panic("Couldn't activate features", zap.Error(err))
}
if !lm.validatorRunning {
// we want to make sure only one validator runs,
// we already have lock() so good to go
lm.validatorRunning = true
go lm.ValidatorV3(context.Background())
}
}
func setDefaultFeatures(lm *Manager) {
@ -137,6 +170,28 @@ func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
return nil
}
func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
active, err := lm.repo.GetActiveLicenseV3(context.Background())
if err != nil {
return err
}
if active != nil {
lm.SetActiveV3(active, features...)
} else {
zap.L().Info("No active license found, defaulting to basic plan")
// if no active license is found, we default to basic(free) plan with all default features
lm.activeFeatures = model.BasicPlan
setDefaultFeatures(lm)
err := lm.InitFeatures(lm.activeFeatures)
if err != nil {
zap.L().Error("Couldn't initialize features", zap.Error(err))
return err
}
}
return nil
}
func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicenses(ctx)
@ -163,6 +218,23 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
return
}
func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
licenses, err := lm.repo.GetLicensesV3(ctx)
if err != nil {
return nil, model.InternalError(err)
}
for _, l := range licenses {
if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
l.IsCurrent = true
}
response = append(response, l)
}
return response, nil
}
// Validator periodically validates the active license
func (lm *Manager) Validator(ctx context.Context) {
defer close(lm.terminated)
@ -187,6 +259,30 @@ func (lm *Manager) Validator(ctx context.Context) {
}
}
// ValidatorV3 periodically validates the active v3 license
func (lm *Manager) ValidatorV3(ctx context.Context) {
defer close(lm.terminated)
tick := time.NewTicker(validationFrequency)
defer tick.Stop()
lm.ValidateV3(ctx)
for {
select {
case <-lm.done:
return
default:
select {
case <-lm.done:
return
case <-tick.C:
lm.ValidateV3(ctx)
}
}
}
}
// Validate validates the current active license
func (lm *Manager) Validate(ctx context.Context) (reterr error) {
zap.L().Info("License validation started")
@ -254,6 +350,54 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
return nil
}
// todo[vikrantgupta25]: check the comparison here between old and new license!
func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
if apiError != nil {
zap.L().Error("failed to validate license", zap.Error(apiError.Err))
return apiError
}
err := lm.repo.UpdateLicenseV3(ctx, license)
if err != nil {
return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
}
lm.SetActiveV3(license)
return nil
}
func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) {
zap.L().Info("License validation started")
if lm.activeLicenseV3 == nil {
return nil
}
defer func() {
lm.mutex.Lock()
lm.lastValidated = time.Now().Unix()
if reterr != nil {
zap.L().Error("License validation completed with error", zap.Error(reterr))
atomic.AddUint64(&lm.failedAttempts, 1)
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
map[string]interface{}{"err": reterr.Error()}, "", true, false)
} else {
zap.L().Info("License validation completed with no errors")
}
lm.mutex.Unlock()
}()
err := lm.RefreshLicense(ctx)
if err != nil {
return err
}
return nil
}
// Activate activates a license key with signoz server
func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
defer func() {
@ -298,6 +442,35 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m
return l, nil
}
func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) {
defer func() {
if errResponse != nil {
userEmail, err := auth.GetEmailFromJwt(ctx)
if err == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false)
}
}
}()
license, apiError := validate.ValidateLicenseV3(licenseKey)
if apiError != nil {
zap.L().Error("failed to get the license", zap.Error(apiError.Err))
return nil, apiError
}
// insert the new license to the sqlite db
err := lm.repo.InsertLicenseV3(ctx, license)
if err != nil {
zap.L().Error("failed to activate license", zap.Error(err))
return nil, err
}
// license is valid, activate it
lm.SetActiveV3(license)
return license, nil
}
// CheckFeature will be internally used by backend routines
// for feature gating
func (lm *Manager) CheckFeature(featureKey string) error {

View File

@ -48,5 +48,16 @@ func InitDB(db *sqlx.DB) error {
return fmt.Errorf("error in creating feature_status table: %s", err.Error())
}
table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 (
id TEXT PRIMARY KEY,
key TEXT NOT NULL UNIQUE,
data TEXT
);`
_, err = db.Exec(table_schema)
if err != nil {
return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error())
}
return nil
}

View File

@ -94,6 +94,7 @@ func main() {
var cluster string
var useLogsNewSchema bool
var useLicensesV3 bool
var cacheConfigPath, fluxInterval string
var enableQueryServiceLogOTLPExport bool
var preferSpanMetrics bool
@ -104,6 +105,7 @@ func main() {
var gatewayUrl string
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses")
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)")
flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
@ -143,6 +145,7 @@ func main() {
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseLicensesV3: useLicensesV3,
}
// Read the jwt secret key

View File

@ -46,6 +46,13 @@ func BadRequest(err error) *ApiError {
}
}
func Unauthorized(err error) *ApiError {
return &ApiError{
Typ: basemodel.ErrorUnauthorized,
Err: err,
}
}
// BadRequestStr returns a ApiError object of bad request for string input
func BadRequestStr(s string) *ApiError {
return &ApiError{

View File

@ -3,6 +3,8 @@ package model
import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
@ -104,3 +106,144 @@ type SubscriptionServerResp struct {
Status string `json:"status"`
Data Licenses `json:"data"`
}
type Plan struct {
Name string `json:"name"`
}
type LicenseDB struct {
ID string `json:"id"`
Key string `json:"key"`
Data string `json:"data"`
}
type LicenseV3 struct {
ID string
Key string
Data map[string]interface{}
PlanName string
Features basemodel.FeatureSet
Status string
IsCurrent bool
ValidFrom int64
ValidUntil int64
}
func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
var zeroValue T
if val, ok := data[key]; ok {
if value, ok := val.(T); ok {
return value, nil
}
return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
}
return zeroValue, fmt.Errorf("%s key is missing", key)
}
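One detail worth noting: encoding/json decodes every JSON number into float64, which is why valid_from and valid_until are extracted below as float64 and only then cast to int64. A quick illustration of the helper with hypothetical data:

	data := map[string]interface{}{
		"id":         "lic-123",
		"valid_from": float64(1730899309), // json.Unmarshal always produces float64 for numbers
	}
	id, _ := extractKeyFromMapStringInterface[string](data, "id")
	// id == "lic-123"
	_, err := extractKeyFromMapStringInterface[int64](data, "valid_from")
	// err: "valid_from key is not a valid int64" - the stored value is a float64
	vf, _ := extractKeyFromMapStringInterface[float64](data, "valid_from")
	// vf == 1730899309; callers cast to int64 afterwards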
func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
var features basemodel.FeatureSet
// extract id from data
licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
if err != nil {
return nil, err
}
delete(data, "id")
// extract key from data
licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
if err != nil {
return nil, err
}
delete(data, "key")
// extract status from data
status, err := extractKeyFromMapStringInterface[string](data, "status")
if err != nil {
return nil, err
}
planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
if err != nil {
return nil, err
}
planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
if err != nil {
return nil, err
}
// if license status is inactive then default it to basic
if status == LicenseStatusInactive {
planName = PlanNameBasic
}
featuresFromZeus := basemodel.FeatureSet{}
if _features, ok := data["features"]; ok {
featuresData, err := json.Marshal(_features)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal features data")
}
if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal features data")
}
}
switch planName {
case PlanNameTeams:
features = append(features, ProPlan...)
case PlanNameEnterprise:
features = append(features, EnterprisePlan...)
case PlanNameBasic:
features = append(features, BasicPlan...)
default:
features = append(features, BasicPlan...)
}
if len(featuresFromZeus) > 0 {
for _, feature := range featuresFromZeus {
exists := false
for i, existingFeature := range features {
if existingFeature.Name == feature.Name {
features[i] = feature // Replace existing feature
exists = true
break
}
}
if !exists {
features = append(features, feature) // Append if it doesn't exist
}
}
}
data["features"] = features
_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
if err != nil {
_validFrom = 0
}
validFrom := int64(_validFrom)
_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
if err != nil {
_validUntil = 0
}
validUntil := int64(_validUntil)
return &LicenseV3{
ID: licenseID,
Key: licenseKey,
Data: data,
PlanName: planName,
Features: features,
ValidFrom: validFrom,
ValidUntil: validUntil,
Status: status,
}, nil
}
func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
licenseDataWithIdAndKey := data
licenseDataWithIdAndKey["id"] = id
licenseDataWithIdAndKey["key"] = key
return NewLicenseV3(licenseDataWithIdAndKey)
}
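NewLicenseV3 deletes id and key from data, and InsertLicenseV3 later persists only that stripped Data blob, so NewLicenseV3WithIDAndKey is the inverse used when reading rows back: it re-injects the stored id and key columns before re-parsing. A round-trip sketch with hypothetical values:

	// Hypothetical stored row: id and key live in their own columns.
	raw := map[string]interface{}{
		"status":      "ACTIVE",
		"plan":        map[string]interface{}{"name": "ENTERPRISE"},
		"valid_from":  float64(1730899309),
		"valid_until": float64(-1),
	}
	license, err := NewLicenseV3WithIDAndKey("lic-123", "stored-key", raw)
	if err != nil {
		// handle malformed stored data
	}
	// license.ID == "lic-123", license.PlanName == "ENTERPRISE",
	// license.Features seeded from EnterprisePlan, license.ValidUntil == -1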

View File

@ -0,0 +1,170 @@
package model
import (
"encoding/json"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
)
func TestNewLicenseV3(t *testing.T) {
testCases := []struct {
name string
data []byte
pass bool
expected *LicenseV3
error error
}{
{
name: "Error for missing license id",
data: []byte(`{}`),
pass: false,
error: errors.New("id key is missing"),
},
{
name: "Error for license id not being a valid string",
data: []byte(`{"id": 10}`),
pass: false,
error: errors.New("id key is not a valid string"),
},
{
name: "Error for missing license key",
data: []byte(`{"id":"does-not-matter"}`),
pass: false,
error: errors.New("key key is missing"),
},
{
name: "Error for invalid string license key",
data: []byte(`{"id":"does-not-matter","key":10}`),
pass: false,
error: errors.New("key key is not a valid string"),
},
{
name: "Error for missing license status",
data: []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
pass: false,
error: errors.New("status key is missing"),
},
{
name: "Error for invalid string license status",
data: []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
pass: false,
error: errors.New("status key is not a valid string"),
},
{
name: "Error for missing license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
pass: false,
error: errors.New("plan key is missing"),
},
{
name: "Error for invalid json license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
pass: false,
error: errors.New("plan key is not a valid map[string]interface {}"),
},
{
name: "Error for invalid license plan",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
pass: false,
error: errors.New("name key is missing"),
},
{
name: "Parse the entire license properly",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "ACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameTeams,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "Fallback to basic plan if license status is inactive",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"category": "FREE",
"status": "INACTIVE",
"valid_from": float64(1730899309),
"valid_until": float64(-1),
},
PlanName: PlanNameBasic,
ValidFrom: 1730899309,
ValidUntil: -1,
Status: "INACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
{
name: "fallback states for validFrom and validUntil",
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
pass: true,
expected: &LicenseV3{
ID: "does-not-matter",
Key: "does-not-matter-key",
Data: map[string]interface{}{
"plan": map[string]interface{}{
"name": "TEAMS",
},
"valid_from": 1234.456,
"valid_until": 5678.567,
"category": "FREE",
"status": "ACTIVE",
},
PlanName: PlanNameTeams,
ValidFrom: 1234,
ValidUntil: 5678,
Status: "ACTIVE",
IsCurrent: false,
Features: model.FeatureSet{},
},
},
}
for _, tc := range testCases {
var licensePayload map[string]interface{}
err := json.Unmarshal(tc.data, &licensePayload)
require.NoError(t, err)
license, err := NewLicenseV3(licensePayload)
if license != nil {
license.Features = make(model.FeatureSet, 0)
delete(license.Data, "features")
}
if tc.pass {
require.NoError(t, err)
require.NotNil(t, license)
assert.Equal(t, tc.expected, license)
} else {
require.Error(t, err)
assert.EqualError(t, err, tc.error.Error())
require.Nil(t, license)
}
}
}

View File

@ -9,6 +9,17 @@ const SSO = "SSO"
const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"
var (
PlanNameEnterprise = "ENTERPRISE"
PlanNameTeams = "TEAMS"
PlanNameBasic = "BASIC"
)
var (
LicenseStatusInactive = "INACTIVE"
)
const DisableUpsell = "DISABLE_UPSELL"
const Onboarding = "ONBOARDING"
const ChatSupport = "CHAT_SUPPORT"

View File

@ -1,10 +1,15 @@
package rules
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
baserules "go.signoz.io/signoz/pkg/query-service/rules"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
@ -79,6 +84,106 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
return task, nil
}
// TestNotification prepares a dummy rule for the given rule parameters and
// sends a test notification. Returns the alert count and an error (if any).
func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) {
ctx := context.Background()
if opts.Rule == nil {
return 0, basemodel.BadRequest(fmt.Errorf("rule is required"))
}
parsedRule := opts.Rule
var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
alertname = uuid.New().String()
}
// append name to indicate this is test alert
parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix)
var rule baserules.Rule
var err error
if parsedRule.RuleType == baserules.RuleTypeThreshold {
// add special labels for test alerts
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
// create a threshold rule
rule, err = baserules.NewThresholdRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeProm {
// create promql rule
rule, err = baserules.NewPromRule(
alertname,
parsedRule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == baserules.RuleTypeAnomaly {
// create anomaly rule
rule, err = NewAnomalyRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.Cache,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, basemodel.BadRequest(err)
}
} else {
return 0, basemodel.BadRequest(fmt.Errorf("failed to derive ruletype with given information"))
}
// set timestamp to current utc time
ts := time.Now().UTC()
count, err := rule.Eval(ctx, ts)
if err != nil {
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
}
alertsFound, ok := count.(int)
if !ok {
return 0, basemodel.InternalError(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Minute, opts.NotifyFunc)
return alertsFound, nil
}
// newTask returns an appropriate group for
// rule type
func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task {

View File

@ -42,7 +42,7 @@
"@radix-ui/react-tooltip": "1.0.7",
"@sentry/react": "7.102.1",
"@sentry/webpack-plugin": "2.16.0",
"@signozhq/design-tokens": "0.0.8",
"@signozhq/design-tokens": "1.1.4",
"@uiw/react-md-editor": "3.23.5",
"@visx/group": "3.3.0",
"@visx/shape": "3.5.0",

View File

@ -0,0 +1,24 @@
{
"metricGraphCategory": {
"brokerMetrics": {
"title": "Broker Metrics",
"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
},
"consumerMetrics": {
"title": "Consumer Metrics",
"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
},
"producerMetrics": {
"title": "Producer Metrics",
"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
},
"brokerJVMMetrics": {
"title": "Broker JVM Metrics",
"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
},
"partitionMetrics": {
"title": "Partition Metrics",
"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
}
}
}

View File

@ -1,30 +1,54 @@
{
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details"
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
}
"breadcrumb": "Messaging Queues",
"header": "Kafka / Overview",
"overview": {
"title": "Start sending data in as little as 20 minutes",
"subtitle": "Connect and Monitor Your Data Streams"
},
"configureConsumer": {
"title": "Configure Consumer",
"description": "Add consumer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"configureProducer": {
"title": "Configure Producer",
"description": "Add producer data sources to gain insights and enhance monitoring.",
"button": "Get Started"
},
"monitorKafka": {
"title": "Monitor kafka",
"description": "Add your Kafka source to gain insights and enhance activity tracking.",
"button": "Get Started"
},
"summarySection": {
"viewDetailsButton": "View Details",
"consumer": {
"title": "Consumer lag view",
"description": "Connect and Monitor Your Data Streams"
},
"producer": {
"title": "Producer latency view",
"description": "Connect and Monitor Your Data Streams"
},
"partition": {
"title": "Partition Latency view",
"description": "Connect and Monitor Your Data Streams"
},
"dropRate": {
"title": "Drop Rate view",
"description": "Connect and Monitor Your Data Streams"
},
"metricPage": {
"title": "Metric View",
"description": "Connect and Monitor Your Data Streams"
}
},
"confirmModal": {
"content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.",
"okText": "Proceed"
},
"overviewSummarySection": {
"title": "Monitor Your Data Streams",
"subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time."
}
}

View File

@ -0,0 +1,24 @@
{
"metricGraphCategory": {
"brokerMetrics": {
"title": "Broker Metrics",
"description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics"
},
"consumerMetrics": {
"title": "Consumer Metrics",
"description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed."
},
"producerMetrics": {
"title": "Producer Metrics",
"description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery."
},
"brokerJVMMetrics": {
"title": "Broker JVM Metrics",
"description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate."
},
"partitionMetrics": {
"title": "Partition Metrics",
"description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas."
}
}
}

View File

@ -37,6 +37,10 @@
"dropRate": {
"title": "Drop Rate view",
"description": "Connect and Monitor Your Data Streams"
},
"metricPage": {
"title": "Metric View",
"description": "Connect and Monitor Your Data Streams"
}
},
"confirmModal": {

View File

@ -22,6 +22,7 @@ import AppActions from 'types/actions';
import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
import { Organization } from 'types/api/user/getOrganization';
import AppReducer from 'types/reducer/app';
import { isCloudUser } from 'utils/app';
import { routePermission } from 'utils/permission';
import routes, {
@ -76,6 +77,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const { t } = useTranslation(['common']);
const isCloudUserVal = isCloudUser();
const localStorageUserAuthToken = getInitialUserTokenRefreshToken();
const dispatch = useDispatch<Dispatch<AppActions>>();
@ -143,6 +146,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const handleRedirectForOrgOnboarding = (key: string): void => {
if (
isLoggedInState &&
isCloudUserVal &&
!isFetchingOrgPreferences &&
!isLoadingOrgUsers &&
!isEmpty(orgUsers?.payload) &&
@ -158,6 +162,10 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
history.push(ROUTES.ONBOARDING);
}
}
if (!isCloudUserVal && key === 'ONBOARDING') {
history.push(ROUTES.APPLICATION);
}
};
const handleUserLoginIfTokenPresent = async (
@ -250,7 +258,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const handleRouting = (): void => {
const showOrgOnboarding = shouldShowOnboarding();
if (showOrgOnboarding && !isOnboardingComplete) {
if (showOrgOnboarding && !isOnboardingComplete && isCloudUserVal) {
history.push(ROUTES.ONBOARDING);
} else {
history.push(ROUTES.APPLICATION);

View File

@ -18,4 +18,5 @@ export const REACT_QUERY_KEY = {
GET_ALL_ALLERTS: 'GET_ALL_ALLERTS',
REMOVE_ALERT_RULE: 'REMOVE_ALERT_RULE',
DUPLICATE_ALERT_RULE: 'DUPLICATE_ALERT_RULE',
UPDATE_ALERT_RULE: 'UPDATE_ALERT_RULE',
};

View File

@ -57,6 +57,7 @@ export const alertDefaults: AlertDef = {
},
annotations: defaultAnnotations,
evalWindow: defaultEvalWindow,
alert: '',
};
export const anamolyAlertDefaults: AlertDef = {
@ -101,6 +102,7 @@ export const anamolyAlertDefaults: AlertDef = {
},
annotations: defaultAnnotations,
evalWindow: defaultEvalWindow,
alert: '',
};
export const logAlertDefaults: AlertDef = {
@ -132,6 +134,7 @@ export const logAlertDefaults: AlertDef = {
},
annotations: defaultAnnotations,
evalWindow: defaultEvalWindow,
alert: '',
};
export const traceAlertDefaults: AlertDef = {
@ -163,6 +166,7 @@ export const traceAlertDefaults: AlertDef = {
},
annotations: defaultAnnotations,
evalWindow: defaultEvalWindow,
alert: '',
};
export const exceptionAlertDefaults: AlertDef = {
@ -194,6 +198,7 @@ export const exceptionAlertDefaults: AlertDef = {
},
annotations: defaultAnnotations,
evalWindow: defaultEvalWindow,
alert: '',
};
export const ALERTS_VALUES_MAP: Record<AlertTypes, AlertDef> = {

View File

@ -1,4 +1,4 @@
import { Color } from '@signozhq/design-tokens';
import { Color, ColorType } from '@signozhq/design-tokens';
import { showErrorNotification } from 'components/ExplorerCard/utils';
import { LOCALSTORAGE } from 'constants/localStorage';
import { QueryParams } from 'constants/query';
@ -8,7 +8,7 @@ import { DataSource } from 'types/common/queryBuilder';
import { SaveNewViewHandlerProps } from './types';
export const getRandomColor = (): Color => {
export const getRandomColor = (): ColorType => {
const colorKeys = Object.keys(Color) as (keyof typeof Color)[];
const randomKey = colorKeys[Math.floor(Math.random() * colorKeys.length)];
return Color[randomKey];

View File

@ -53,6 +53,7 @@ import {
QueryFunctionProps,
} from 'types/api/queryBuilder/queryBuilderData';
import { EQueryType } from 'types/common/dashboard';
import { DataSource } from 'types/common/queryBuilder';
import { GlobalReducer } from 'types/reducer/globalTime';
import BasicInfo from './BasicInfo';
@ -105,6 +106,11 @@ function FormAlertRules({
const location = useLocation();
const queryParams = new URLSearchParams(location.search);
const dataSource = useMemo(
() => urlQuery.get(QueryParams.alertType) as DataSource,
[urlQuery],
);
// For alerts, the panel type should always be "Graph"
const panelType = PANEL_TYPES.TIME_SERIES;
@ -114,13 +120,12 @@ function FormAlertRules({
handleSetQueryData,
handleRunQuery,
handleSetConfig,
initialDataSource,
redirectWithQueryBuilderData,
} = useQueryBuilder();
useEffect(() => {
handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, initialDataSource);
}, [handleSetConfig, initialDataSource, panelType]);
handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, dataSource);
}, [handleSetConfig, dataSource, panelType]);
// use query client
const ruleCache = useQueryClient();

View File

@ -18,8 +18,13 @@
font-style: normal;
font-weight: var(--font-weight-normal);
line-height: 28px;
/* 155.556% */
letter-spacing: -0.09px;
width: 72%; // arbitrary number to match input width
display: flex;
align-items: center;
gap: 8px;
justify-content: space-between;
}
.subtitle {
@ -356,6 +361,8 @@
flex: 1;
.heading {
margin-bottom: 8px;
.title {
font-size: 12px;
}
@ -370,6 +377,18 @@
.ant-input-number {
width: 80%;
}
.no-limit {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 24px;
font-weight: 700;
font-size: 12px;
color: var(--bg-forest-400);
}
}
.signal-limit-view-mode {

View File

@ -12,6 +12,7 @@ import {
Modal,
Row,
Select,
Switch,
Table,
TablePaginationConfig,
TableProps as AntDTableProps,
@ -34,7 +35,7 @@ import dayjs, { Dayjs } from 'dayjs';
import { useGetAllIngestionsKeys } from 'hooks/IngestionKeys/useGetAllIngestionKeys';
import useDebouncedFn from 'hooks/useDebouncedFunction';
import { useNotifications } from 'hooks/useNotifications';
import { isNil } from 'lodash-es';
import { isNil, isUndefined } from 'lodash-es';
import {
ArrowUpRight,
CalendarClock,
@ -395,84 +396,6 @@ function MultiIngestionSettings(): JSX.Element {
const getFormattedTime = (date: string): string =>
dayjs(date).format('MMM DD,YYYY, hh:mm a');
const handleAddLimit = (
APIKey: IngestionKeyProps,
signalName: string,
): void => {
setActiveSignal({
id: signalName,
signal: signalName,
config: {},
});
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
keyID: APIKey.id,
signal: signalName,
config: {
day: {
size: gbToBytes(dailyLimit),
},
second: {
size: gbToBytes(secondsLimit),
},
},
};
createLimitForIngestionKey(payload);
};
const handleUpdateLimit = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveSignal(signal);
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
limitID: signal.id,
signal: signal.signal,
config: {
day: {
size: gbToBytes(dailyLimit),
},
second: {
size: gbToBytes(secondsLimit),
},
},
};
updateLimitForIngestionKey(payload);
};
const bytesToGb = (size: number | undefined): number => {
if (!size) {
return 0;
}
return size / BYTES;
};
const enableEditLimitMode = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveAPIKey(APIKey);
setActiveSignal(signal);
addEditLimitForm.setFieldsValue({
dailyLimit: bytesToGb(signal?.config?.day?.size || 0),
secondsLimit: bytesToGb(signal?.config?.second?.size || 0),
});
setIsEditAddLimitOpen(true);
};
const onDeleteLimitHandler = (): void => {
if (activeSignal && activeSignal?.id) {
deleteLimitForKey(activeSignal.id);
}
};
const showDeleteLimitModal = (
APIKey: IngestionKeyProps,
limit: LimitProps,
@ -496,6 +419,131 @@ function MultiIngestionSettings(): JSX.Element {
addEditLimitForm.resetFields();
};
const handleAddLimit = (
APIKey: IngestionKeyProps,
signalName: string,
): void => {
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
keyID: APIKey.id,
signal: signalName,
config: {},
};
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
},
};
}
if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
},
};
}
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
// Nothing to save since no limit was provided; close the edit view and reset the active signal and API key
setActiveSignal(null);
setActiveAPIKey(null);
setIsEditAddLimitOpen(false);
setUpdatedTags([]);
hideAddViewModal();
setHasCreateLimitForIngestionKeyError(false);
return;
}
createLimitForIngestionKey(payload);
};
const handleUpdateLimit = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
const { dailyLimit, secondsLimit } = addEditLimitForm.getFieldsValue();
const payload = {
limitID: signal.id,
signal: signal.signal,
config: {},
};
if (isUndefined(dailyLimit) && isUndefined(secondsLimit)) {
showDeleteLimitModal(APIKey, signal);
return;
}
if (!isUndefined(dailyLimit)) {
payload.config = {
day: {
size: gbToBytes(dailyLimit),
},
};
}
if (!isUndefined(secondsLimit)) {
payload.config = {
...payload.config,
second: {
size: gbToBytes(secondsLimit),
},
};
}
updateLimitForIngestionKey(payload);
};
const bytesToGb = (size: number | undefined): number => {
if (!size) {
return 0;
}
return size / BYTES;
};
const enableEditLimitMode = (
APIKey: IngestionKeyProps,
signal: LimitProps,
): void => {
setActiveAPIKey(APIKey);
setActiveSignal({
...signal,
config: {
...signal.config,
day: {
...signal.config?.day,
enabled: !isNil(signal?.config?.day?.size),
},
second: {
...signal.config?.second,
enabled: !isNil(signal?.config?.second?.size),
},
},
});
addEditLimitForm.setFieldsValue({
dailyLimit: bytesToGb(signal?.config?.day?.size || 0),
secondsLimit: bytesToGb(signal?.config?.second?.size || 0),
enableDailyLimit: !isNil(signal?.config?.day?.size),
enableSecondLimit: !isNil(signal?.config?.second?.size),
});
setIsEditAddLimitOpen(true);
};
const onDeleteLimitHandler = (): void => {
if (activeSignal && activeSignal?.id) {
deleteLimitForKey(activeSignal.id);
}
};
const columns: AntDTableProps<IngestionKeyProps>['columns'] = [
{
title: 'Ingestion Key',
@ -684,50 +732,108 @@ function MultiIngestionSettings(): JSX.Element {
<div className="signal-limit-edit-mode">
<div className="daily-limit">
<div className="heading">
<div className="title"> Daily limit </div>
<div className="title">
Daily limit
<div className="limit-enable-disable-toggle">
<Form.Item name="enableDailyLimit">
<Switch
size="small"
checked={activeSignal?.config?.day?.enabled}
onChange={(value): void => {
setActiveSignal({
...activeSignal,
config: {
...activeSignal.config,
day: {
...activeSignal.config?.day,
enabled: value,
},
},
});
}}
/>
</Form.Item>
</div>
</div>
<div className="subtitle">
Add a limit for data ingested daily{' '}
Add a limit for data ingested daily
</div>
</div>
<div className="size">
<Form.Item name="dailyLimit">
<InputNumber
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
{activeSignal?.config?.day?.enabled ? (
<Form.Item name="dailyLimit" key="dailyLimit">
<InputNumber
disabled={!activeSignal?.config?.day?.enabled}
key="dailyLimit"
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
) : (
<div className="no-limit">
<Infinity size={16} /> NO LIMIT
</div>
)}
</div>
</div>
<div className="second-limit">
<div className="heading">
<div className="title"> Per Second limit </div>
<div className="title">
Per Second limit{' '}
<div className="limit-enable-disable-toggle">
<Form.Item name="enableSecondLimit">
<Switch
size="small"
checked={activeSignal?.config?.second?.enabled}
onChange={(value): void => {
setActiveSignal({
...activeSignal,
config: {
...activeSignal.config,
second: {
...activeSignal.config?.second,
enabled: value,
},
},
});
}}
/>
</Form.Item>
</div>
</div>
<div className="subtitle">
{' '}
Add a limit for data ingested every second{' '}
Add a limit for data ingested every second
</div>
</div>
<div className="size">
<Form.Item name="secondsLimit">
<InputNumber
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
{activeSignal?.config?.second?.enabled ? (
<Form.Item name="secondsLimit" key="secondsLimit">
<InputNumber
key="secondsLimit"
disabled={!activeSignal?.config?.second?.enabled}
addonAfter={
<Select defaultValue="GiB" disabled>
<Option value="TiB"> TiB</Option>
<Option value="GiB"> GiB</Option>
<Option value="MiB"> MiB </Option>
<Option value="KiB"> KiB </Option>
</Select>
}
/>
</Form.Item>
) : (
<div className="no-limit">
<Infinity size={16} /> NO LIMIT
</div>
)}
</div>
</div>
</div>

View File

@ -82,9 +82,8 @@ function ImportJSON({
const dashboardData = JSON.parse(editorValue) as DashboardData;
// Add validation for uuid
if (dashboardData.uuid !== undefined && dashboardData.uuid.trim() === '') {
// silently remove uuid if it is empty
// Remove uuid from the dashboard data in all cases: empty, duplicate, or any valid non-duplicate uuid
if (dashboardData.uuid !== undefined) {
delete dashboardData.uuid;
}

View File

@ -202,6 +202,7 @@ function LogsExplorerViews({
id: 'severity_text--string----true',
},
],
legend: '{{severity_text}}',
};
const modifiedQuery: Query = {

View File

@ -58,12 +58,17 @@ export const databaseCallsRPS = ({
const legends = [legend];
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
groupBy,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@ -213,12 +213,17 @@ export const externalCallRpsByAddress = ({
const legends = [legend];
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
groupBy,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@ -25,6 +25,8 @@ export const getQueryBuilderQueries = ({
aggregateOperator,
dataSource,
queryNameAndExpression,
timeAggregateOperators,
spaceAggregateOperators,
}: BuilderQueriesProps): QueryBuilderData => ({
queryFormulas: [],
queryData: autocompleteData.map((item, index) => {
@ -50,6 +52,8 @@ export const getQueryBuilderQueries = ({
op: 'AND',
},
reduceTo: 'avg',
spaceAggregation: spaceAggregateOperators[index],
timeAggregation: timeAggregateOperators[index],
dataSource,
};

View File

@ -83,6 +83,17 @@ export const latency = ({
const dataSource = isSpanMetricEnable ? DataSource.METRICS : DataSource.TRACES;
const queryNameAndExpression = QUERYNAME_AND_EXPRESSION;
const timeAggregateOperators = [
MetricAggregateOperator.EMPTY,
MetricAggregateOperator.EMPTY,
MetricAggregateOperator.EMPTY,
];
const spaceAggregateOperators = [
MetricAggregateOperator.P50,
MetricAggregateOperator.P90,
MetricAggregateOperator.P99,
];
return getQueryBuilderQueries({
autocompleteData,
legends,
@ -90,6 +101,8 @@ export const latency = ({
aggregateOperator,
dataSource,
queryNameAndExpression,
timeAggregateOperators,
spaceAggregateOperators,
});
};
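The two operator arrays are consumed positionally: getQueryBuilderQueries assigns timeAggregateOperators[index] and spaceAggregateOperators[index] to the query built from autocompleteData[index]. A minimal sketch of that pairing, with trimmed-down types (the real queryData entries carry many more fields):
// Illustrative sketch, not part of the diff.
interface SketchQuery {
	queryName: string;
	timeAggregation: string;
	spaceAggregation: string;
}

const pairOperators = (
	names: string[],
	timeOps: string[],
	spaceOps: string[],
): SketchQuery[] =>
	names.map((queryName, index) => ({
		queryName,
		timeAggregation: timeOps[index],
		spaceAggregation: spaceOps[index],
	}));

// For the latency panel above: three queries, empty time aggregation,
// p50/p90/p99 space aggregation.
console.log(pairOperators(['A', 'B', 'C'], ['', '', ''], ['p50', 'p90', 'p99']));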
@ -510,11 +523,16 @@ export const operationPerSec = ({
const legends = OPERATION_LEGENDS;
const dataSource = DataSource.METRICS;
const timeAggregateOperators = [MetricAggregateOperator.RATE];
const spaceAggregateOperators = [MetricAggregateOperator.SUM];
return getQueryBuilderQueries({
autocompleteData,
legends,
filterItems,
dataSource,
timeAggregateOperators,
spaceAggregateOperators,
});
};

View File

@ -29,6 +29,8 @@ export interface BuilderQueriesProps {
aggregateOperator?: string[];
dataSource: DataSource;
queryNameAndExpression?: string[];
timeAggregateOperators: MetricAggregateOperator[];
spaceAggregateOperators: MetricAggregateOperator[];
}
export interface BuilderQuerieswithFormulaProps {

View File

@ -2,18 +2,27 @@
import { DownloadOptions } from 'container/Download/Download.types';
import { MenuItemKeys } from 'container/GridCardLayout/WidgetHeader/contants';
import {
MetricAggregateOperator,
TracesAggregatorOperator,
} from 'types/common/queryBuilder';
export const legend = {
address: '{{address}}',
};
export const QUERYNAME_AND_EXPRESSION = ['A', 'B', 'C'];
export const LATENCY_AGGREGATEOPERATOR = ['p50', 'p90', 'p99'];
export const LATENCY_AGGREGATEOPERATOR_SPAN_METRICS = [
'hist_quantile_50',
'hist_quantile_90',
'hist_quantile_99',
export const LATENCY_AGGREGATEOPERATOR = [
TracesAggregatorOperator.P50,
TracesAggregatorOperator.P90,
TracesAggregatorOperator.P99,
];
export const LATENCY_AGGREGATEOPERATOR_SPAN_METRICS = [
MetricAggregateOperator.P50,
MetricAggregateOperator.P90,
MetricAggregateOperator.P99,
];
export const OPERATION_LEGENDS = ['Operations'];
export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
@ -21,8 +30,21 @@ export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
export enum FORMULA {
ERROR_PERCENTAGE = 'A*100/B',
DATABASE_CALLS_AVG_DURATION = 'A/B',
// The apdex formula is (satisfied_count + 0.5 * tolerating_count + 0 * frustrated_count) / total_count
// The satisfied_count is B, tolerating_count is C, total_count is A
// But why do we have (B+C)/2 instead of B + C/2?
// The way we issue the query is latency <= threshold, which means we over-count, i.e.
// query B => durationNano <= 500ms
// query C => durationNano <= 2000ms
// Since <= 2000ms includes <= 500ms, we over-count; to correct, we subtract B/2,
// so the full expression would be (B + C/2) - B/2 = (B+C)/2
APDEX_TRACES = '((B + C)/2)/A',
APDEX_DELTA_SPAN_METRICS = '((B + C)/2)/A',
// Does the same not apply for delta span metrics?
// No, because the delta metrics store the counts just for the current bucket
// so we don't need to subtract anything
APDEX_DELTA_SPAN_METRICS = '(B + C)/A',
// Cumulative span metrics store the counts for all buckets
// so we need to subtract B/2 to correct the over counting
APDEX_CUMULATIVE_SPAN_METRICS = '((B + C)/2)/A',
}
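A worked instance of the derivation in the comments above, with hypothetical counts (the traces / cumulative span metrics case):
// Illustrative sketch, not part of the diff; counts are made up.
const A = 100; // total_count
const B = 70; // durationNano <= 500ms (satisfied)
const C = 90; // durationNano <= 2000ms (includes the 70 satisfied requests)

// tolerating_count is really C - B = 20, so the textbook apdex is
// (70 + 0.5 * 20) / 100 = 0.8, matching ((B + C) / 2) / A = 80 / 100.
const apdex = (B + C) / 2 / A;
console.log(apdex); // 0.8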

View File

@ -2,82 +2,90 @@ import './ActionButtons.styles.scss';
import { Color } from '@signozhq/design-tokens';
import { Divider, Dropdown, MenuProps, Switch, Tooltip } from 'antd';
import { QueryParams } from 'constants/query';
import ROUTES from 'constants/routes';
import { useIsDarkMode } from 'hooks/useDarkMode';
import useUrlQuery from 'hooks/useUrlQuery';
import history from 'lib/history';
import { Copy, Ellipsis, PenLine, Trash2 } from 'lucide-react';
import {
useAlertRuleDelete,
useAlertRuleDuplicate,
useAlertRuleStatusToggle,
useAlertRuleUpdate,
} from 'pages/AlertDetails/hooks';
import CopyToClipboard from 'periscope/components/CopyToClipboard';
import { useAlertRule } from 'providers/Alert';
import React, { useEffect, useState } from 'react';
import { useCallback, useEffect, useState } from 'react';
import { CSSProperties } from 'styled-components';
import { AlertDef } from 'types/api/alerts/def';
import { AlertHeaderProps } from '../AlertHeader';
import RenameModal from './RenameModal';
const menuItemStyle: CSSProperties = {
fontSize: '14px',
letterSpacing: '0.14px',
};
function AlertActionButtons({
ruleId,
alertDetails,
setUpdatedName,
}: {
ruleId: string;
alertDetails: AlertHeaderProps['alertDetails'];
setUpdatedName: (name: string) => void;
}): JSX.Element {
const { alertRuleState, setAlertRuleState } = useAlertRule();
const { handleAlertStateToggle } = useAlertRuleStatusToggle({ ruleId });
const [intermediateName, setIntermediateName] = useState<string>(
alertDetails.alert,
);
const [isRenameAlertOpen, setIsRenameAlertOpen] = useState<boolean>(false);
const isDarkMode = useIsDarkMode();
const { handleAlertStateToggle } = useAlertRuleStatusToggle({ ruleId });
const { handleAlertDuplicate } = useAlertRuleDuplicate({
alertDetails: (alertDetails as unknown) as AlertDef,
});
const { handleAlertDelete } = useAlertRuleDelete({ ruleId: Number(ruleId) });
const { handleAlertUpdate, isLoading } = useAlertRuleUpdate({
alertDetails: (alertDetails as unknown) as AlertDef,
setUpdatedName,
intermediateName,
});
const params = useUrlQuery();
const handleRename = useCallback(() => {
setIsRenameAlertOpen(true);
}, []);
const handleRename = React.useCallback(() => {
params.set(QueryParams.ruleId, String(ruleId));
history.push(`${ROUTES.ALERT_OVERVIEW}?${params.toString()}`);
}, [params, ruleId]);
const onNameChangeHandler = useCallback(() => {
handleAlertUpdate();
setIsRenameAlertOpen(false);
}, [handleAlertUpdate]);
const menu: MenuProps['items'] = React.useMemo(
() => [
{
key: 'rename-rule',
label: 'Rename',
icon: <PenLine size={16} color={Color.BG_VANILLA_400} />,
onClick: (): void => handleRename(),
style: menuItemStyle,
const menuItems: MenuProps['items'] = [
{
key: 'rename-rule',
label: 'Rename',
icon: <PenLine size={16} color={Color.BG_VANILLA_400} />,
onClick: handleRename,
style: menuItemStyle,
},
{
key: 'duplicate-rule',
label: 'Duplicate',
icon: <Copy size={16} color={Color.BG_VANILLA_400} />,
onClick: handleAlertDuplicate,
style: menuItemStyle,
},
{
key: 'delete-rule',
label: 'Delete',
icon: <Trash2 size={16} color={Color.BG_CHERRY_400} />,
onClick: handleAlertDelete,
style: {
...menuItemStyle,
color: Color.BG_CHERRY_400,
},
{
key: 'duplicate-rule',
label: 'Duplicate',
icon: <Copy size={16} color={Color.BG_VANILLA_400} />,
onClick: (): void => handleAlertDuplicate(),
style: menuItemStyle,
},
{ type: 'divider' },
{
key: 'delete-rule',
label: 'Delete',
icon: <Trash2 size={16} color={Color.BG_CHERRY_400} />,
onClick: (): void => handleAlertDelete(),
style: {
...menuItemStyle,
color: Color.BG_CHERRY_400,
},
},
],
[handleAlertDelete, handleAlertDuplicate, handleRename],
);
const isDarkMode = useIsDarkMode();
},
];
// state for immediate UI feedback rather than waiting for onSuccess of handleAlertStateToggle to update the alertRuleState
const [isAlertRuleDisabled, setIsAlertRuleDisabled] = useState<
@ -95,35 +103,48 @@ function AlertActionButtons({
// eslint-disable-next-line react-hooks/exhaustive-deps
useEffect(() => (): void => setAlertRuleState(undefined), []);
const toggleAlertRule = useCallback(() => {
setIsAlertRuleDisabled((prev) => !prev);
handleAlertStateToggle();
}, [handleAlertStateToggle]);
return (
<div className="alert-action-buttons">
<Tooltip title={alertRuleState ? 'Enable alert' : 'Disable alert'}>
{isAlertRuleDisabled !== undefined && (
<Switch
size="small"
onChange={(): void => {
setIsAlertRuleDisabled((prev) => !prev);
handleAlertStateToggle();
}}
checked={!isAlertRuleDisabled}
/>
)}
</Tooltip>
<CopyToClipboard textToCopy={window.location.href} />
<Divider type="vertical" />
<Dropdown trigger={['click']} menu={{ items: menu }}>
<Tooltip title="More options">
<Ellipsis
size={16}
color={isDarkMode ? Color.BG_VANILLA_400 : Color.BG_INK_400}
cursor="pointer"
className="dropdown-icon"
/>
<>
<div className="alert-action-buttons">
<Tooltip title={alertRuleState ? 'Enable alert' : 'Disable alert'}>
{isAlertRuleDisabled !== undefined && (
<Switch
size="small"
onChange={toggleAlertRule}
checked={!isAlertRuleDisabled}
/>
)}
</Tooltip>
</Dropdown>
</div>
<CopyToClipboard textToCopy={window.location.href} />
<Divider type="vertical" />
<Dropdown trigger={['click']} menu={{ items: menuItems }}>
<Tooltip title="More options">
<Ellipsis
size={16}
color={isDarkMode ? Color.BG_VANILLA_400 : Color.BG_INK_400}
cursor="pointer"
className="dropdown-icon"
/>
</Tooltip>
</Dropdown>
</div>
<RenameModal
isOpen={isRenameAlertOpen}
setIsOpen={setIsRenameAlertOpen}
isLoading={isLoading}
onNameChangeHandler={onNameChangeHandler}
intermediateName={intermediateName}
setIntermediateName={setIntermediateName}
/>
</>
);
}

View File

@ -0,0 +1,138 @@
.rename-alert {
.ant-modal-content {
width: 384px;
flex-shrink: 0;
border-radius: 4px;
border: 1px solid var(--bg-slate-500);
background: var(--bg-ink-400);
box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2);
padding: 0px;
.ant-modal-header {
height: 52px;
padding: 16px;
background: var(--bg-ink-400);
border-bottom: 1px solid var(--bg-slate-500);
margin-bottom: 0px;
.ant-modal-title {
color: var(--bg-vanilla-100);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px; /* 142.857% */
width: 349px;
height: 20px;
}
}
.ant-modal-body {
padding: 16px;
.alert-content {
display: flex;
flex-direction: column;
gap: 8px;
.name-text {
color: var(--bg-vanilla-100);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 500;
line-height: 20px; /* 142.857% */
}
.alert-name-input {
display: flex;
padding: 6px 6px 6px 8px;
align-items: center;
gap: 4px;
align-self: stretch;
border-radius: 0px 2px 2px 0px;
border: 1px solid var(--bg-slate-400);
background: var(--bg-ink-300);
}
}
}
.ant-modal-footer {
padding: 16px;
margin-top: 0px;
.alert-rename {
display: flex;
flex-direction: row-reverse;
gap: 12px;
.cancel-btn {
display: flex;
padding: 4px 8px;
justify-content: center;
align-items: center;
gap: 4px;
border-radius: 2px;
background: var(--bg-slate-500);
.ant-btn-icon {
margin-inline-end: 0px;
}
}
.rename-btn {
display: flex;
padding: 4px 8px;
justify-content: center;
align-items: center;
gap: 4px;
border-radius: 2px;
background: var(--bg-robin-500);
.ant-btn-icon {
margin-inline-end: 0px;
}
}
}
}
}
}
.lightMode {
.rename-alert {
.ant-modal-content {
border: 1px solid var(--bg-vanilla-300);
background: var(--bg-vanilla-100);
.ant-modal-header {
background: var(--bg-vanilla-100);
border-bottom: 1px solid var(--bg-vanilla-300);
.ant-modal-title {
color: var(--bg-ink-300);
}
}
.ant-modal-body {
.alert-content {
.name-text {
color: var(--bg-ink-300);
}
.alert-name-input {
border: 1px solid var(--bg-vanilla-300);
background: var(--bg-vanilla-100);
}
}
}
.ant-modal-footer {
.alert-rename {
.cancel-btn {
background: var(--bg-vanilla-300);
}
}
}
}
}
}

View File

@ -0,0 +1,95 @@
import './RenameModal.styles.scss';
import { Button, Input, InputRef, Modal, Typography } from 'antd';
import { Check, X } from 'lucide-react';
import { useCallback, useEffect, useRef } from 'react';
type Props = {
isOpen: boolean;
setIsOpen: (isOpen: boolean) => void;
onNameChangeHandler: () => void;
isLoading: boolean;
intermediateName: string;
setIntermediateName: (name: string) => void;
};
function RenameModal({
isOpen,
setIsOpen,
onNameChangeHandler,
isLoading,
intermediateName,
setIntermediateName,
}: Props): JSX.Element {
const inputRef = useRef<InputRef>(null);
useEffect(() => {
if (isOpen && inputRef.current) {
inputRef.current.focus();
}
}, [isOpen]);
const handleClose = useCallback((): void => setIsOpen(false), [setIsOpen]);
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent): void => {
if (isOpen) {
if (e.key === 'Enter') {
onNameChangeHandler();
} else if (e.key === 'Escape') {
handleClose();
}
}
};
document.addEventListener('keydown', handleKeyDown);
return (): void => {
document.removeEventListener('keydown', handleKeyDown);
};
}, [isOpen, onNameChangeHandler, handleClose]);
return (
<Modal
open={isOpen}
title="Rename Alert"
onOk={onNameChangeHandler}
onCancel={handleClose}
rootClassName="rename-alert"
footer={
<div className="alert-rename">
<Button
type="primary"
icon={<Check size={14} />}
className="rename-btn"
onClick={onNameChangeHandler}
disabled={isLoading}
>
Rename Alert
</Button>
<Button
type="text"
icon={<X size={14} />}
className="cancel-btn"
onClick={handleClose}
>
Cancel
</Button>
</div>
}
>
<div className="alert-content">
<Typography.Text className="name-text">Enter a new name</Typography.Text>
<Input
ref={inputRef}
data-testid="alert-name"
className="alert-name-input"
value={intermediateName}
onChange={(e): void => setIntermediateName(e.target.value)}
/>
</div>
</Modal>
);
}
export default RenameModal;

View File

@ -2,7 +2,7 @@ import './AlertHeader.styles.scss';
import LineClampedText from 'periscope/components/LineClampedText/LineClampedText';
import { useAlertRule } from 'providers/Alert';
import { useMemo } from 'react';
import { useMemo, useState } from 'react';
import AlertActionButtons from './ActionButtons/ActionButtons';
import AlertLabels from './AlertLabels/AlertLabels';
@ -19,7 +19,9 @@ export type AlertHeaderProps = {
};
};
function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
const { state, alert, labels } = alertDetails;
const { state, alert: alertName, labels } = alertDetails;
const { alertRuleState } = useAlertRule();
const [updatedName, setUpdatedName] = useState(alertName);
const labelsWithoutSeverity = useMemo(
() =>
@ -29,8 +31,6 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
[labels],
);
const { alertRuleState } = useAlertRule();
return (
<div className="alert-info">
<div className="alert-info__info-wrapper">
@ -38,7 +38,7 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
<div className="alert-title-wrapper">
<AlertState state={alertRuleState ?? state} />
<div className="alert-title">
<LineClampedText text={alert} />
<LineClampedText text={updatedName || alertName} />
</div>
</div>
</div>
@ -54,7 +54,11 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
</div>
</div>
<div className="alert-info__action-buttons">
<AlertActionButtons alertDetails={alertDetails} ruleId={alertDetails.id} />
<AlertActionButtons
alertDetails={alertDetails}
ruleId={alertDetails.id}
setUpdatedName={setUpdatedName}
/>
</div>
</div>
);

View File

@ -467,6 +467,44 @@ export const useAlertRuleDuplicate = ({
return { handleAlertDuplicate };
};
export const useAlertRuleUpdate = ({
alertDetails,
setUpdatedName,
intermediateName,
}: {
alertDetails: AlertDef;
setUpdatedName: (name: string) => void;
intermediateName: string;
}): {
handleAlertUpdate: () => void;
isLoading: boolean;
} => {
const { notifications } = useNotifications();
const handleError = useAxiosError();
const { mutate: updateAlertRule, isLoading } = useMutation(
[REACT_QUERY_KEY.UPDATE_ALERT_RULE, alertDetails.id],
save,
{
onMutate: () => setUpdatedName(intermediateName),
onSuccess: () =>
notifications.success({ message: 'Alert renamed successfully' }),
onError: (error) => {
setUpdatedName(alertDetails.alert);
handleError(error);
},
},
);
const handleAlertUpdate = (): void => {
updateAlertRule({
data: { ...alertDetails, alert: intermediateName },
id: alertDetails.id,
});
};
return { handleAlertUpdate, isLoading };
};
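Stripped of the alert-specific wiring, the hook follows react-query's optimistic-update shape: apply the new value in onMutate, roll back in onError. A minimal self-contained sketch with hypothetical helpers:
// Illustrative sketch, not part of the diff; fakeSave and the callbacks are hypothetical.
import { useMutation } from 'react-query';

const fakeSave = async (name: string): Promise<string> => name;

function useOptimisticRename(
	apply: (name: string) => void,
	revert: () => void,
): (name: string) => void {
	const { mutate } = useMutation(fakeSave, {
		onMutate: (name) => apply(name), // show the new name immediately
		onError: () => revert(), // roll back if the save fails
	});
	return mutate;
}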
export const useAlertRuleDelete = ({
ruleId,

View File

@ -18,6 +18,7 @@ import {
} from '../MessagingQueuesUtils';
import DropRateView from '../MQDetails/DropRateView/DropRateView';
import MessagingQueueOverview from '../MQDetails/MessagingQueueOverview';
import MetricPage from '../MQDetails/MetricPage/MetricPage';
import MessagingQueuesDetails from '../MQDetails/MQDetails';
import MessagingQueuesConfigOptions from '../MQGraph/MQConfigOptions';
import MessagingQueuesGraph from '../MQGraph/MQGraph';
@ -60,6 +61,10 @@ function MQDetailPage(): JSX.Element {
});
};
const showMessagingQueueDetails =
selectedView !== MessagingQueuesViewType.dropRate.value &&
selectedView !== MessagingQueuesViewType.metricPage.value;
return (
<div className="messaging-queue-container">
<div className="messaging-breadcrumb">
@ -82,7 +87,7 @@ function MQDetailPage(): JSX.Element {
setSelectedView(value);
updateUrlQuery({ [QueryParams.mqServiceView]: value });
}}
value={mqServiceView}
value={selectedView}
options={[
{
label: MessagingQueuesViewType.consumerLag.label,
@ -100,6 +105,10 @@ function MQDetailPage(): JSX.Element {
label: MessagingQueuesViewType.dropRate.label,
value: MessagingQueuesViewType.dropRate.value,
},
{
label: MessagingQueuesViewType.metricPage.label,
value: MessagingQueuesViewType.metricPage.value,
},
]}
/>
</div>
@ -112,6 +121,8 @@ function MQDetailPage(): JSX.Element {
</div>
) : selectedView === MessagingQueuesViewType.dropRate.value ? (
<DropRateView />
) : selectedView === MessagingQueuesViewType.metricPage.value ? (
<MetricPage />
) : (
<MessagingQueueOverview
selectedView={selectedView}
@ -119,7 +130,7 @@ function MQDetailPage(): JSX.Element {
setOption={setproducerLatencyOption}
/>
)}
{selectedView !== MessagingQueuesViewType.dropRate.value && (
{showMessagingQueueDetails && (
<div className="messaging-queue-details">
<MessagingQueuesDetails
selectedView={selectedView}

View File

@ -28,3 +28,16 @@
width: 200px;
margin: 6px;
}
.lightMode {
.evaluation-time-selector {
.eval-title {
color: var(--bg-ink-400);
}
.ant-selector {
background-color: var(--bg-vanilla-200);
border: 1px solid var(--bg-ink-400);
}
}
}

View File

@ -230,9 +230,7 @@ function DropRateView(): JSX.Element {
return (
<div className={cx('mq-overview-container', 'droprate-view')}>
<div className="mq-overview-title">
<div className="drop-rat-title">
{MessagingQueuesViewType.dropRate.label}
</div>
{MessagingQueuesViewType.dropRate.label}
<EvaluationTimeSelector setInterval={setInterval} />
</div>
<Table

View File

@ -22,15 +22,13 @@
align-items: center;
width: 100%;
.drop-rat-title {
color: var(--bg-vanilla-200);
color: var(--bg-vanilla-200);
font-family: Inter;
font-size: 18px;
font-style: normal;
font-weight: 500;
line-height: 28px;
}
font-family: Inter;
font-size: 18px;
font-style: normal;
font-weight: 500;
line-height: 28px;
}
.mq-details-options {
@ -116,3 +114,67 @@
}
}
}
.lightMode {
.mq-overview-container {
background: var(--bg-vanilla-200);
border: 1px solid var(--bg-vanilla-300);
.mq-overview-title {
color: var(--bg-ink-400);
}
.mq-details-options {
.ant-radio-button-wrapper {
border-color: var(--bg-vanilla-300);
color: var(--bg-slate-200);
}
.ant-radio-button-wrapper-checked {
color: var(--bg-slate-200);
background: var(--bg-vanilla-300);
}
.ant-radio-button-wrapper-disabled {
background: var(--bg-vanilla-100);
color: var(--bg-vanilla-400);
}
}
}
.droprate-view {
.mq-table {
.ant-table-content {
border: 1px solid var(--bg-vanilla-300);
}
.ant-table-tbody {
.ant-table-cell {
background-color: var(--bg-vanilla-100);
}
}
.ant-table-thead {
.ant-table-cell {
background-color: var(--bg-vanilla-100);
border-bottom: 1px solid var(--bg-vanilla-300);
}
}
}
.no-data-style {
border: 1px solid var(--bg-vanilla-300);
}
}
.trace-id-list {
.traceid-style {
.traceid-text {
border: 1px solid var(--bg-vanilla-300);
background: var(--bg-vanilla-300);
}
.remaing-count {
color: var(--bg-ink-400);
}
}
}
}

View File

@ -18,7 +18,6 @@ import {
ProducerLatencyOptions,
SelectedTimelineQuery,
} from '../MessagingQueuesUtils';
import { ComingSoon } from '../MQCommon/MQCommon';
import MessagingQueuesTable from './MQTables/MQTables';
const MQServiceDetailTypePerView = (
@ -28,7 +27,6 @@ const MQServiceDetailTypePerView = (
MessagingQueueServiceDetailType.ConsumerDetails,
MessagingQueueServiceDetailType.ProducerDetails,
MessagingQueueServiceDetailType.NetworkLatency,
MessagingQueueServiceDetailType.PartitionHostMetrics,
],
[MessagingQueuesViewType.partitionLatency.value]: [
MessagingQueueServiceDetailType.ConsumerDetails,
@ -62,22 +60,8 @@ function MessagingQueuesOptions({
const detailTypes =
MQServiceDetailTypePerView(producerLatencyOption)[selectedView] || [];
return detailTypes.map((detailType) => (
<Radio.Button
key={detailType}
value={detailType}
disabled={
detailType === MessagingQueueServiceDetailType.PartitionHostMetrics
}
className={
detailType === MessagingQueueServiceDetailType.PartitionHostMetrics
? 'disabled-option'
: ''
}
>
<Radio.Button key={detailType} value={detailType}>
{ConsumerLagDetailTitle[detailType]}
{detailType === MessagingQueueServiceDetailType.PartitionHostMetrics && (
<ComingSoon />
)}
</Radio.Button>
));
};

View File

@ -0,0 +1,115 @@
import { Typography } from 'antd';
import { CardContainer } from 'container/GridCardLayout/styles';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useTranslation } from 'react-i18next';
import { Widgets } from 'types/api/dashboard/getAll';
import MetricPageGridGraph from './MetricPageGraph';
import {
averageRequestLatencyWidgetData,
brokerCountWidgetData,
brokerNetworkThroughputWidgetData,
bytesConsumedWidgetData,
consumerFetchRateWidgetData,
consumerGroupMemberWidgetData,
consumerLagByGroupWidgetData,
consumerOffsetWidgetData,
ioWaitTimeWidgetData,
kafkaProducerByteRateWidgetData,
messagesConsumedWidgetData,
producerFetchRequestPurgatoryWidgetData,
requestResponseWidgetData,
requestTimesWidgetData,
} from './MetricPageUtil';
interface MetricSectionProps {
title: string;
description: string;
graphCount: Widgets[];
}
function MetricSection({
title,
description,
graphCount,
}: MetricSectionProps): JSX.Element {
const isDarkMode = useIsDarkMode();
return (
<div className="metric-column-graph">
<CardContainer className="row-card" isDarkMode={isDarkMode}>
<div className="row-panel">
<Typography.Text className="section-title">{title}</Typography.Text>
</div>
</CardContainer>
<Typography.Text className="graph-description">
{description}
</Typography.Text>
<div className="metric-page-grid">
{graphCount.map((widgetData) => (
<MetricPageGridGraph
key={`graph-${widgetData.id}`}
widgetData={widgetData}
/>
))}
</div>
</div>
);
}
function MetricColumnGraphs(): JSX.Element {
const { t } = useTranslation('messagingQueues');
const metricsData = [
{
title: t('metricGraphCategory.brokerMetrics.title'),
description: t('metricGraphCategory.brokerMetrics.description'),
graphCount: [
brokerCountWidgetData,
requestTimesWidgetData,
producerFetchRequestPurgatoryWidgetData,
brokerNetworkThroughputWidgetData,
],
id: 'broker-metrics',
},
{
title: t('metricGraphCategory.producerMetrics.title'),
description: t('metricGraphCategory.producerMetrics.description'),
graphCount: [
ioWaitTimeWidgetData,
requestResponseWidgetData,
averageRequestLatencyWidgetData,
kafkaProducerByteRateWidgetData,
bytesConsumedWidgetData,
],
id: 'producer-metrics',
},
{
title: t('metricGraphCategory.consumerMetrics.title'),
description: t('metricGraphCategory.consumerMetrics.description'),
graphCount: [
consumerOffsetWidgetData,
consumerGroupMemberWidgetData,
consumerLagByGroupWidgetData,
consumerFetchRateWidgetData,
messagesConsumedWidgetData,
],
id: 'consumer-metrics',
},
];
return (
<div className="metric-column-graph-container">
{metricsData.map((metric) => (
<MetricSection
key={metric.id}
title={metric.title}
description={metric.description}
graphCount={metric?.graphCount || []}
/>
))}
</div>
);
}
export default MetricColumnGraphs;

View File

@ -0,0 +1,128 @@
.metric-page {
padding: 20px;
display: flex;
flex-direction: column;
gap: 32px;
.metric-page-container {
display: flex;
flex-direction: column;
.row-panel {
padding-left: 10px;
}
.metric-page-grid {
display: grid;
grid-template-columns: repeat(3, 1fr);
align-items: flex-start;
gap: 10px;
.metric-graph {
height: 320px;
padding: 10px;
width: 100%;
box-sizing: border-box;
}
}
@media (max-width: 768px) {
.metric-page-grid {
grid-template-columns: 1fr;
}
}
.graph-description {
padding: 16px 10px 16px 10px;
}
}
.row-panel {
border-radius: 4px;
background: rgba(18, 19, 23, 0.4);
padding: 8px;
display: flex;
gap: 6px;
align-items: center;
height: 48px !important;
.ant-typography {
font-size: 14px;
font-weight: 500;
}
.row-panel-section {
display: flex;
gap: 6px;
align-items: center;
.row-icon {
color: var(--bg-vanilla-400);
cursor: pointer;
}
.section-title {
color: var(--bg-vanilla-400);
font-family: Inter;
font-size: 14px;
font-style: normal;
font-weight: 400;
line-height: 20px;
letter-spacing: -0.07px;
}
}
}
.metric-column-graph-container {
display: grid;
grid-template-columns: 1fr 1fr 1fr;
gap: 10px;
.metric-column-graph {
display: flex;
flex-direction: column;
gap: 10px;
.row-panel {
justify-content: center;
}
.metric-page-grid {
display: flex;
flex-direction: column;
flex: 1;
min-width: 0;
gap: 10px;
.metric-graph {
height: 320px;
padding: 10px;
width: 100%;
box-sizing: border-box;
}
}
}
}
@media (max-width: 768px) {
.metric-column-graph-container {
grid-template-columns: 1fr;
}
}
}
.lightMode {
.metric-page {
.row-panel {
.row-panel-section {
.row-icon {
color: var(--bg-ink-300);
}
.section-title {
color: var(--bg-ink-300);
}
}
}
}
}

View File

@ -0,0 +1,134 @@
import './MetricPage.styles.scss';
import { Typography } from 'antd';
import cx from 'classnames';
import { CardContainer } from 'container/GridCardLayout/styles';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { ChevronDown, ChevronUp } from 'lucide-react';
import { useState } from 'react';
import { useTranslation } from 'react-i18next';
import { Widgets } from 'types/api/dashboard/getAll';
import MetricColumnGraphs from './MetricColumnGraphs';
import MetricPageGridGraph from './MetricPageGraph';
import {
cpuRecentUtilizationWidgetData,
currentOffsetPartitionWidgetData,
insyncReplicasWidgetData,
jvmGcCollectionsElapsedWidgetData,
jvmGCCountWidgetData,
jvmMemoryHeapWidgetData,
oldestOffsetWidgetData,
partitionCountPerTopicWidgetData,
} from './MetricPageUtil';
interface CollapsibleMetricSectionProps {
title: string;
description: string;
graphCount: Widgets[];
isCollapsed: boolean;
onToggle: () => void;
}
function CollapsibleMetricSection({
title,
description,
graphCount,
isCollapsed,
onToggle,
}: CollapsibleMetricSectionProps): JSX.Element {
const isDarkMode = useIsDarkMode();
return (
<div className="metric-page-container">
<CardContainer className="row-card" isDarkMode={isDarkMode}>
<div className={cx('row-panel')}>
<div className="row-panel-section">
<Typography.Text className="section-title">{title}</Typography.Text>
{isCollapsed ? (
<ChevronDown size={14} onClick={onToggle} className="row-icon" />
) : (
<ChevronUp size={14} onClick={onToggle} className="row-icon" />
)}
</div>
</div>
</CardContainer>
{!isCollapsed && (
<>
<Typography.Text className="graph-description">
{description}
</Typography.Text>
<div className="metric-page-grid">
{graphCount.map((widgetData) => (
<MetricPageGridGraph
key={`graph-${widgetData.id}`}
widgetData={widgetData}
/>
))}
</div>
</>
)}
</div>
);
}
function MetricPage(): JSX.Element {
const [collapsedSections, setCollapsedSections] = useState<{
[key: string]: boolean;
}>({
brokerJVMMetrics: false,
partitionMetrics: false,
});
const toggleCollapse = (key: string): void => {
setCollapsedSections((prev) => ({
...prev,
[key]: !prev[key],
}));
};
const { t } = useTranslation('messagingQueues');
const metricSections = [
{
key: 'brokerJVMMetrics',
title: t('metricGraphCategory.brokerJVMMetrics.title'),
description: t('metricGraphCategory.brokerJVMMetrics.description'),
graphCount: [
jvmGCCountWidgetData,
jvmGcCollectionsElapsedWidgetData,
cpuRecentUtilizationWidgetData,
jvmMemoryHeapWidgetData,
],
},
{
key: 'partitionMetrics',
title: t('metricGraphCategory.partitionMetrics.title'),
description: t('metricGraphCategory.partitionMetrics.description'),
graphCount: [
partitionCountPerTopicWidgetData,
currentOffsetPartitionWidgetData,
oldestOffsetWidgetData,
insyncReplicasWidgetData,
],
},
];
return (
<div className="metric-page">
<MetricColumnGraphs />
{metricSections.map(({ key, title, description, graphCount }) => (
<CollapsibleMetricSection
key={key}
title={title}
description={description}
graphCount={graphCount}
isCollapsed={collapsedSections[key]}
onToggle={(): void => toggleCollapse(key)}
/>
))}
</div>
);
}
export default MetricPage;

View File

@ -0,0 +1,59 @@
import './MetricPage.styles.scss';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { ViewMenuAction } from 'container/GridCardLayout/config';
import GridCard from 'container/GridCardLayout/GridCard';
import { Card } from 'container/GridCardLayout/styles';
import { useIsDarkMode } from 'hooks/useDarkMode';
import useUrlQuery from 'hooks/useUrlQuery';
import { useCallback } from 'react';
import { useDispatch } from 'react-redux';
import { useHistory, useLocation } from 'react-router-dom';
import { UpdateTimeInterval } from 'store/actions';
import { Widgets } from 'types/api/dashboard/getAll';
function MetricPageGridGraph({
widgetData,
}: {
widgetData: Widgets;
}): JSX.Element {
const history = useHistory();
const { pathname } = useLocation();
const dispatch = useDispatch();
const urlQuery = useUrlQuery();
const isDarkMode = useIsDarkMode();
const onDragSelect = useCallback(
(start: number, end: number) => {
const startTimestamp = Math.trunc(start);
const endTimestamp = Math.trunc(end);
urlQuery.set(QueryParams.startTime, startTimestamp.toString());
urlQuery.set(QueryParams.endTime, endTimestamp.toString());
const generatedUrl = `${pathname}?${urlQuery.toString()}`;
history.push(generatedUrl);
if (startTimestamp !== endTimestamp) {
dispatch(UpdateTimeInterval('custom', [startTimestamp, endTimestamp]));
}
},
[dispatch, history, pathname, urlQuery],
);
return (
<Card
isDarkMode={isDarkMode}
$panelType={PANEL_TYPES.TIME_SERIES}
className="metric-graph"
>
<GridCard
widget={widgetData}
headerMenuList={[...ViewMenuAction]}
onDragSelect={onDragSelect}
/>
</Card>
);
}
export default MetricPageGridGraph;

File diff suppressed because it is too large

View File

@ -166,3 +166,77 @@
padding-right: 8px;
}
}
.lightMode {
.mq-health-check-modal {
.ant-modal-content {
border: 1px solid var(--bg-vanilla-400);
background: var(--bg-vanilla-200);
.ant-modal-header {
border-bottom: 1px solid var(--bg-vanilla-400);
background: var(--bg-vanilla-200);
.ant-modal-title {
color: var(--bg-ink-300);
}
}
.modal-content {
background: var(--bg-vanilla-100);
.attribute-select {
.ant-select-selector {
border: 1px solid var(--bg-vanilla-300);
background: var(--bg-vanilla-200);
}
}
.tree-text {
color: var(--bg-ink-300);
}
.ant-tree {
.ant-tree-title {
.attribute-error-title {
color: var(--bg-amber-500);
.tree-text {
color: var(--bg-amber-500);
}
}
.attribute-success-title {
.success-attribute-icon {
color: var(--bg-ink-300);
}
}
}
}
}
.loader-container {
background: var(--bg-ink-300);
}
}
}
.config-btn {
background: var(--bg-vanilla-300);
&.missing-config-btn {
background: var(--bg-amber-100);
color: var(--bg-amber-500);
&:hover {
color: var(--bg-amber-600) !important;
}
}
.missing-config-btn {
.config-btn-content {
border-right: 1px solid var(--bg-amber-600);
}
}
}
}

View File

@ -222,6 +222,12 @@
}
}
:nth-child(2),
:nth-child(4) {
border-left: none !important;
border-right: none !important;
}
&.summary-section {
.overview-info-card {
min-height: 144px;
@ -331,6 +337,10 @@
.messaging-breadcrumb {
color: var(--bg-ink-400);
border-bottom: 1px solid var(--bg-vanilla-300);
.message-queue-text {
color: var(--bg-ink-400);
}
}
.messaging-header {
color: var(--bg-ink-400);

View File

@ -156,7 +156,7 @@ function MessagingQueues(): JSX.Element {
</Button>
</div>
</div>
<div className="overview-info-card middle-card">
<div className="overview-info-card">
<div>
<p className="card-title">{t('summarySection.producer.title')}</p>
<p className="card-info-text">
@ -174,7 +174,7 @@ function MessagingQueues(): JSX.Element {
</Button>
</div>
</div>
<div className="overview-info-card middle-card">
<div className="overview-info-card">
<div>
<p className="card-title">{t('summarySection.partition.title')}</p>
<p className="card-info-text">
@ -210,6 +210,24 @@ function MessagingQueues(): JSX.Element {
</Button>
</div>
</div>
<div className="overview-info-card">
<div>
<p className="card-title">{t('summarySection.metricPage.title')}</p>
<p className="card-info-text">
{t('summarySection.metricPage.description')}
</p>
</div>
<div className="button-grp">
<Button
type="default"
onClick={(): void =>
redirectToDetailsPage(MessagingQueuesViewType.metricPage.value)
}
>
{t('summarySection.viewDetailsButton')}
</Button>
</div>
</div>
</div>
</div>
</div>

View File

@ -222,7 +222,8 @@ export enum MessagingQueuesViewTypeOptions {
ConsumerLag = 'consumerLag',
PartitionLatency = 'partitionLatency',
ProducerLatency = 'producerLatency',
ConsumerLatency = 'consumerLatency',
DropRate = 'dropRate',
MetricPage = 'metricPage',
}
export const MessagingQueuesViewType = {
@ -240,7 +241,11 @@ export const MessagingQueuesViewType = {
},
dropRate: {
label: 'Drop Rate view',
value: 'dropRate',
value: MessagingQueuesViewTypeOptions.DropRate,
},
metricPage: {
label: 'Metric view',
value: MessagingQueuesViewTypeOptions.MetricPage,
},
};

View File

@ -19,7 +19,7 @@ export const defaultSeasonality = 'hourly';
export interface AlertDef {
id?: number;
alertType?: string;
alert?: string;
alert: string;
ruleType?: string;
frequency?: string;
condition: RuleCondition;

View File

@ -8,17 +8,21 @@ export interface LimitProps {
config?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
metric?: {
day?: {
size?: number;
enabled?: boolean;
};
second?: {
size?: number;
enabled?: boolean;
};
};
}
@ -27,11 +31,13 @@ export interface AddLimitProps {
keyID: string;
signal: string;
config: {
day: {
size: number;
day?: {
size?: number;
enabled?: boolean;
};
second: {
size: number;
second?: {
size?: number;
enabled?: boolean;
};
};
}
@ -40,11 +46,13 @@ export interface UpdateLimitProps {
limitID: string;
signal: string;
config: {
day: {
size: number;
day?: {
size?: number;
enabled?: boolean;
};
second: {
size: number;
second?: {
size?: number;
enabled?: boolean;
};
};
}
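With day and second now optional, the payloads built by handleUpdateLimit can carry either section alone or both. Hypothetical examples (ids, signals, and sizes made up):
// Illustrative sketch, not part of the diff.
const dailyOnly: UpdateLimitProps = {
	limitID: 'limit-123',
	signal: 'logs',
	config: {
		day: { size: 2 * 1024 ** 3 }, // 2 GiB per day
	},
};

const both: AddLimitProps = {
	keyID: 'key-456',
	signal: 'traces',
	config: {
		day: { size: 5 * 1024 ** 3 },
		second: { size: 10 * 1024 ** 2 }, // 10 MiB per second
	},
};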

View File

@ -860,11 +860,6 @@
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719"
integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==
"@babel/parser@^7.23.6":
version "7.23.6"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.6.tgz#ba1c9e512bda72a47e285ae42aff9d2a635a9e3b"
integrity sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==
"@babel/parser@^7.24.0", "@babel/parser@^7.24.1":
version "7.24.1"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.1.tgz#1e416d3627393fab1cb5b0f2f1796a100ae9133a"
@ -3099,48 +3094,6 @@
"@types/mdx" "^2.0.0"
"@types/react" ">=16"
"@microsoft/api-extractor-model@7.28.3":
version "7.28.3"
resolved "https://registry.yarnpkg.com/@microsoft/api-extractor-model/-/api-extractor-model-7.28.3.tgz#f6a213e41a2274d5195366b646954daee39e8493"
integrity sha512-wT/kB2oDbdZXITyDh2SQLzaWwTOFbV326fP0pUwNW00WeliARs0qjmXBWmGWardEzp2U3/axkO3Lboqun6vrig==
dependencies:
"@microsoft/tsdoc" "0.14.2"
"@microsoft/tsdoc-config" "~0.16.1"
"@rushstack/node-core-library" "3.62.0"
"@microsoft/api-extractor@7.39.0":
version "7.39.0"
resolved "https://registry.yarnpkg.com/@microsoft/api-extractor/-/api-extractor-7.39.0.tgz#41c25f7f522e8b9376debda07364ff234e602eff"
integrity sha512-PuXxzadgnvp+wdeZFPonssRAj/EW4Gm4s75TXzPk09h3wJ8RS3x7typf95B4vwZRrPTQBGopdUl+/vHvlPdAcg==
dependencies:
"@microsoft/api-extractor-model" "7.28.3"
"@microsoft/tsdoc" "0.14.2"
"@microsoft/tsdoc-config" "~0.16.1"
"@rushstack/node-core-library" "3.62.0"
"@rushstack/rig-package" "0.5.1"
"@rushstack/ts-command-line" "4.17.1"
colors "~1.2.1"
lodash "~4.17.15"
resolve "~1.22.1"
semver "~7.5.4"
source-map "~0.6.1"
typescript "5.3.3"
"@microsoft/tsdoc-config@~0.16.1":
version "0.16.2"
resolved "https://registry.yarnpkg.com/@microsoft/tsdoc-config/-/tsdoc-config-0.16.2.tgz#b786bb4ead00d54f53839a458ce626c8548d3adf"
integrity sha512-OGiIzzoBLgWWR0UdRJX98oYO+XKGf7tiK4Zk6tQ/E4IJqGCe7dvkTvgDZV5cFJUzLGDOjeAXrnZoA6QkVySuxw==
dependencies:
"@microsoft/tsdoc" "0.14.2"
ajv "~6.12.6"
jju "~1.4.0"
resolve "~1.19.0"
"@microsoft/tsdoc@0.14.2":
version "0.14.2"
resolved "https://registry.yarnpkg.com/@microsoft/tsdoc/-/tsdoc-0.14.2.tgz#c3ec604a0b54b9a9b87e9735dfc59e1a5da6a5fb"
integrity sha512-9b8mPpKrfeGRuhFH5iO1iwCLeIIsV6+H1sRfxbkoGXIyQE2BTsPd9zqSqQJ+pv5sJ/hT5M1zvOFL02MnEezFug==
"@monaco-editor/loader@^1.3.3":
version "1.3.3"
resolved "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.3.3.tgz"
@ -3546,46 +3499,6 @@
resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.20.0.tgz#03554155b45d8b529adf635b2f6ad1165d70d8b4"
integrity sha512-mUnk8rPJBI9loFDZ+YzPGdeniYK+FTmRD1TMCz7ev2SNIozyKKpnGgsxO34u6Z4z/t0ITuu7voi/AshfsGsgFg==
"@rollup/pluginutils@^5.1.0":
version "5.1.0"
resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-5.1.0.tgz#7e53eddc8c7f483a4ad0b94afb1f7f5fd3c771e0"
integrity sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==
dependencies:
"@types/estree" "^1.0.0"
estree-walker "^2.0.2"
picomatch "^2.3.1"
"@rushstack/node-core-library@3.62.0":
version "3.62.0"
resolved "https://registry.yarnpkg.com/@rushstack/node-core-library/-/node-core-library-3.62.0.tgz#a30a44a740b522944165f0faa6644134eb95be1d"
integrity sha512-88aJn2h8UpSvdwuDXBv1/v1heM6GnBf3RjEy6ZPP7UnzHNCqOHA2Ut+ScYUbXcqIdfew9JlTAe3g+cnX9xQ/Aw==
dependencies:
colors "~1.2.1"
fs-extra "~7.0.1"
import-lazy "~4.0.0"
jju "~1.4.0"
resolve "~1.22.1"
semver "~7.5.4"
z-schema "~5.0.2"
"@rushstack/rig-package@0.5.1":
version "0.5.1"
resolved "https://registry.yarnpkg.com/@rushstack/rig-package/-/rig-package-0.5.1.tgz#6c9c283cc96b5bb1eae9875946d974ac5429bb21"
integrity sha512-pXRYSe29TjRw7rqxD4WS3HN/sRSbfr+tJs4a9uuaSIBAITbUggygdhuG0VrO0EO+QqH91GhYMN4S6KRtOEmGVA==
dependencies:
resolve "~1.22.1"
strip-json-comments "~3.1.1"
"@rushstack/ts-command-line@4.17.1":
version "4.17.1"
resolved "https://registry.yarnpkg.com/@rushstack/ts-command-line/-/ts-command-line-4.17.1.tgz#c78db928ce5b93f2e98fd9e14c24f3f3876e57f1"
integrity sha512-2jweO1O57BYP5qdBGl6apJLB+aRIn5ccIRTPDyULh0KMwVzFqWtw6IZWt1qtUoZD/pD2RNkIOosH6Cq45rIYeg==
dependencies:
"@types/argparse" "1.0.38"
argparse "~1.0.9"
colors "~1.2.1"
string-argv "~0.3.1"
"@sentry-internal/feedback@7.102.1":
version "7.102.1"
resolved "https://registry.yarnpkg.com/@sentry-internal/feedback/-/feedback-7.102.1.tgz#747f88c2881c76fddd16bce57cc4bc17b4c2af93"
@ -3750,13 +3663,10 @@
unplugin "1.0.1"
uuid "^9.0.0"
"@signozhq/design-tokens@0.0.8":
version "0.0.8"
resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-0.0.8.tgz#368dc92cfe01d0cd893df140445c5d9dfd944a88"
integrity sha512-YUxQw6w7iyUMTBxj82nFZQNRsg7Boej3YM6K5bYfDMQg0MqvWQCWsP7EkyLHu/TiyOZwZWb++vzXG6m+YJX9bw==
dependencies:
style-dictionary "3.8.0"
vite-plugin-dts "^3.6.4"
"@signozhq/design-tokens@1.1.4":
version "1.1.4"
resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-1.1.4.tgz#5d5de5bd9d19b6a3631383db015cc4b70c3f7661"
integrity sha512-ICZz5szxTq8NcKAsk6LP+nSybPyEcyy8eu2zfxlPQCnJ1YjJP1PglaKLlF0N6+D60gAd3yC5he06BqR8/HxjNg==
"@sinclair/typebox@^0.25.16":
version "0.25.24"
@ -3876,11 +3786,6 @@
dependencies:
"@types/estree" "*"
"@types/argparse@1.0.38":
version "1.0.38"
resolved "https://registry.yarnpkg.com/@types/argparse/-/argparse-1.0.38.tgz#a81fd8606d481f873a3800c6ebae4f1d768a56a9"
integrity sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==
"@types/aria-query@^5.0.1":
version "5.0.1"
resolved "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.1.tgz"
@ -4859,68 +4764,7 @@
d3-time-format "4.1.0"
internmap "2.0.3"
"@volar/language-core@1.11.1", "@volar/language-core@~1.11.1":
version "1.11.1"
resolved "https://registry.yarnpkg.com/@volar/language-core/-/language-core-1.11.1.tgz#ecdf12ea8dc35fb8549e517991abcbf449a5ad4f"
integrity sha512-dOcNn3i9GgZAcJt43wuaEykSluAuOkQgzni1cuxLxTV0nJKanQztp7FxyswdRILaKH+P2XZMPRp2S4MV/pElCw==
dependencies:
"@volar/source-map" "1.11.1"
"@volar/source-map@1.11.1", "@volar/source-map@~1.11.1":
version "1.11.1"
resolved "https://registry.yarnpkg.com/@volar/source-map/-/source-map-1.11.1.tgz#535b0328d9e2b7a91dff846cab4058e191f4452f"
integrity sha512-hJnOnwZ4+WT5iupLRnuzbULZ42L7BWWPMmruzwtLhJfpDVoZLjNBxHDi2sY2bgZXCKlpU5XcsMFoYrsQmPhfZg==
dependencies:
muggle-string "^0.3.1"
"@volar/typescript@~1.11.1":
version "1.11.1"
resolved "https://registry.yarnpkg.com/@volar/typescript/-/typescript-1.11.1.tgz#ba86c6f326d88e249c7f5cfe4b765be3946fd627"
integrity sha512-iU+t2mas/4lYierSnoFOeRFQUhAEMgsFuQxoxvwn5EdQopw43j+J27a4lt9LMInx1gLJBC6qL14WYGlgymaSMQ==
dependencies:
"@volar/language-core" "1.11.1"
path-browserify "^1.0.1"
"@vue/compiler-core@3.4.4":
version "3.4.4"
resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.4.4.tgz#ba1ca008e95f118449cab79bdab3f7506bab2892"
integrity sha512-U5AdCN+6skzh2bSJrkMj2KZsVkUpgK8/XlxjSRYQZhNPcvt9/kmgIMpFEiTyK+Dz5E1J+8o8//BEIX+bakgVSw==
dependencies:
"@babel/parser" "^7.23.6"
"@vue/shared" "3.4.4"
entities "^4.5.0"
estree-walker "^2.0.2"
source-map-js "^1.0.2"
"@vue/compiler-dom@^3.3.0":
version "3.4.4"
resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.4.4.tgz#a11bba8af691b58700c479ce893b02bf71bb089a"
integrity sha512-iSwkdDULCN+Vr8z6uwdlL044GJ/nUmECxP9vu7MzEs4Qma0FwDLYvnvRcyO0ZITuu3Os4FptGUDnhi1kOLSaGw==
dependencies:
"@vue/compiler-core" "3.4.4"
"@vue/shared" "3.4.4"
"@vue/language-core@1.8.27", "@vue/language-core@^1.8.26":
version "1.8.27"
resolved "https://registry.yarnpkg.com/@vue/language-core/-/language-core-1.8.27.tgz#2ca6892cb524e024a44e554e4c55d7a23e72263f"
integrity sha512-L8Kc27VdQserNaCUNiSFdDl9LWT24ly8Hpwf1ECy3aFb9m6bDhBGQYOujDm21N7EW3moKIOKEanQwe1q5BK+mA==
dependencies:
"@volar/language-core" "~1.11.1"
"@volar/source-map" "~1.11.1"
"@vue/compiler-dom" "^3.3.0"
"@vue/shared" "^3.3.0"
computeds "^0.0.1"
minimatch "^9.0.3"
muggle-string "^0.3.1"
path-browserify "^1.0.1"
vue-template-compiler "^2.7.14"
"@vue/shared@3.4.4", "@vue/shared@^3.3.0":
version "3.4.4"
resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.4.4.tgz#26e4e62a5fbfb39c25e9e54d21eeb852f1c83a7a"
integrity sha512-abSgiVRhfjfl3JALR/cSuBl74hGJ3SePgf1mKzodf1eMWLwHZbfEGxT2cNJSsNiw44jEgrO7bNkhchaWA7RwNw==
"@webassemblyjs/ast@1.12.1", "@webassemblyjs/ast@^1.12.1":
"@webassemblyjs/ast@1.12.1":
version "1.12.1"
resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.12.1.tgz#bb16a0e8b1914f979f45864c23819cc3e3f0d4bb"
integrity sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==
@ -4928,16 +4772,34 @@
"@webassemblyjs/helper-numbers" "1.11.6"
"@webassemblyjs/helper-wasm-bytecode" "1.11.6"
"@webassemblyjs/ast@^1.12.1":
version "1.14.1"
resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.14.1.tgz#a9f6a07f2b03c95c8d38c4536a1fdfb521ff55b6"
integrity sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==
dependencies:
"@webassemblyjs/helper-numbers" "1.13.2"
"@webassemblyjs/helper-wasm-bytecode" "1.13.2"
"@webassemblyjs/floating-point-hex-parser@1.11.6":
version "1.11.6"
resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz#dacbcb95aff135c8260f77fa3b4c5fea600a6431"
integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==
"@webassemblyjs/floating-point-hex-parser@1.13.2":
version "1.13.2"
resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz#fcca1eeddb1cc4e7b6eed4fc7956d6813b21b9fb"
integrity sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==
"@webassemblyjs/helper-api-error@1.11.6":
version "1.11.6"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz#6132f68c4acd59dcd141c44b18cbebbd9f2fa768"
integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==
"@webassemblyjs/helper-api-error@1.13.2":
version "1.13.2"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz#e0a16152248bc38daee76dd7e21f15c5ef3ab1e7"
integrity sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==
"@webassemblyjs/helper-buffer@1.12.1":
version "1.12.1"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz#6df20d272ea5439bf20ab3492b7fb70e9bfcb3f6"
@ -4952,11 +4814,25 @@
"@webassemblyjs/helper-api-error" "1.11.6"
"@xtuc/long" "4.2.2"
"@webassemblyjs/helper-numbers@1.13.2":
version "1.13.2"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz#dbd932548e7119f4b8a7877fd5a8d20e63490b2d"
integrity sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==
dependencies:
"@webassemblyjs/floating-point-hex-parser" "1.13.2"
"@webassemblyjs/helper-api-error" "1.13.2"
"@xtuc/long" "4.2.2"
"@webassemblyjs/helper-wasm-bytecode@1.11.6":
version "1.11.6"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz#bb2ebdb3b83aa26d9baad4c46d4315283acd51e9"
integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==
"@webassemblyjs/helper-wasm-bytecode@1.13.2":
version "1.13.2"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz#e556108758f448aae84c850e593ce18a0eb31e0b"
integrity sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==
"@webassemblyjs/helper-wasm-section@1.12.1":
version "1.12.1"
resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz#3da623233ae1a60409b509a52ade9bc22a37f7bf"
@ -5223,7 +5099,7 @@ ajv-keywords@^5.1.0:
dependencies:
fast-deep-equal "^3.1.3"
ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.12.6, ajv@~6.12.6:
ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.12.6:
version "6.12.6"
resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@ -5382,7 +5258,7 @@ arg@^4.1.0:
resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz"
integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==
argparse@^1.0.7, argparse@~1.0.9:
argparse@^1.0.7:
version "1.0.10"
resolved "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz"
integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
@ -6298,15 +6174,6 @@ canvas-color-tracker@1:
dependencies:
tinycolor2 "^1.6.0"
capital-case@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/capital-case/-/capital-case-1.0.4.tgz#9d130292353c9249f6b00fa5852bee38a717e669"
integrity sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A==
dependencies:
no-case "^3.0.4"
tslib "^2.0.3"
upper-case-first "^2.0.2"
cardboard-vr-display@^1.0.19:
version "1.0.19"
resolved "https://registry.npmjs.org/cardboard-vr-display/-/cardboard-vr-display-1.0.19.tgz"
@ -6353,24 +6220,6 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.1:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
change-case@^4.1.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/change-case/-/change-case-4.1.2.tgz#fedfc5f136045e2398c0410ee441f95704641e12"
integrity sha512-bSxY2ws9OtviILG1EiY5K7NNxkqg/JnRnFxLtKQ96JaviiIxi7djMrSd0ECT9AC+lttClmYwKw53BWpOMblo7A==
dependencies:
camel-case "^4.1.2"
capital-case "^1.0.4"
constant-case "^3.0.4"
dot-case "^3.0.4"
header-case "^2.0.4"
no-case "^3.0.4"
param-case "^3.0.4"
pascal-case "^3.1.2"
path-case "^3.0.4"
sentence-case "^3.0.4"
snake-case "^3.0.4"
tslib "^2.0.3"
char-regex@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz"
@ -6622,11 +6471,6 @@ colorette@^2.0.10, colorette@^2.0.14, colorette@^2.0.16:
resolved "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz"
integrity sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==
colors@~1.2.1:
version "1.2.5"
resolved "https://registry.yarnpkg.com/colors/-/colors-1.2.5.tgz#89c7ad9a374bc030df8013241f68136ed8835afc"
integrity sha512-erNRLao/Y3Fv54qUa0LBB+//Uf3YwMUmdJinN20yMXm9zdKKqH9wt7R9IIVZ+K7ShzfpLV/Zg8+VyrBJYB4lpg==
combined-stream@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
@ -6649,11 +6493,6 @@ commander@2, commander@^2.20.0, commander@^2.20.3:
resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
commander@^10.0.0:
version "10.0.1"
resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06"
integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==
commander@^7.0.0, commander@^7.2.0:
version "7.2.0"
resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz"
@ -6723,11 +6562,6 @@ compute-scroll-into-view@^3.0.2:
resolved "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.0.3.tgz"
integrity sha512-nadqwNxghAGTamwIqQSG433W6OADZx2vCo3UXHNrzTRHK/htu+7+L0zhjEoaeaQVNAi3YgqWDv8+tzf0hRfR+A==
computeds@^0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/computeds/-/computeds-0.0.1.tgz#215b08a4ba3e08a11ff6eee5d6d8d7166a97ce2e"
integrity sha512-7CEBgcMjVmitjYo5q8JTJVra6X5mQ20uTThdK+0kR7UEaDrAWEQcRiBtWJzga4eRpP6afNwwLsX2SET2JhVB1Q==
concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz"
@ -6743,15 +6577,6 @@ connect-history-api-fallback@^2.0.0:
resolved "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz"
integrity sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==
constant-case@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/constant-case/-/constant-case-3.0.4.tgz#3b84a9aeaf4cf31ec45e6bf5de91bdfb0589faf1"
integrity sha512-I2hSBi7Vvs7BEuJDr5dDHfzb/Ruj3FyvFyh7KLilAjNQw3Be+xgqUBA2W6scVEcL0hL1dwPRtIqEPVUCKkSsyQ==
dependencies:
no-case "^3.0.4"
tslib "^2.0.3"
upper-case "^2.0.2"
content-disposition@0.5.4:
version "0.5.4"
resolved "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz"
@ -7394,11 +7219,6 @@ dayjs@^1.10.7, dayjs@^1.11.1:
resolved "https://registry.npmjs.org/dayjs/-/dayjs-1.11.7.tgz"
integrity sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ==
de-indent@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/de-indent/-/de-indent-1.0.2.tgz#b2038e846dc33baa5796128d0804b455b8c1e21d"
integrity sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==
debounce@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/debounce/-/debounce-1.2.1.tgz#38881d8f4166a5c5848020c11827b834bcb3e0a5"
@ -7850,7 +7670,7 @@ entities@^2.0.0, entities@^2.2.0:
resolved "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz"
integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==
entities@^4.2.0, entities@^4.4.0, entities@^4.5.0:
entities@^4.2.0, entities@^4.4.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
@ -8396,11 +8216,6 @@ estree-util-visit@^1.0.0:
"@types/estree-jsx" "^1.0.0"
"@types/unist" "^2.0.0"
estree-walker@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac"
integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==
estree-walker@^3.0.0:
version "3.0.3"
resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d"
@ -8819,15 +8634,6 @@ fs-extra@^10.0.0:
jsonfile "^6.0.1"
universalify "^2.0.0"
fs-extra@~7.0.1:
version "7.0.1"
resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9"
integrity sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==
dependencies:
graceful-fs "^4.1.2"
jsonfile "^4.0.0"
universalify "^0.1.0"
fs-monkey@^1.0.3:
version "1.0.3"
resolved "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz"
@ -8982,7 +8788,7 @@ glob-to-regexp@^0.4.1:
resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz"
integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==
glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.2.0:
glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6:
version "7.2.3"
resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz"
integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
@ -9380,14 +9186,6 @@ he@^1.2.0:
resolved "https://registry.npmjs.org/he/-/he-1.2.0.tgz"
integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==
header-case@^2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/header-case/-/header-case-2.0.4.tgz#5a42e63b55177349cf405beb8d775acabb92c063"
integrity sha512-H/vuk5TEEVZwrR0lp2zed9OCo1uAILMlx0JEMgC26rzyJJ3N1v6XkwHHXJQdR2doSjcGPM6OKPYoJgf0plJ11Q==
dependencies:
capital-case "^1.0.4"
tslib "^2.0.3"
headers-polyfill@3.2.5:
version "3.2.5"
resolved "https://registry.yarnpkg.com/headers-polyfill/-/headers-polyfill-3.2.5.tgz#6e67d392c9d113d37448fe45014e0afdd168faed"
@ -9686,11 +9484,6 @@ import-fresh@^3.0.0, import-fresh@^3.2.1:
parent-module "^1.0.0"
resolve-from "^4.0.0"
import-lazy@~4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-4.0.0.tgz#e8eb627483a0a43da3c03f3e35548be5cb0cc153"
integrity sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==
import-local@^3.0.2:
version "3.1.0"
resolved "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz"
@ -9911,13 +9704,6 @@ is-ci@^3.0.1:
dependencies:
ci-info "^3.2.0"
is-core-module@^2.1.0:
version "2.13.1"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384"
integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==
dependencies:
hasown "^2.0.0"
is-core-module@^2.11.0, is-core-module@^2.5.0, is-core-module@^2.9.0:
version "2.12.0"
resolved "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz"
@ -10767,11 +10553,6 @@ jest@^27.5.1:
import-local "^3.0.2"
jest-cli "^27.5.1"
jju@~1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/jju/-/jju-1.4.0.tgz#a3abe2718af241a2b2904f84a625970f389ae32a"
integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==
js-base64@^3.7.2:
version "3.7.5"
resolved "https://registry.npmjs.org/js-base64/-/js-base64-3.7.5.tgz"
@ -10897,18 +10678,6 @@ json5@^1.0.2:
dependencies:
minimist "^1.2.0"
jsonc-parser@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/jsonc-parser/-/jsonc-parser-3.2.0.tgz#31ff3f4c2b9793f89c67212627c51c6394f88e76"
integrity sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==
jsonfile@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb"
integrity sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==
optionalDependencies:
graceful-fs "^4.1.6"
jsonfile@^6.0.1:
version "6.1.0"
resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz"
@ -10968,11 +10737,6 @@ klona@^2.0.4:
resolved "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz"
integrity sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==
kolorist@^1.8.0:
version "1.8.0"
resolved "https://registry.yarnpkg.com/kolorist/-/kolorist-1.8.0.tgz#edddbbbc7894bc13302cdf740af6374d4a04743c"
integrity sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==
language-subtag-registry@~0.3.2:
version "0.3.22"
resolved "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz"
@ -11199,12 +10963,7 @@ lodash.debounce@^4.0.8:
resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz"
integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==
lodash.get@^4.4.2:
version "4.4.2"
resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99"
integrity sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==
lodash.isequal@^4.0.0, lodash.isequal@^4.5.0:
lodash.isequal@^4.0.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/lodash.isequal/-/lodash.isequal-4.5.0.tgz#415c4478f2bcc30120c22ce10ed3226f7d3e18e0"
integrity sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==
@ -11234,7 +10993,7 @@ lodash.uniq@^4.5.0:
resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz"
integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==
lodash@4.17.21, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.7.0, lodash@~4.17.15:
lodash@4.17.21, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.7.0:
version "4.17.21"
resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
@ -12164,13 +11923,6 @@ minimatch@^8.0.2:
dependencies:
brace-expansion "^2.0.1"
minimatch@^9.0.3:
version "9.0.3"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825"
integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==
dependencies:
brace-expansion "^2.0.1"
minimist-options@4.1.0:
version "4.1.0"
resolved "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz"
@ -12264,11 +12016,6 @@ msw@1.3.2:
type-fest "^2.19.0"
yargs "^17.3.1"
muggle-string@^0.3.1:
version "0.3.1"
resolved "https://registry.yarnpkg.com/muggle-string/-/muggle-string-0.3.1.tgz#e524312eb1728c63dd0b2ac49e3282e6ed85963a"
integrity sha512-ckmWDJjphvd/FvZawgygcUeQCxzvohjFO5RxTjj4eq8kw359gFF3E1brjfI+viLMxss5JrHTDRHZvu2/tuy0Qg==
multicast-dns@^7.2.5:
version "7.2.5"
resolved "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz"
@ -12944,19 +12691,6 @@ pascal-case@^3.1.2:
no-case "^3.0.4"
tslib "^2.0.3"
path-browserify@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd"
integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==
path-case@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/path-case/-/path-case-3.0.4.tgz#9168645334eb942658375c56f80b4c0cb5f82c6f"
integrity sha512-qO4qCFjXqVTrcbPt/hQfhTQ+VhFsqNKOPtytgNKkKxSoEp3XPUQ8ObFuePylOIok5gjn69ry8XiULxCwot3Wfg==
dependencies:
dot-case "^3.0.4"
tslib "^2.0.3"
path-exists@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz"
@ -12982,7 +12716,7 @@ path-key@^3.0.0, path-key@^3.1.0:
resolved "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz"
integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
path-parse@^1.0.6, path-parse@^1.0.7:
path-parse@^1.0.7:
version "1.0.7"
resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
@ -14854,23 +14588,6 @@ resolve@^2.0.0-next.4:
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
resolve@~1.19.0:
version "1.19.0"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.19.0.tgz#1af5bf630409734a067cae29318aac7fa29a267c"
integrity sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg==
dependencies:
is-core-module "^2.1.0"
path-parse "^1.0.6"
resolve@~1.22.1:
version "1.22.8"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d"
integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==
dependencies:
is-core-module "^2.13.0"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
restore-cursor@^3.1.0:
version "3.1.0"
resolved "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz"
@ -15065,7 +14782,7 @@ selfsigned@^2.1.1:
dependencies:
node-forge "^1"
"semver@2 || 3 || 4 || 5", semver@7.3.7, semver@7.5.4, semver@7.x, semver@^5.5.0, semver@^5.6.0, semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.4, semver@~7.5.4:
"semver@2 || 3 || 4 || 5", semver@7.3.7, semver@7.5.4, semver@7.x, semver@^5.5.0, semver@^5.6.0, semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7:
version "7.5.4"
resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e"
integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==
@ -15091,15 +14808,6 @@ send@0.19.0:
range-parser "~1.2.1"
statuses "2.0.1"
sentence-case@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/sentence-case/-/sentence-case-3.0.4.tgz#3645a7b8c117c787fde8702056225bb62a45131f"
integrity sha512-8LS0JInaQMCRoQ7YUytAo/xUu5W2XnQxV2HI/6uM6U7CITS1RqPElr30V6uIqyMKM9lJGRVFy5/4CuzcixNYSg==
dependencies:
no-case "^3.0.4"
tslib "^2.0.3"
upper-case-first "^2.0.2"
serialize-javascript@^5.0.1:
version "5.0.1"
resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz"
@ -15305,14 +15013,6 @@ slice-ansi@^5.0.0:
ansi-styles "^6.0.0"
is-fullwidth-code-point "^4.0.0"
snake-case@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/snake-case/-/snake-case-3.0.4.tgz#4f2bbd568e9935abdfd593f34c691dadb49c452c"
integrity sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==
dependencies:
dot-case "^3.0.4"
tslib "^2.0.3"
sockjs@^0.3.24:
version "0.3.24"
resolved "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz"
@ -15541,11 +15241,6 @@ string-argv@^0.3.1:
resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.1.tgz"
integrity sha512-a1uQGz7IyVy9YwhqjZIZu1c8JO8dNIe20xBmSS6qu9kv++k3JGzCVmprbNN5Kn+BgzD5E7YYwg1CcjuJMRNsvg==
string-argv@~0.3.1:
version "0.3.2"
resolved "https://registry.yarnpkg.com/string-argv/-/string-argv-0.3.2.tgz#2b6d0ef24b656274d957d54e0a4bbf6153dc02b6"
integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==
string-convert@^0.2.0:
version "0.2.1"
resolved "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz"
@ -15690,26 +15385,11 @@ strip-indent@^3.0.0:
dependencies:
min-indent "^1.0.0"
strip-json-comments@^3.1.0, strip-json-comments@^3.1.1, strip-json-comments@~3.1.1:
strip-json-comments@^3.1.0, strip-json-comments@^3.1.1:
version "3.1.1"
resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz"
integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
style-dictionary@3.8.0:
version "3.8.0"
resolved "https://registry.yarnpkg.com/style-dictionary/-/style-dictionary-3.8.0.tgz#7cb8d64360c53431f768d44def665f61e971a73e"
integrity sha512-wHlB/f5eO3mDcYv6WtOz6gvQC477jBKrwuIXe+PtHskTCBsJdAOvL8hCquczJxDui2TnwpeNE+2msK91JJomZg==
dependencies:
chalk "^4.0.0"
change-case "^4.1.2"
commander "^8.3.0"
fs-extra "^10.0.0"
glob "^7.2.0"
json5 "^2.2.2"
jsonc-parser "^3.0.0"
lodash "^4.17.15"
tinycolor2 "^1.4.1"
style-loader@1.3.0:
version "1.3.0"
resolved "https://registry.npmjs.org/style-loader/-/style-loader-1.3.0.tgz"
@ -16036,7 +15716,7 @@ tiny-warning@^1.0.0:
resolved "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz"
integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==
tinycolor2@1, tinycolor2@1.6.0, tinycolor2@^1.4.1, tinycolor2@^1.6.0:
tinycolor2@1, tinycolor2@1.6.0, tinycolor2@^1.6.0:
version "1.6.0"
resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.6.0.tgz#f98007460169b0263b97072c5ae92484ce02d09e"
integrity sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw==
@ -16359,11 +16039,6 @@ typescript-plugin-css-modules@5.0.1:
stylus "^0.59.0"
tsconfig-paths "^4.1.2"
typescript@5.3.3:
version "5.3.3"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.3.3.tgz#b3ce6ba258e72e6305ba66f5c9b452aaee3ffe37"
integrity sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==
typescript@^4.0.5, typescript@^4.4.3:
version "4.9.5"
resolved "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz"
@ -16520,11 +16195,6 @@ unist-util-visit@^5.0.0:
unist-util-is "^6.0.0"
unist-util-visit-parents "^6.0.0"
universalify@^0.1.0:
version "0.1.2"
resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66"
integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==
universalify@^0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz"
@ -16571,20 +16241,6 @@ uplot@1.6.31:
resolved "https://registry.yarnpkg.com/uplot/-/uplot-1.6.31.tgz#092a4b586590e9794b679e1df885a15584b03698"
integrity sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==
upper-case-first@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/upper-case-first/-/upper-case-first-2.0.2.tgz#992c3273f882abd19d1e02894cc147117f844324"
integrity sha512-514ppYHBaKwfJRK/pNC6c/OxfGa0obSnAl106u97Ed0I625Nin96KAjttZF6ZL3e1XLtphxnqrOi9iWgm+u+bg==
dependencies:
tslib "^2.0.3"
upper-case@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-2.0.2.tgz#d89810823faab1df1549b7d97a76f8662bae6f7a"
integrity sha512-KgdgDGJt2TpuwBUIjgG6lzw2GWFRCW9Qkfkiv0DxqHHLYJHmtmdUIKcZd8rHgFSjopVTlw6ggzCm1b8MFQwikg==
dependencies:
tslib "^2.0.3"
uri-js@^4.2.2:
version "4.4.1"
resolved "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz"
@ -16693,11 +16349,6 @@ validate-npm-package-license@^3.0.1:
spdx-correct "^3.0.0"
spdx-expression-parse "^3.0.0"
validator@^13.7.0:
version "13.11.0"
resolved "https://registry.yarnpkg.com/validator/-/validator-13.11.0.tgz#23ab3fd59290c61248364eabf4067f04955fbb1b"
integrity sha512-Ii+sehpSfZy+At5nPdnyMhx78fEoPDkR2XW/zimHEL3MyGJQOCQ7WeP20jPYRz7ZCpcKLB21NxuXHF3bxjStBQ==
value-equal@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz"
@ -16759,40 +16410,11 @@ vfile@^6.0.0:
unist-util-stringify-position "^4.0.0"
vfile-message "^4.0.0"
vite-plugin-dts@^3.6.4:
version "3.7.0"
resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-3.7.0.tgz#654ee7c38c0cdd4589b9bc198a264f34172bd870"
integrity sha512-np1uPaYzu98AtPReB8zkMnbjwcNHOABsLhqVOf81b3ol9b5M2wPcAVs8oqPnOpr6Us+7yDXVauwkxsk5+ldmRA==
dependencies:
"@microsoft/api-extractor" "7.39.0"
"@rollup/pluginutils" "^5.1.0"
"@vue/language-core" "^1.8.26"
debug "^4.3.4"
kolorist "^1.8.0"
vue-tsc "^1.8.26"
void-elements@3.1.0:
version "3.1.0"
resolved "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz"
integrity sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==
vue-template-compiler@^2.7.14:
version "2.7.16"
resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.7.16.tgz#c81b2d47753264c77ac03b9966a46637482bb03b"
integrity sha512-AYbUWAJHLGGQM7+cNTELw+KsOG9nl2CnSv467WobS5Cv9uk3wFcnr1Etsz2sEIHEZvw1U+o9mRlEO6QbZvUPGQ==
dependencies:
de-indent "^1.0.2"
he "^1.2.0"
vue-tsc@^1.8.26:
version "1.8.27"
resolved "https://registry.yarnpkg.com/vue-tsc/-/vue-tsc-1.8.27.tgz#feb2bb1eef9be28017bb9e95e2bbd1ebdd48481c"
integrity sha512-WesKCAZCRAbmmhuGl3+VrdWItEvfoFIPXOvUJkjULi+x+6G/Dy69yO3TBRJDr9eUlmsNAwVmxsNZxvHKzbkKdg==
dependencies:
"@volar/typescript" "~1.11.1"
"@vue/language-core" "1.8.27"
semver "^7.5.4"
w3c-hr-time@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz"
@ -17392,17 +17014,6 @@ yocto-queue@^1.0.0:
resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251"
integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==
z-schema@~5.0.2:
version "5.0.6"
resolved "https://registry.yarnpkg.com/z-schema/-/z-schema-5.0.6.tgz#46d6a687b15e4a4369e18d6cb1c7b8618fc256c5"
integrity sha512-+XR1GhnWklYdfr8YaZv/iu+vY+ux7V5DS5zH1DQf6bO5ufrt/5cgNhVO5qyhsjFXvsqQb/f08DWE9b6uPscyAg==
dependencies:
lodash.get "^4.4.2"
lodash.isequal "^4.5.0"
validator "^13.7.0"
optionalDependencies:
commander "^10.0.0"
zwitch@^2.0.0, zwitch@^2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-2.0.4.tgz#c827d4b0acb76fc3e685a4c6ec2902d51070e9d7"

View File

@ -111,6 +111,7 @@ type APIHandler struct {
Upgrader *websocket.Upgrader
UseLogsNewSchema bool
UseLicensesV3 bool
hostsRepo *inframetrics.HostsRepo
processesRepo *inframetrics.ProcessesRepo
@ -118,6 +119,11 @@ type APIHandler struct {
nodesRepo *inframetrics.NodesRepo
namespacesRepo *inframetrics.NamespacesRepo
clustersRepo *inframetrics.ClustersRepo
// workloads
deploymentsRepo *inframetrics.DeploymentsRepo
daemonsetsRepo *inframetrics.DaemonSetsRepo
statefulsetsRepo *inframetrics.StatefulSetsRepo
jobsRepo *inframetrics.JobsRepo
}
type APIHandlerOpts struct {
@ -156,6 +162,9 @@ type APIHandlerOpts struct {
// Use Logs New schema
UseLogsNewSchema bool
// Use Licenses V3 structure
UseLicensesV3 bool
}
// NewAPIHandler returns an APIHandler
@ -193,6 +202,10 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
nodesRepo := inframetrics.NewNodesRepo(opts.Reader, querierv2)
namespacesRepo := inframetrics.NewNamespacesRepo(opts.Reader, querierv2)
clustersRepo := inframetrics.NewClustersRepo(opts.Reader, querierv2)
deploymentsRepo := inframetrics.NewDeploymentsRepo(opts.Reader, querierv2)
daemonsetsRepo := inframetrics.NewDaemonSetsRepo(opts.Reader, querierv2)
statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2)
jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2)
aH := &APIHandler{
reader: opts.Reader,
@ -211,12 +224,17 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
querier: querier,
querierV2: querierv2,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseLicensesV3: opts.UseLicensesV3,
hostsRepo: hostsRepo,
processesRepo: processesRepo,
podsRepo: podsRepo,
nodesRepo: nodesRepo,
namespacesRepo: namespacesRepo,
clustersRepo: clustersRepo,
deploymentsRepo: deploymentsRepo,
daemonsetsRepo: daemonsetsRepo,
statefulsetsRepo: statefulsetsRepo,
jobsRepo: jobsRepo,
}
logsQueryBuilder := logsv3.PrepareLogsQuery
@ -314,6 +332,8 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
code = http.StatusUnauthorized
case model.ErrorForbidden:
code = http.StatusForbidden
case model.ErrorConflict:
code = http.StatusConflict
default:
code = http.StatusInternalServerError
}
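With model.ErrorConflict now mapped to http.StatusConflict, handlers can surface duplicate-resource errors as HTTP 409. A minimal call-site sketch, assuming the surrounding package's RespondError, model, and errors imports; the handler name and duplicate check are hypothetical, not part of this diff:
// createExampleResource is illustrative only: it shows the new
// ErrorConflict mapping turning a duplicate into a 409 response.
func (aH *APIHandler) createExampleResource(w http.ResponseWriter, r *http.Request) {
	// resourceExists is a hypothetical duplicate check, not part of this diff.
	if resourceExists(r) {
		RespondError(w, &model.ApiError{Typ: model.ErrorConflict, Err: errors.New("resource already exists")}, nil)
		return
	}
	aH.Respond(w, map[string]string{"status": "created"})
}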
@ -395,6 +415,26 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid
clustersSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getClusterAttributeKeys)).Methods(http.MethodGet)
clustersSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getClusterAttributeValues)).Methods(http.MethodGet)
clustersSubRouter.HandleFunc("/list", am.ViewAccess(aH.getClusterList)).Methods(http.MethodPost)
deploymentsSubRouter := router.PathPrefix("/api/v1/deployments").Subrouter()
deploymentsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDeploymentAttributeKeys)).Methods(http.MethodGet)
deploymentsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDeploymentAttributeValues)).Methods(http.MethodGet)
deploymentsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDeploymentList)).Methods(http.MethodPost)
daemonsetsSubRouter := router.PathPrefix("/api/v1/daemonsets").Subrouter()
daemonsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDaemonSetAttributeKeys)).Methods(http.MethodGet)
daemonsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDaemonSetAttributeValues)).Methods(http.MethodGet)
daemonsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDaemonSetList)).Methods(http.MethodPost)
statefulsetsSubRouter := router.PathPrefix("/api/v1/statefulsets").Subrouter()
statefulsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getStatefulSetAttributeKeys)).Methods(http.MethodGet)
statefulsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getStatefulSetAttributeValues)).Methods(http.MethodGet)
statefulsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getStatefulSetList)).Methods(http.MethodPost)
jobsSubRouter := router.PathPrefix("/api/v1/jobs").Subrouter()
jobsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getJobAttributeKeys)).Methods(http.MethodGet)
jobsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getJobAttributeValues)).Methods(http.MethodGet)
jobsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getJobList)).Methods(http.MethodPost)
}
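Each new workload router follows the same pattern as the existing hosts/pods routes: GET attribute_keys, GET attribute_values, and POST list, all behind ViewAccess. As a rough client-side sketch, the deployments list endpoint could be exercised as below; the host, port, token, and payload values are assumptions for illustration, not part of this diff.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// start/end are epoch millis; orderBy mirrors the v3 query-builder shape
	// used by the list handlers registered above.
	payload := []byte(`{"start": 1731600000000, "end": 1731603600000, "orderBy": {"columnName": "cpu", "order": "desc"}, "limit": 10}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/api/v1/deployments/list", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <token>") // placeholder credential for the ViewAccess middleware
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}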
func (aH *APIHandler) RegisterWebSocketPaths(router *mux.Router, am *AuthMiddleware) {
@ -3217,16 +3257,16 @@ func (aH *APIHandler) getProducerThroughputOverview(
}
for _, res := range result {
for _, series := range res.Series {
serviceName, serviceNameOk := series.Labels["service_name"]
topicName, topicNameOk := series.Labels["topic"]
params := []string{serviceName, topicName}
for _, list := range res.List {
serviceName, serviceNameOk := list.Data["service_name"].(*string)
topicName, topicNameOk := list.Data["topic"].(*string)
// guard before dereferencing: a failed type assertion leaves a nil pointer
if !serviceNameOk || !topicNameOk {
continue
}
params := []string{*serviceName, *topicName}
hashKey := uniqueIdentifier(params, "#")
_, ok := attributeCache.Hash[hashKey]
if topicNameOk && serviceNameOk && !ok {
attributeCache.Hash[hashKey] = struct{}{}
attributeCache.TopicName = append(attributeCache.TopicName, topicName)
attributeCache.ServiceName = append(attributeCache.ServiceName, serviceName)
attributeCache.TopicName = append(attributeCache.TopicName, *topicName)
attributeCache.ServiceName = append(attributeCache.ServiceName, *serviceName)
}
}
}
@ -3251,25 +3291,23 @@ func (aH *APIHandler) getProducerThroughputOverview(
}
latencyColumn := &v3.Result{QueryName: "latency"}
var latencySeries []*v3.Series
var latencySeries []*v3.Row
for _, res := range resultFetchLatency {
for _, series := range res.Series {
topic, topicOk := series.Labels["topic"]
serviceName, serviceNameOk := series.Labels["service_name"]
params := []string{topic, serviceName}
for _, list := range res.List {
topic, topicOk := list.Data["topic"].(*string)
serviceName, serviceNameOk := list.Data["service_name"].(*string)
// guard before dereferencing: a failed type assertion leaves a nil pointer
if !topicOk || !serviceNameOk {
continue
}
params := []string{*serviceName, *topic}
hashKey := uniqueIdentifier(params, "#")
_, ok := attributeCache.Hash[hashKey]
if topicOk && serviceNameOk && ok {
latencySeries = append(latencySeries, series)
latencySeries = append(latencySeries, list)
}
}
}
latencyColumn.Series = latencySeries
latencyColumn.List = latencySeries
result = append(result, latencyColumn)
resultFetchLatency = postprocess.TransformToTableForBuilderQueries(result, queryRangeParams)
resp := v3.QueryRangeResponse{
Result: resultFetchLatency,
}
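The migration above from res.Series to res.List turns plain label-map reads into *string type assertions, which is why the ok-guards matter before any dereference. A small helper sketch of that same pattern; the function is illustrative and not part of this diff:
// stringField safely extracts a *string entry from a row's Data map,
// returning false when the key is absent, of another type, or nil.
func stringField(data map[string]interface{}, key string) (string, bool) {
	v, ok := data[key].(*string)
	if !ok || v == nil {
		return "", false
	}
	return *v, true
}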

View File

@ -334,3 +334,213 @@ func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) {
aH.Respond(w, clusterList)
}
func (aH *APIHandler) getDeploymentAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.deploymentsRepo.GetDeploymentAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getDeploymentAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.deploymentsRepo.GetDeploymentAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getDeploymentList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.DeploymentListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
deploymentList, err := aH.deploymentsRepo.GetDeploymentList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, deploymentList)
}
func (aH *APIHandler) getDaemonSetAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.daemonsetsRepo.GetDaemonSetAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getDaemonSetAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.daemonsetsRepo.GetDaemonSetAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getDaemonSetList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.DaemonSetListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
daemonSetList, err := aH.daemonsetsRepo.GetDaemonSetList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, daemonSetList)
}
func (aH *APIHandler) getStatefulSetAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.statefulsetsRepo.GetStatefulSetAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getStatefulSetAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.statefulsetsRepo.GetStatefulSetAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getStatefulSetList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.StatefulSetListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
statefulSetList, err := aH.statefulsetsRepo.GetStatefulSetList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, statefulSetList)
}
func (aH *APIHandler) getJobAttributeKeys(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeKeyRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
keys, err := aH.jobsRepo.GetJobAttributeKeys(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, keys)
}
func (aH *APIHandler) getJobAttributeValues(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := parseFilterAttributeValueRequest(r)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
values, err := aH.jobsRepo.GetJobAttributeValues(ctx, *req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, values)
}
func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := model.JobListRequest{}
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
jobList, err := aH.jobsRepo.GetJobList(ctx, req)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
return
}
aH.Respond(w, jobList)
}

View File

@ -73,6 +73,22 @@ func getParamsForTopClusters(req model.ClusterListRequest) (int64, string, strin
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopDeployments(req model.DeploymentListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopDaemonSets(req model.DaemonSetListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopStatefulSets(req model.StatefulSetListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
func getParamsForTopJobs(req model.JobListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
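All four wrappers delegate to getParamsForTopItems, which, judging from its call sites, returns a step interval plus the time-series and samples table names later passed as MetricTableHints. A hedged sketch of the likely shape, assuming coarser rollup tables for longer ranges; the thresholds and table names below are assumptions, not taken from this diff:
package inframetrics

import "time"

// getParamsForTopItemsSketch is illustrative only; the real helper lives
// elsewhere in this package. The idea: widen the step and point queries at
// coarser rollup tables as the queried range grows.
func getParamsForTopItemsSketch(start, end int64) (step int64, timeSeriesTable, samplesTable string) {
	rangeMs := end - start
	switch {
	case rangeMs <= time.Hour.Milliseconds():
		return 60, "time_series_v4", "samples_v4"
	case rangeMs <= (24 * time.Hour).Milliseconds():
		return 300, "time_series_v4_6hrs", "samples_v4_agg_5m"
	default:
		return 1800, "time_series_v4_1day", "samples_v4_agg_30m"
	}
}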
// TODO(srikanthccv): remove this
// What is happening here?
// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint

View File

@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForDaemonSets = "k8s_pod_cpu_utilization"
k8sDaemonSetNameAttrKey = "k8s_daemonset_name"
metricNamesForDaemonSets = map[string]string{
"desired_nodes": "k8s_daemonset_desired_scheduled_nodes",
"available_nodes": "k8s_daemonset_current_scheduled_nodes",
}
daemonSetAttrsToEnrich = []string{
"k8s_daemonset_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForDaemonSets = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_nodes": {"H"},
"available_nodes": {"I"},
}
builderQueriesForDaemonSets = map[string]*v3.BuilderQuery{
// desired nodes
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDaemonSets["desired_nodes"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available nodes
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDaemonSets["available_nodes"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
daemonSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type DaemonSetsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewDaemonSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DaemonSetsRepo {
return &DaemonSetsRepo{reader: reader, querierV2: querierV2}
}
func (d *DaemonSetsRepo) GetDaemonSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDaemonSets
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *DaemonSetsRepo) GetDaemonSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDaemonSets
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *DaemonSetsRepo) getMetadataAttributes(ctx context.Context, req model.DaemonSetListRequest) (map[string]map[string]string, error) {
daemonSetAttrs := map[string]map[string]string{}
for _, key := range daemonSetAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForDaemonSets,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
daemonSetName := stringData[k8sDaemonSetNameAttrKey]
if _, ok := daemonSetAttrs[daemonSetName]; !ok {
daemonSetAttrs[daemonSetName] = map[string]string{}
}
for _, key := range req.GroupBy {
daemonSetAttrs[daemonSetName][key.Key] = stringData[key.Key]
}
}
return daemonSetAttrs, nil
}
func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.DaemonSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopDaemonSets(req)
queryNames := queryNamesForDaemonSets[req.OrderBy.ColumnName]
topDaemonSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topDaemonSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topDaemonSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDaemonSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopDaemonSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topDaemonSetGroups := []map[string]string{}
for _, series := range paginatedTopDaemonSetGroupsSeries {
topDaemonSetGroups = append(topDaemonSetGroups, series.Labels)
}
allDaemonSetGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allDaemonSetGroups = append(allDaemonSetGroups, series.Labels)
}
return topDaemonSetGroups, allDaemonSetGroups, nil
}
func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonSetListRequest) (model.DaemonSetListResponse, error) {
resp := model.DaemonSetListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sDaemonSetNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for daemon sets
for _, daemonSetQuery := range builderQueriesForDaemonSets {
query.CompositeQuery.BuilderQueries[daemonSetQuery.QueryName] = daemonSetQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for daemon sets
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sDaemonSetNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
daemonSetAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topDaemonSetGroups, allDaemonSetGroups, err := d.getTopDaemonSetGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topDaemonSetGroup := range topDaemonSetGroups {
for k, v := range topDaemonSetGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.DaemonSetListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.DaemonSetListRecord{
DaemonSetName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredNodes: -1,
AvailableNodes: -1,
}
if daemonSetName, ok := row.Data[k8sDaemonSetNameAttrKey].(string); ok {
record.DaemonSetName = daemonSetName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredNodes, ok := row.Data["H"].(float64); ok {
record.DesiredNodes = int(desiredNodes)
}
if availableNodes, ok := row.Data["I"].(float64); ok {
record.AvailableNodes = int(availableNodes)
}
record.Meta = map[string]string{}
if _, ok := daemonSetAttrs[record.DaemonSetName]; ok {
record.Meta = daemonSetAttrs[record.DaemonSetName]
}
for k, v := range row.Data {
if slices.Contains(daemonSetQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allDaemonSetGroups)
resp.Records = records
return resp, nil
}
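A caller-side sketch of GetDaemonSetList, assuming the package's existing context, model, and v3 imports; the time range, filter values, and function name are illustrative:
// exampleDaemonSetList is illustrative only and not part of this diff.
func exampleDaemonSetList(ctx context.Context, repo *DaemonSetsRepo) (model.DaemonSetListResponse, error) {
	req := model.DaemonSetListRequest{
		Start:   1731600000000, // epoch millis
		End:     1731603600000,
		Limit:   10,
		OrderBy: &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc},
		Filters: &v3.FilterSet{
			Operator: "AND",
			Items: []v3.FilterItem{{
				Key:      v3.AttributeKey{Key: "k8s_cluster_name"},
				Operator: v3.FilterOperatorEqual, // assumed equality operator constant
				Value:    "prod-cluster",
			}},
		},
	}
	return repo.GetDaemonSetList(ctx, req)
}
Leaving GroupBy unset keeps resp.Type as model.ResponseTypeList and defaults the grouping to k8s_daemonset_name, per the code above.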

View File

@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForDeployments = "k8s_pod_cpu_utilization"
k8sDeploymentNameAttrKey = "k8s_deployment_name"
metricNamesForDeployments = map[string]string{
"desired_pods": "k8s_deployment_desired",
"available_pods": "k8s_deployment_available",
}
deploymentAttrsToEnrich = []string{
"k8s_deployment_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForDeployments = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"available_pods": {"I"},
}
builderQueriesForDeployments = map[string]*v3.BuilderQuery{
// desired pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDeployments["desired_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForDeployments["available_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
deploymentQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type DeploymentsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewDeploymentsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DeploymentsRepo {
return &DeploymentsRepo{reader: reader, querierV2: querierV2}
}
func (d *DeploymentsRepo) GetDeploymentAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDeployments
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *DeploymentsRepo) GetDeploymentAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForDeployments
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *DeploymentsRepo) getMetadataAttributes(ctx context.Context, req model.DeploymentListRequest) (map[string]map[string]string, error) {
deploymentAttrs := map[string]map[string]string{}
for _, key := range deploymentAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForDeployments,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
deploymentName := stringData[k8sDeploymentNameAttrKey]
if _, ok := deploymentAttrs[deploymentName]; !ok {
deploymentAttrs[deploymentName] = map[string]string{}
}
for _, key := range req.GroupBy {
deploymentAttrs[deploymentName][key.Key] = stringData[key.Key]
}
}
return deploymentAttrs, nil
}
func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.DeploymentListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopDeployments(req)
queryNames := queryNamesForDeployments[req.OrderBy.ColumnName]
topDeploymentGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topDeploymentGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topDeploymentGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDeploymentGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopDeploymentGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topDeploymentGroups := []map[string]string{}
for _, series := range paginatedTopDeploymentGroupsSeries {
topDeploymentGroups = append(topDeploymentGroups, series.Labels)
}
allDeploymentGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allDeploymentGroups = append(allDeploymentGroups, series.Labels)
}
return topDeploymentGroups, allDeploymentGroups, nil
}
func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.DeploymentListRequest) (model.DeploymentListResponse, error) {
resp := model.DeploymentListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sDeploymentNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for deployments
for _, deploymentQuery := range builderQueriesForDeployments {
query.CompositeQuery.BuilderQueries[deploymentQuery.QueryName] = deploymentQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for deployments
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sDeploymentNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
deploymentAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topDeploymentGroups, allDeploymentGroups, err := d.getTopDeploymentGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topDeploymentGroup := range topDeploymentGroups {
for k, v := range topDeploymentGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.DeploymentListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.DeploymentListRecord{
DeploymentName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredPods: -1,
AvailablePods: -1,
}
if deploymentName, ok := row.Data[k8sDeploymentNameAttrKey].(string); ok {
record.DeploymentName = deploymentName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredPods, ok := row.Data["H"].(float64); ok {
record.DesiredPods = int(desiredPods)
}
if availablePods, ok := row.Data["I"].(float64); ok {
record.AvailablePods = int(availablePods)
}
record.Meta = map[string]string{}
if _, ok := deploymentAttrs[record.DeploymentName]; ok {
record.Meta = deploymentAttrs[record.DeploymentName]
}
for k, v := range row.Data {
if slices.Contains(deploymentQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allDeploymentGroups)
resp.Records = records
return resp, nil
}

View File

@ -0,0 +1,498 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForJobs = "k8s_pod_cpu_utilization"
k8sJobNameAttrKey = "k8s_job_name"
metricNamesForJobs = map[string]string{
"desired_successful_pods": "k8s_job_desired_successful_pods",
"active_pods": "k8s_job_active_pods",
"failed_pods": "k8s_job_failed_pods",
"successful_pods": "k8s_job_successful_pods",
}
jobAttrsToEnrich = []string{
"k8s_job_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForJobs = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"active_pods": {"I"},
"failed_pods": {"J"},
"successful_pods": {"K"},
}
builderQueriesForJobs = map[string]*v3.BuilderQuery{
// desired successful pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["desired_successful_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// active pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["active_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// failed pods
"J": {
QueryName: "J",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["failed_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "J",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// successful pods
"K": {
QueryName: "K",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForJobs["successful_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "K",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
jobQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}
)
type JobsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewJobsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *JobsRepo {
return &JobsRepo{reader: reader, querierV2: querierV2}
}
func (d *JobsRepo) GetJobAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForJobs
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *JobsRepo) GetJobAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForJobs
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *JobsRepo) getMetadataAttributes(ctx context.Context, req model.JobListRequest) (map[string]map[string]string, error) {
jobAttrs := map[string]map[string]string{}
for _, key := range jobAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForJobs,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
jobName := stringData[k8sJobNameAttrKey]
if _, ok := jobAttrs[jobName]; !ok {
jobAttrs[jobName] = map[string]string{}
}
for _, key := range req.GroupBy {
jobAttrs[jobName][key.Key] = stringData[key.Key]
}
}
return jobAttrs, nil
}
func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopJobs(req)
queryNames := queryNamesForJobs[req.OrderBy.ColumnName]
topJobGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topJobGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topJobGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topJobGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopJobGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topJobGroups := []map[string]string{}
for _, series := range paginatedTopJobGroupsSeries {
topJobGroups = append(topJobGroups, series.Labels)
}
allJobGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allJobGroups = append(allJobGroups, series.Labels)
}
return topJobGroups, allJobGroups, nil
}
func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (model.JobListResponse, error) {
resp := model.JobListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "desired_pods", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sJobNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for jobs
for _, jobQuery := range builderQueriesForJobs {
query.CompositeQuery.BuilderQueries[jobQuery.QueryName] = jobQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for jobs
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sJobNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
jobAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topJobGroups, allJobGroups, err := d.getTopJobGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topJobGroup := range topJobGroups {
for k, v := range topJobGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.JobListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.JobListRecord{
JobName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredSuccessfulPods: -1,
ActivePods: -1,
FailedPods: -1,
SuccessfulPods: -1,
}
if jobName, ok := row.Data[k8sJobNameAttrKey].(string); ok {
record.JobName = jobName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredSuccessfulPods, ok := row.Data["H"].(float64); ok {
record.DesiredSuccessfulPods = int(desiredSuccessfulPods)
}
if activePods, ok := row.Data["I"].(float64); ok {
record.ActivePods = int(activePods)
}
if failedPods, ok := row.Data["J"].(float64); ok {
record.FailedPods = int(failedPods)
}
if successfulPods, ok := row.Data["K"].(float64); ok {
record.SuccessfulPods = int(successfulPods)
}
record.Meta = map[string]string{}
if _, ok := jobAttrs[record.JobName]; ok {
record.Meta = jobAttrs[record.JobName]
}
for k, v := range row.Data {
if slices.Contains(jobQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allJobGroups)
resp.Records = records
return resp, nil
}
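
A hedged usage sketch for the repo above, assuming a reader and querier have already been wired up at server start-up and that the package lives at pkg/query-service/app/inframetrics (the import path is inferred, not shown in this diff). "failed_pods" is one of the sortable columns declared in queryNamesForJobs.

package example

import (
    "context"
    "fmt"

    "go.signoz.io/signoz/pkg/query-service/app/inframetrics" // assumed path
    "go.signoz.io/signoz/pkg/query-service/interfaces"
    "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// listFailingJobs prints the jobs with the most failed pods in the window.
func listFailingJobs(ctx context.Context, reader interfaces.Reader, querier interfaces.Querier, start, end int64) error {
    repo := inframetrics.NewJobsRepo(reader, querier)
    resp, err := repo.GetJobList(ctx, model.JobListRequest{
        Start:   start, // epoch ms
        End:     end,
        OrderBy: &v3.OrderBy{ColumnName: "failed_pods", Order: v3.DirectionDesc},
        // GroupBy left nil: defaults to k8s_job_name and a flat list response.
    })
    if err != nil {
        return err
    }
    for _, r := range resp.Records {
        fmt.Println(r.JobName, r.ActivePods, r.FailedPods)
    }
    return nil
}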

View File

@ -0,0 +1,444 @@
package inframetrics
import (
"context"
"math"
"sort"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/postprocess"
"golang.org/x/exp/slices"
)
var (
metricToUseForStatefulSets = "k8s_pod_cpu_utilization"
k8sStatefulSetNameAttrKey = "k8s_statefulset_name"
metricNamesForStatefulSets = map[string]string{
"desired_pods": "k8s_statefulset_desired_pods",
"available_pods": "k8s_statefulset_current_pods",
}
statefulSetAttrsToEnrich = []string{
"k8s_statefulset_name",
"k8s_namespace_name",
"k8s_cluster_name",
}
queryNamesForStatefulSets = map[string][]string{
"cpu": {"A"},
"cpu_request": {"B", "A"},
"cpu_limit": {"C", "A"},
"memory": {"D"},
"memory_request": {"E", "D"},
"memory_limit": {"F", "D"},
"restarts": {"G", "A"},
"desired_pods": {"H"},
"available_pods": {"I"},
}
builderQueriesForStatefulSets = map[string]*v3.BuilderQuery{
// desired pods
"H": {
QueryName: "H",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForStatefulSets["desired_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "H",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// available pods
"I": {
QueryName: "I",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForStatefulSets["available_pods"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "I",
ReduceTo: v3.ReduceToOperatorLast,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
}
statefulSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"}
)
type StatefulSetsRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
}
func NewStatefulSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *StatefulSetsRepo {
return &StatefulSetsRepo{reader: reader, querierV2: querierV2}
}
func (d *StatefulSetsRepo) GetStatefulSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
// TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForStatefulSets
if req.Limit == 0 {
req.Limit = 50
}
attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req)
if err != nil {
return nil, err
}
// TODO(srikanthccv): only return resource attributes when we have a way to
// distinguish between resource attributes and other attributes.
filteredKeys := []v3.AttributeKey{}
for _, key := range attributeKeysResponse.AttributeKeys {
if slices.Contains(pointAttrsToIgnore, key.Key) {
continue
}
filteredKeys = append(filteredKeys, key)
}
return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}
func (d *StatefulSetsRepo) GetStatefulSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForStatefulSets
if req.Limit == 0 {
req.Limit = 50
}
attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
return attributeValuesResponse, nil
}
func (d *StatefulSetsRepo) getMetadataAttributes(ctx context.Context, req model.StatefulSetListRequest) (map[string]map[string]string, error) {
statefulSetAttrs := map[string]map[string]string{}
for _, key := range statefulSetAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}
mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForStatefulSets,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}
query = localQueryToDistributedQuery(query)
attrsListResponse, err := d.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}
for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}
statefulSetName := stringData[k8sStatefulSetNameAttrKey]
if _, ok := statefulSetAttrs[statefulSetName]; !ok {
statefulSetAttrs[statefulSetName] = map[string]string{}
}
for _, key := range req.GroupBy {
statefulSetAttrs[statefulSetName][key.Key] = stringData[key.Key]
}
}
return statefulSetAttrs, nil
}
func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req model.StatefulSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopStatefulSets(req)
queryNames := queryNamesForStatefulSets[req.OrderBy.ColumnName]
topStatefulSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{},
QueryType: v3.QueryTypeBuilder,
PanelType: v3.PanelTypeTable,
},
}
for _, queryName := range queryNames {
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
query.StepInterval = step
query.MetricTableHints = &v3.MetricTableHints{
TimeSeriesTableName: timeSeriesTableName,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topStatefulSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, topStatefulSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topStatefulSetGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
return nil, nil, nil
}
if req.OrderBy.Order == v3.DirectionDesc {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
})
} else {
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
})
}
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
paginatedTopStatefulSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
topStatefulSetGroups := []map[string]string{}
for _, series := range paginatedTopStatefulSetGroupsSeries {
topStatefulSetGroups = append(topStatefulSetGroups, series.Labels)
}
allStatefulSetGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allStatefulSetGroups = append(allStatefulSetGroups, series.Labels)
}
return topStatefulSetGroups, allStatefulSetGroups, nil
}
func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.StatefulSetListRequest) (model.StatefulSetListResponse, error) {
resp := model.StatefulSetListResponse{}
if req.Limit == 0 {
req.Limit = 10
}
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}
if req.GroupBy == nil {
req.GroupBy = []v3.AttributeKey{{Key: k8sStatefulSetNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
query := WorkloadTableListQuery.Clone()
query.Start = req.Start
query.End = req.End
query.Step = step
// add additional queries for stateful sets
for _, statefulSetQuery := range builderQueriesForStatefulSets {
query.CompositeQuery.BuilderQueries[statefulSetQuery.QueryName] = statefulSetQuery
}
for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
query.GroupBy = req.GroupBy
// make sure we only get records for stateful sets
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: k8sStatefulSetNameAttrKey},
Operator: v3.FilterOperatorExists,
})
}
statefulSetAttrs, err := d.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}
topStatefulSetGroups, allStatefulSetGroups, err := d.getTopStatefulSetGroups(ctx, req, query)
if err != nil {
return resp, err
}
groupFilters := map[string][]string{}
for _, topStatefulSetGroup := range topStatefulSetGroups {
for k, v := range topStatefulSetGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}
queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
if err != nil {
return resp, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return resp, err
}
records := []model.StatefulSetListRecord{}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.StatefulSetListRecord{
StatefulSetName: "",
CPUUsage: -1,
CPURequest: -1,
CPULimit: -1,
MemoryUsage: -1,
MemoryRequest: -1,
MemoryLimit: -1,
DesiredPods: -1,
AvailablePods: -1,
}
if statefulSetName, ok := row.Data[k8sStatefulSetNameAttrKey].(string); ok {
record.StatefulSetName = statefulSetName
}
if cpu, ok := row.Data["A"].(float64); ok {
record.CPUUsage = cpu
}
if cpuRequest, ok := row.Data["B"].(float64); ok {
record.CPURequest = cpuRequest
}
if cpuLimit, ok := row.Data["C"].(float64); ok {
record.CPULimit = cpuLimit
}
if memory, ok := row.Data["D"].(float64); ok {
record.MemoryUsage = memory
}
if memoryRequest, ok := row.Data["E"].(float64); ok {
record.MemoryRequest = memoryRequest
}
if memoryLimit, ok := row.Data["F"].(float64); ok {
record.MemoryLimit = memoryLimit
}
if restarts, ok := row.Data["G"].(float64); ok {
record.Restarts = int(restarts)
}
if desiredPods, ok := row.Data["H"].(float64); ok {
record.DesiredPods = int(desiredPods)
}
if availablePods, ok := row.Data["I"].(float64); ok {
record.AvailablePods = int(availablePods)
}
record.Meta = map[string]string{}
if _, ok := statefulSetAttrs[record.StatefulSetName]; ok {
record.Meta = statefulSetAttrs[record.StatefulSetName]
}
for k, v := range row.Data {
if slices.Contains(statefulSetQueryNames, k) {
continue
}
if labelValue, ok := v.(string); ok {
record.Meta[k] = labelValue
}
}
records = append(records, record)
}
}
resp.Total = len(allStatefulSetGroups)
resp.Records = records
return resp, nil
}
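
getTopStatefulSetGroups and its job/deployment counterparts share the same sort-and-slice pagination. The self-contained sketch below (hypothetical values) isolates the clamped upper bound that keeps the slice in range when fewer series exist than offset+limit:

package main

import (
    "fmt"
    "math"
    "sort"
)

func main() {
    values := []float64{5, 1, 9, 3} // first-point value of each series
    // v3.DirectionDesc ordering
    sort.Slice(values, func(i, j int) bool { return values[i] > values[j] })

    offset, limit := 1, 2
    // Clamp so an offset+limit past the end cannot cause a slice panic.
    upper := int(math.Min(float64(offset+limit), float64(len(values))))
    fmt.Println(values[offset:upper]) // prints: [5 3]
}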

View File

@ -0,0 +1,166 @@
package inframetrics
import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
var (
metricNamesForWorkloads = map[string]string{
"cpu": "k8s_pod_cpu_utilization",
"cpu_req": "k8s_pod_cpu_request_utilization",
"cpu_limit": "k8s_pod_cpu_limit_utilization",
"memory": "k8s_pod_memory_usage",
"memory_req": "k8s_pod_memory_request_utilization",
"memory_limit": "k8s_pod_memory_limit_utilization",
"restarts": "k8s_container_restarts",
}
)
var WorkloadTableListQuery = v3.QueryRangeParamsV3{
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
// pod cpu utilization
"A": {
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "A",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod cpu request utilization
"B": {
QueryName: "B",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu_request"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "B",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod cpu limit utilization
"C": {
QueryName: "C",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["cpu_limit"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "C",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory utilization
"D": {
QueryName: "D",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "D",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory request utilization
"E": {
QueryName: "E",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory_request"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "E",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
// pod memory limit utilization
"F": {
QueryName: "F",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["memory_limit"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "F",
ReduceTo: v3.ReduceToOperatorAvg,
TimeAggregation: v3.TimeAggregationAvg,
SpaceAggregation: v3.SpaceAggregationSum,
Disabled: false,
},
"G": {
QueryName: "G",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricNamesForWorkloads["restarts"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
GroupBy: []v3.AttributeKey{},
Expression: "G",
ReduceTo: v3.ReduceToOperatorSum,
TimeAggregation: v3.TimeAggregationAnyLast,
SpaceAggregation: v3.SpaceAggregationMax,
Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}},
Disabled: false,
},
},
PanelType: v3.PanelTypeTable,
QueryType: v3.QueryTypeBuilder,
},
Version: "v4",
FormatForWeb: true,
}
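
The workload repos consume this shared query by cloning it and layering on workload-specific pieces. The sketch below is a hedged reconstruction of that pattern for jobs, not the upstream code verbatim; it uses only identifiers defined in this package and assumes Clone returns a pointer, as its use in GetJobList suggests.

// jobListParams clones the shared A-G workload queries, merges in the
// job-only H-K queries, and pins every query to job pods with an EXISTS
// filter on k8s_job_name.
func jobListParams(start, end, step int64) *v3.QueryRangeParamsV3 {
    query := WorkloadTableListQuery.Clone()
    query.Start, query.End, query.Step = start, end, step
    for _, jq := range builderQueriesForJobs {
        query.CompositeQuery.BuilderQueries[jq.QueryName] = jq
    }
    for _, q := range query.CompositeQuery.BuilderQueries {
        q.StepInterval = step
        q.Filters.Items = append(q.Filters.Items, v3.FilterItem{
            Key:      v3.AttributeKey{Key: k8sJobNameAttrKey},
            Operator: v3.FilterOperatorExists,
        })
    }
    return query
}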

View File

@ -284,7 +284,7 @@ func BuildQRParamsWithCache(messagingQueue *MessagingQueue, queryContext string,
cq = &v3.CompositeQuery{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: bhq,
PanelType: v3.PanelTypeTable,
PanelType: v3.PanelTypeList,
}
}
@ -364,7 +364,7 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer
func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) {
if queryContext == "producer-consumer-eval" {
if queryContext == "producer-consumer-eval" || queryContext == "producer-throughput-overview" {
return &v3.CompositeQuery{
QueryType: v3.QueryTypeClickHouseSQL,
ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq},

View File

@ -436,8 +436,6 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
} else if panelType == v3.PanelTypeTable {
queryTmplPrefix =
"SELECT"
// step or aggregate interval is whole time period in case of table panel
step = (utils.GetEpochNanoSecs(end) - utils.GetEpochNanoSecs(start)) / NANOSECOND
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmplPrefix =

View File

@ -2,6 +2,7 @@ package cumulative
import (
"fmt"
"os"
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/constants"
@ -40,6 +41,9 @@ import (
const (
rateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))`
increaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window)))`
experimentalRateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
experimentalIncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window))`
)
// prepareTimeAggregationSubQueryTimeSeries prepares the sub-query to be used for temporal aggregation
@ -151,14 +155,22 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery)
subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
case v3.TimeAggregationRate:
innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
rateExp := rateWithoutNegative
if _, ok := os.LookupEnv("EXPERIMENTAL_RATE_WITHOUT_NEGATIVE"); ok {
rateExp = fmt.Sprintf(experimentalRateWithoutNegative, start)
}
rateQueryTmpl :=
"SELECT %s ts, " + rateWithoutNegative +
"SELECT %s ts, " + rateExp +
" as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)"
subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery)
case v3.TimeAggregationIncrease:
innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery)
increaseExp := increaseWithoutNegative
if _, ok := os.LookupEnv("EXPERIMENTAL_INCREASE_WITHOUT_NEGATIVE"); ok {
increaseExp = experimentalIncreaseWithoutNegative
}
rateQueryTmpl :=
"SELECT %s ts, " + increaseWithoutNegative +
"SELECT %s ts, " + increaseExp +
" as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)"
subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery)
}
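
The change above only swaps which SQL snippet gets spliced into the window query. A self-contained sketch of the gate, with the expressions shortened to placeholders rather than the real ClickHouse SQL:

package main

import (
    "fmt"
    "os"
)

func main() {
    rateExp := "rateWithoutNegative" // default expression
    if _, ok := os.LookupEnv("EXPERIMENTAL_RATE_WITHOUT_NEGATIVE"); ok {
        // The experimental variant is seeded with the query start (epoch ms)
        // for its lag window; 1700000000000 is a hypothetical value.
        rateExp = fmt.Sprintf("experimentalRateWithoutNegative(start=%d)", int64(1700000000000))
    }
    fmt.Println("SELECT ts, " + rateExp + " as per_series_value ...")
}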

View File

@ -116,7 +116,9 @@ func expressionToQuery(
for _, tag := range qp.CompositeQuery.BuilderQueries[variable].GroupBy {
groupTags = append(groupTags, tag.Key)
}
groupTags = append(groupTags, "ts")
if qp.CompositeQuery.PanelType != v3.PanelTypeTable {
groupTags = append(groupTags, "ts")
}
if joinUsing == "" {
for _, tag := range groupTags {
joinUsing += fmt.Sprintf("%s.`%s` as `%s`, ", variable, tag, tag)
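
The effect of the change above, sketched as a standalone function: table panels produce one row per group with no time dimension, so "ts" is dropped from the join keys used to combine formula sub-queries, while time-series panels keep it.

package main

import "fmt"

func joinKeys(groupBy []string, isTablePanel bool) []string {
    keys := append([]string{}, groupBy...)
    if !isTablePanel {
        keys = append(keys, "ts") // graph/value panels still join on the time bucket
    }
    return keys
}

func main() {
    fmt.Println(joinKeys([]string{"key1.1"}, true))  // [key1.1]
    fmt.Println(joinKeys([]string{"key1.1"}, false)) // [key1.1 ts]
}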

View File

@ -498,11 +498,11 @@ var testLogsWithFormula = []struct {
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] = true AND " +
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.2')] = true AND " +
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
"has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",
@ -707,12 +707,12 @@ var testLogsWithFormulaV2 = []struct {
},
},
},
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " +
"toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " +
"AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " +
"attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " +
"AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " +
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`",
"mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`",
},
{
Name: "test formula with dot in filter and group by materialized attribute",

View File

@ -10,7 +10,7 @@ import (
"go.signoz.io/signoz/pkg/query-service/utils"
)
var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
var AggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP05: 0.05,
v3.AggregateOperatorP10: 0.10,
v3.AggregateOperatorP20: 0.20,
@ -22,7 +22,7 @@ var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
v3.AggregateOperatorP99: 0.99,
}
var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
var AggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
v3.AggregateOperatorAvg: "avg",
v3.AggregateOperatorMax: "max",
v3.AggregateOperatorMin: "min",
@ -109,7 +109,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri
return selectLabels
}
func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
func GetSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
var selectLabels []string
if aggregatorOperator == v3.AggregateOperatorNoOp {
return ""
@ -173,7 +173,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if item.Key.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
subQuery, err := ExistsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
@ -199,7 +199,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
return queryString, nil
}
func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
func ExistsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
if key.DataType == v3.AttributeKeyDataTypeString {
if op == v3.FilterOperatorExists {
return fmt.Sprintf("%s %s ''", key.Key, tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
@ -244,7 +244,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
having := having(mq.Having)
having := Having(mq.Having)
if having != "" {
having = " having " + having
}
@ -272,7 +272,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
// we don't need value for first query
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
queryTmpl = "SELECT " + GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
@ -281,7 +281,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
}
filterSubQuery += emptyValuesInGroupByFilter
groupBy := groupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...)
groupBy := GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...)
if groupBy != "" {
groupBy = " group by " + groupBy
}
@ -291,7 +291,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
}
if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
aggregationKey := ""
@ -311,7 +311,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
op := fmt.Sprintf("%s(%s)/%f", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case
@ -324,17 +324,17 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", aggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("quantile(%v)(%s)", AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
op := fmt.Sprintf("%s(%s)", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
if mq.AggregateAttribute.IsColumn {
subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
subQuery, err := ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
@ -354,9 +354,9 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan
var query string
if panelType == v3.PanelTypeTrace {
withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME, spanIndexTableTimeFilter, filterSubQuery)
withSubQuery = addLimitToQuery(withSubQuery, mq.Limit)
withSubQuery = AddLimitToQuery(withSubQuery, mq.Limit)
if mq.Offset != 0 {
withSubQuery = addOffsetToQuery(withSubQuery, mq.Offset)
withSubQuery = AddOffsetToQuery(withSubQuery, mq.Offset)
}
// query = withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME)
query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, spanIndexTableTimeFilter)
@ -403,7 +403,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str
return strings.Join(tags, ",")
}
func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
func GroupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
groupTags := []string{}
for _, tag := range tags {
groupTags = append(groupTags, fmt.Sprintf("`%s`", tag.Key))
@ -456,7 +456,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
return str
}
func having(items []v3.Having) string {
func Having(items []v3.Having) string {
// aggregate something and filter on that aggregate
var having []string
for _, item := range items {
@ -465,7 +465,7 @@ func having(items []v3.Having) string {
return strings.Join(having, " AND ")
}
func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) {
func ReduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) {
var groupBy string
switch reduceTo {
@ -485,14 +485,14 @@ func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOpe
return query, nil
}
func addLimitToQuery(query string, limit uint64) string {
func AddLimitToQuery(query string, limit uint64) string {
if limit == 0 {
limit = 100
}
return fmt.Sprintf("%s LIMIT %d", query, limit)
}
func addOffsetToQuery(query string, offset uint64) string {
func AddOffsetToQuery(query string, offset uint64) string {
return fmt.Sprintf("%s OFFSET %d", query, offset)
}
@ -513,7 +513,7 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
if err != nil {
return "", err
}
query = addLimitToQuery(query, mq.Limit)
query = AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
@ -529,13 +529,13 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = reduceToQuery(query, mq.ReduceTo, mq.AggregateOperator)
query, err = ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
query = addLimitToQuery(query, mq.Limit)
query = AddLimitToQuery(query, mq.Limit)
if mq.Offset != 0 {
query = addOffsetToQuery(query, mq.Offset)
query = AddOffsetToQuery(query, mq.Offset)
}
}
return query, err

View File

@ -0,0 +1,414 @@
package v4
import (
"fmt"
"strings"
"go.signoz.io/signoz/pkg/query-service/app/resource"
tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
"go.signoz.io/signoz/pkg/query-service/utils"
)
const NANOSECOND = 1000000000
var tracesOperatorMappingV3 = map[v3.FilterOperator]string{
v3.FilterOperatorIn: "IN",
v3.FilterOperatorNotIn: "NOT IN",
v3.FilterOperatorEqual: "=",
v3.FilterOperatorNotEqual: "!=",
v3.FilterOperatorLessThan: "<",
v3.FilterOperatorLessThanOrEq: "<=",
v3.FilterOperatorGreaterThan: ">",
v3.FilterOperatorGreaterThanOrEq: ">=",
v3.FilterOperatorLike: "ILIKE",
v3.FilterOperatorNotLike: "NOT ILIKE",
v3.FilterOperatorRegex: "match(%s, %s)",
v3.FilterOperatorNotRegex: "NOT match(%s, %s)",
v3.FilterOperatorContains: "ILIKE",
v3.FilterOperatorNotContains: "NOT ILIKE",
v3.FilterOperatorExists: "mapContains(%s, '%s')",
v3.FilterOperatorNotExists: "NOT mapContains(%s, '%s')",
}
func getClickHouseTracesColumnType(columnType v3.AttributeKeyType) string {
if columnType == v3.AttributeKeyTypeResource {
return "resources"
}
return "attributes"
}
func getClickHouseTracesColumnDataType(columnDataType v3.AttributeKeyDataType) string {
if columnDataType == v3.AttributeKeyDataTypeFloat64 || columnDataType == v3.AttributeKeyDataTypeInt64 {
return "number"
}
if columnDataType == v3.AttributeKeyDataTypeBool {
return "bool"
}
return "string"
}
func getColumnName(key v3.AttributeKey) string {
// if the key is present in the static fields, return it as-is
if _, ok := constants.StaticFieldsTraces[key.Key]; ok {
return key.Key
}
if !key.IsColumn {
keyType := getClickHouseTracesColumnType(key.Type)
keyDType := getClickHouseTracesColumnDataType(key.DataType)
return fmt.Sprintf("%s_%s['%s']", keyType, keyDType, key.Key)
}
return "`" + utils.GetClickhouseColumnNameV2(string(key.Type), string(key.DataType), key.Key) + "`"
}
// getSelectLabels returns the select labels for the query based on groupBy
func getSelectLabels(groupBy []v3.AttributeKey) string {
var labels []string
for _, tag := range groupBy {
name := getColumnName(tag)
labels = append(labels, fmt.Sprintf(" %s as `%s`", name, tag.Key))
}
return strings.Join(labels, ",")
}
func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
var conditions []string
if fs != nil && len(fs.Items) != 0 {
for _, item := range fs.Items {
// skip if it's a resource attribute
if item.Key.Type == v3.AttributeKeyTypeResource {
continue
}
val := item.Value
// generate the key
columnName := getColumnName(item.Key)
var fmtVal string
item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists {
var err error
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err)
}
}
if val != nil {
fmtVal = utils.ClickHouseFormattedValue(val)
}
if operator, ok := tracesOperatorMappingV3[item.Operator]; ok {
switch item.Operator {
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
// we also want to treat %, _ as literals for contains
val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false)
conditions = append(conditions, fmt.Sprintf("%s %s '%%%s%%'", columnName, operator, val))
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
if item.Key.IsColumn {
subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
if err != nil {
return "", err
}
conditions = append(conditions, subQuery)
} else {
cType := getClickHouseTracesColumnType(item.Key.Type)
cDataType := getClickHouseTracesColumnDataType(item.Key.DataType)
col := fmt.Sprintf("%s_%s", cType, cDataType)
conditions = append(conditions, fmt.Sprintf(operator, col, item.Key.Key))
}
default:
conditions = append(conditions, fmt.Sprintf("%s %s %s", columnName, operator, fmtVal))
}
} else {
return "", fmt.Errorf("unsupported operator %s", item.Operator)
}
}
}
queryString := strings.Join(conditions, " AND ")
return queryString, nil
}
func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
// TODO(nitya): handle user-created materialized columns here in the future;
// skipping for now since we don't support creating them yet
filterItems := []v3.FilterItem{}
if len(groupBy) != 0 {
for _, item := range groupBy {
if !item.IsColumn {
filterItems = append(filterItems, v3.FilterItem{
Key: item,
Operator: v3.FilterOperatorExists,
})
}
}
}
if len(filterItems) != 0 {
filterSet := v3.FilterSet{
Operator: "AND",
Items: filterItems,
}
return buildTracesFilterQuery(&filterSet)
}
return "", nil
}
// orderBy builds the order by expressions for the given items;
// items not present in the tag lookup are also added, using the full column name for list panels.
// If the order is not specified, it defaults to ASC.
func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
var orderBy []string
for _, item := range items {
if item.ColumnName == constants.SigNozOrderByValue {
orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
} else if _, ok := tagLookup[item.ColumnName]; ok {
orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
} else if panelType == v3.PanelTypeList {
attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
name := getColumnName(attr)
orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
}
}
return orderBy
}
func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
tagLookup := map[string]struct{}{}
for _, v := range tags {
tagLookup[v.Key] = struct{}{}
}
orderByArray := orderBy(panelType, items, tagLookup)
if len(orderByArray) == 0 {
if panelType == v3.PanelTypeList {
orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
} else {
orderByArray = append(orderByArray, "value DESC")
}
}
str := strings.Join(orderByArray, ",")
return str
}
func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions) (string, error) {
tracesStart := utils.GetEpochNanoSecs(start)
tracesEnd := utils.GetEpochNanoSecs(end)
// 1800 seconds are subtracted so that the bucket start covers all the fingerprints.
bucketStart := tracesStart/NANOSECOND - 1800
bucketEnd := tracesEnd / NANOSECOND
timeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d') AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", tracesStart, tracesEnd, bucketStart, bucketEnd)
filterSubQuery, err := buildTracesFilterQuery(mq.Filters)
if err != nil {
return "", err
}
if filterSubQuery != "" {
filterSubQuery = " AND " + filterSubQuery
}
emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
if err != nil {
return "", err
}
if emptyValuesInGroupByFilter != "" {
filterSubQuery = filterSubQuery + " AND " + emptyValuesInGroupByFilter
}
resourceSubQuery, err := resource.BuildResourceSubQuery("signoz_traces", "distributed_traces_v3_resource", bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
if err != nil {
return "", err
}
// join both the filter clauses
if resourceSubQuery != "" {
filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")"
}
// timerange will be sent in epoch millisecond
selectLabels := getSelectLabels(mq.GroupBy)
if selectLabels != "" {
selectLabels = selectLabels + ","
}
orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
if orderBy != "" {
orderBy = " order by " + orderBy
}
if mq.AggregateOperator == v3.AggregateOperatorNoOp {
var query string
if panelType == v3.PanelTypeTrace {
withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery)
withSubQuery = tracesV3.AddLimitToQuery(withSubQuery, mq.Limit)
if mq.Offset != 0 {
withSubQuery = tracesV3.AddOffsetToQuery(withSubQuery, mq.Offset)
}
query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3, timeFilter)
} else if panelType == v3.PanelTypeList {
if len(mq.SelectColumns) == 0 {
return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType)
}
// add it to the select labels
selectLabels = getSelectLabels(mq.SelectColumns)
queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID,%s ", selectLabels) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 + " where %s %s" + "%s"
query = fmt.Sprintf(queryNoOpTmpl, timeFilter, filterSubQuery, orderBy)
} else {
return "", fmt.Errorf("unsupported aggregate operator %s for panelType %s", mq.AggregateOperator, panelType)
}
return query, nil
// ---- NOOP ends here ----
}
having := tracesV3.Having(mq.Having)
if having != "" {
having = " having " + having
}
groupBy := tracesV3.GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...)
if groupBy != "" {
groupBy = " group by " + groupBy
}
aggregationKey := ""
if mq.AggregateAttribute.Key != "" {
aggregationKey = getColumnName(mq.AggregateAttribute)
}
var queryTmpl string
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT"
} else if panelType == v3.PanelTypeTable {
queryTmpl =
"SELECT "
} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
// Select the aggregate value for interval
queryTmpl =
fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d SECOND) AS ts,", step)
}
queryTmpl = queryTmpl + selectLabels +
" %s as value " +
"from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 +
" where " + timeFilter + "%s" +
"%s%s" +
"%s"
// we don't need value for first query
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
queryTmpl = "SELECT " + tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
}
if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
switch mq.AggregateOperator {
case v3.AggregateOperatorRateSum,
v3.AggregateOperatorRateMax,
v3.AggregateOperatorRateAvg,
v3.AggregateOperatorRateMin,
v3.AggregateOperatorRate:
rate := float64(step)
if options.PreferRPM {
rate = rate / 60.0
}
op := fmt.Sprintf("%s(%s)/%f", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case
v3.AggregateOperatorP05,
v3.AggregateOperatorP10,
v3.AggregateOperatorP20,
v3.AggregateOperatorP25,
v3.AggregateOperatorP50,
v3.AggregateOperatorP75,
v3.AggregateOperatorP90,
v3.AggregateOperatorP95,
v3.AggregateOperatorP99:
op := fmt.Sprintf("quantile(%v)(%s)", tracesV3.AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
op := fmt.Sprintf("%s(%s)", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
if mq.AggregateAttribute.IsColumn {
subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
if err == nil {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
}
} else {
column := getColumnName(mq.AggregateAttribute)
filterSubQuery = fmt.Sprintf("%s AND has(%s, '%s')", filterSubQuery, column, mq.AggregateAttribute.Key)
}
}
op := "toFloat64(count())"
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCountDistinct:
op := fmt.Sprintf("toFloat64(count(distinct(%s)))", aggregationKey)
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
default:
return "", fmt.Errorf("unsupported aggregate operator %s", mq.AggregateOperator)
}
}
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch millisecond
// step is in seconds
func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) {
if panelType == v3.PanelTypeGraph {
// adjust the start and end time to the step interval for graph panel types
start = start - (start % (mq.StepInterval * 1000))
end = end - (end % (mq.StepInterval * 1000))
}
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// the first query only needs the group by names
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
query = tracesV3.AddLimitToQuery(query, mq.Limit)
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
return query, nil
}
query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
if err != nil {
return "", err
}
if panelType == v3.PanelTypeValue {
query, err = tracesV3.ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator)
}
if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable {
query = tracesV3.AddLimitToQuery(query, mq.Limit)
if mq.Offset != 0 {
query = tracesV3.AddOffsetToQuery(query, mq.Offset)
}
}
return query, err
}
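
A worked sketch of the graph-panel alignment at the top of PrepareTracesQuery: start and end (epoch ms) are snapped down to a multiple of the step interval (seconds) so that buckets line up across refreshes. The numbers are hypothetical.

package main

import "fmt"

func main() {
    var start, end int64 = 130_000, 250_500 // epoch ms
    var stepInterval int64 = 60             // seconds

    start = start - (start % (stepInterval * 1000))
    end = end - (end % (stepInterval * 1000))
    fmt.Println(start, end) // prints: 120000 240000
}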

View File

@ -0,0 +1,708 @@
package v4
import (
"testing"
"go.signoz.io/signoz/pkg/query-service/constants"
v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)
func Test_getClickHouseTracesColumnType(t *testing.T) {
type args struct {
columnType v3.AttributeKeyType
}
tests := []struct {
name string
args args
want string
}{
{
name: "tag",
args: args{
columnType: v3.AttributeKeyTypeTag,
},
want: "attributes",
},
{
name: "resource",
args: args{
columnType: v3.AttributeKeyTypeResource,
},
want: "resources",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getClickHouseTracesColumnType(tt.args.columnType); got != tt.want {
t.Errorf("GetClickhouseTracesColumnType() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getClickHouseTracesColumnDataType(t *testing.T) {
type args struct {
columnDataType v3.AttributeKeyDataType
}
tests := []struct {
name string
args args
want string
}{
{
name: "string",
args: args{
columnDataType: v3.AttributeKeyDataTypeString,
},
want: "string",
},
{
name: "float64",
args: args{
columnDataType: v3.AttributeKeyDataTypeFloat64,
},
want: "number",
},
{
name: "int64",
args: args{
columnDataType: v3.AttributeKeyDataTypeInt64,
},
want: "number",
},
{
name: "bool",
args: args{
columnDataType: v3.AttributeKeyDataTypeBool,
},
want: "bool",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getClickHouseTracesColumnDataType(tt.args.columnDataType); got != tt.want {
t.Errorf("getClickhouseTracesColumnDataType() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getColumnName(t *testing.T) {
type args struct {
key v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "tag",
args: args{
key: v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
want: "attributes_string['data']",
},
{
name: "column",
args: args{
key: v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: "`attribute_string_data`",
},
{
name: "static column",
args: args{
key: v3.AttributeKey{Key: "spanKind", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true},
},
want: "spanKind",
},
{
name: "missing meta",
args: args{
key: v3.AttributeKey{Key: "xyz"},
},
want: "attributes_string['xyz']",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getColumnName(tt.args.key); got != tt.want {
t.Errorf("getColumnName() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getSelectLabels(t *testing.T) {
type args struct {
groupBy []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "count",
args: args{
groupBy: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: " attributes_string['user_name'] as `user_name`",
},
{
name: "multiple group by",
args: args{
groupBy: []v3.AttributeKey{
{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, // static col
{Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource, IsColumn: true},
},
},
want: " name as `name`, `resource_string_service_name` as `service_name`",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getSelectLabels(tt.args.groupBy); got != tt.want {
t.Errorf("getSelectLabels() = %v, want %v", got, tt.want)
}
})
}
}
func Test_buildTracesFilterQuery(t *testing.T) {
type args struct {
fs *v3.FilterSet
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test ignore resource",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"service"}, Operator: v3.FilterOperatorIn},
},
}},
want: "",
},
{
name: "Test buildTracesFilterQuery in, nin",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"GET", "POST"}, Operator: v3.FilterOperatorIn},
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"PUT"}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"server"}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "status.code", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{200}, Operator: v3.FilterOperatorNotIn},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{100.0}, Operator: v3.FilterOperatorIn},
{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{true}, Operator: v3.FilterOperatorIn},
}},
},
want: "attributes_string['method'] IN ['GET','POST'] AND attributes_string['method'] NOT IN ['PUT'] AND attributes_number['status.code'] NOT IN [200] AND attributes_number['duration'] IN [100] AND attributes_bool['isDone'] IN [true]",
wantErr: false,
},
{
name: "Test buildTracesFilterQuery not eq, neq, gt, lt, gte, lte",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 102, Operator: v3.FilterOperatorEqual},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 100, Operator: v3.FilterOperatorNotEqual},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: v3.FilterOperatorGreaterThan},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 200, Operator: v3.FilterOperatorLessThan},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: 10.0, Operator: v3.FilterOperatorGreaterThanOrEq},
{Key: v3.AttributeKey{Key: "duration_str", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "200", Operator: v3.FilterOperatorLessThanOrEq},
}},
},
want: "attributes_number['duration'] = 102 AND attributes_number['duration'] != 100 AND attributes_number['duration'] > 10 AND attributes_number['duration'] < 200" +
" AND attributes_number['duration'] >= 10.000000 AND attributes_string['duration_str'] <= '200'",
wantErr: false,
},
{
name: "Test contains, ncontains, like, nlike, regex, nregex",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.%", Operator: v3.FilterOperatorContains},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "103_", Operator: v3.FilterOperatorNotContains},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: v3.FilterOperatorLike},
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102", Operator: v3.FilterOperatorNotLike},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/mypath", Operator: v3.FilterOperatorRegex},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/health.*", Operator: v3.FilterOperatorNotRegex},
}},
},
want: "attributes_string['host'] ILIKE '%102.\\%%' AND attributes_string['host'] NOT ILIKE '%103\\_%' AND attributes_string['host'] ILIKE '102.' AND attributes_string['host'] NOT ILIKE '102' AND " +
"match(`attribute_string_path`, '/mypath') AND NOT match(`attribute_string_path`, '/health.*')",
},
{
name: "Test exists, nexists",
args: args{
fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists},
{Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists},
{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
{Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
}},
},
want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildTracesFilterQuery(tt.args.fs)
if (err != nil) != tt.wantErr {
t.Errorf("buildTracesFilterQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("buildTracesFilterQuery() = %v, want %v", got, tt.want)
}
})
}
}
func Test_handleEmptyValuesInGroupBy(t *testing.T) {
type args struct {
groupBy []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test handleEmptyValuesInGroupBy",
args: args{
groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "mapContains(attributes_string, 'bytes')",
wantErr: false,
},
{
name: "Test handleEmptyValuesInGroupBy",
args: args{
groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := handleEmptyValuesInGroupBy(tt.args.groupBy)
if (err != nil) != tt.wantErr {
t.Errorf("handleEmptyValuesInGroupBy() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("handleEmptyValuesInGroupBy() = %v, want %v", got, tt.want)
}
})
}
}
func Test_orderByAttributeKeyTags(t *testing.T) {
type args struct {
panelType v3.PanelType
items []v3.OrderBy
tags []v3.AttributeKey
}
tests := []struct {
name string
args args
want string
}{
{
name: "test",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}},
tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "`name` ASC",
},
{
name: "order by value",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}, {ColumnName: constants.SigNozOrderByValue, Order: "DESC"}},
tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
},
want: "`name` ASC,value DESC",
},
{
name: "test",
args: args{
panelType: v3.PanelTypeList,
items: []v3.OrderBy{{ColumnName: "status", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{ColumnName: "route", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
want: "attributes_string['status'] DESC,`attribute_string_route` DESC",
},
{
name: "ignore order by in table panel",
args: args{
panelType: v3.PanelTypeTable,
items: []v3.OrderBy{{ColumnName: "timestamp", Order: "DESC"}},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
{
name: "add default order by ts for list panel",
args: args{
panelType: v3.PanelTypeList,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "timestamp DESC",
},
{
name: "add default order by value for graph panel",
args: args{
panelType: v3.PanelTypeGraph,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
{
name: "don't add default order by for table panel",
args: args{
panelType: v3.PanelTypeTable,
items: []v3.OrderBy{},
tags: []v3.AttributeKey{},
},
want: "value DESC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := orderByAttributeKeyTags(tt.args.panelType, tt.args.items, tt.args.tags); got != tt.want {
t.Errorf("orderByAttributeKeyTags() = %v, want %v", got, tt.want)
}
})
}
}
func Test_buildTracesQuery(t *testing.T) {
type args struct {
start int64
end int64
step int64
mq *v3.BuilderQuery
panelType v3.PanelType
options v3.QBOptions
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "Test buildTracesQuery",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
step: 1000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCount,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
},
},
GroupBy: []v3.AttributeKey{{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{
{ColumnName: "http.method", Order: "ASC"}},
},
},
want: "SELECT attributes_string['http.method'] as `http.method`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['http.method'] = '100' AND mapContains(attributes_string, 'http.method') " +
"group by `http.method` order by `http.method` ASC",
},
{
name: "Test buildTracesQuery",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
step: 1000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCount,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "bytes", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeInt64}, Value: 100, Operator: ">"},
{Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}, Value: "myService", Operator: "="},
},
},
GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeResource}},
OrderBy: []v3.OrderBy{
{ColumnName: "host", Order: "ASC"}},
},
},
want: "SELECT resources_number['host'] as `host`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['bytes'] > 100 AND " +
"(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND " +
"(seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%' AND " +
"( (simpleJSONHas(labels, 'host') AND labels like '%host%') ))) " +
"group by `host` order by `host` ASC",
},
{
name: "test noop list view",
args: args{
panelType: v3.PanelTypeList,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{},
SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
OrderBy: []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}},
},
},
want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp ASC",
},
{
name: "test noop list view-without ts",
args: args{
panelType: v3.PanelTypeList,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{},
SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}},
},
},
want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " +
"AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp DESC",
},
{
name: "test noop trace view",
args: args{
panelType: v3.PanelTypeTrace,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorNoOp,
Filters: &v3.FilterSet{
Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="},
{Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "myService", Operator: "="},
},
},
},
},
want: "SELECT subQuery.serviceName, subQuery.name, count() AS span_count, subQuery.durationNano, subQuery.traceID AS traceID FROM signoz_traces.distributed_signoz_index_v3 INNER JOIN " +
"( SELECT * FROM (SELECT traceID, durationNano, serviceName, name FROM signoz_traces.signoz_index_v3 WHERE parentSpanID = '' AND (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " +
"(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['method'] = 'GET' AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource " +
"WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%')) " +
"ORDER BY durationNano DESC LIMIT 1 BY traceID LIMIT 100) AS inner_subquery ) AS subQuery ON signoz_traces.distributed_signoz_index_v3.traceID = subQuery.traceID WHERE (timestamp >= '1680066360726210000' AND " +
"timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) GROUP BY subQuery.traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY " +
"subQuery.durationNano desc LIMIT 1 BY subQuery.traceID;",
},
{
name: "Test order by value with having",
args: args{
panelType: v3.PanelTypeTable,
start: 1680066360726210000,
end: 1680066458000000000,
mq: &v3.BuilderQuery{
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}},
Having: []v3.Having{
{
ColumnName: "name",
Operator: ">",
Value: 10,
},
},
},
},
want: "SELECT toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " +
"(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) having value > 10 order by value ASC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildTracesQuery(tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.panelType, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("buildTracesQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("buildTracesQuery() = %v, want %v", got, tt.want)
}
})
}
}
func TestPrepareTracesQuery(t *testing.T) {
type args struct {
start int64
end int64
panelType v3.PanelType
mq *v3.BuilderQuery
options v3.QBOptions
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "test with limit - first",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
Limit: 10,
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
},
options: v3.QBOptions{
GraphLimitQtype: constants.FirstQueryGraphLimit,
},
},
want: "SELECT `function` from (SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') group by `function` order by value DESC) LIMIT 10",
},
{
name: "test with limit - second",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{},
AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
Limit: 10,
},
options: v3.QBOptions{
GraphLimitQtype: constants.SecondQueryGraphLimit,
},
},
want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
},
{
name: "test with limit with resources- first",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
{
Key: v3.AttributeKey{Key: "hostname", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
Value: "server1",
Operator: v3.FilterOperatorEqual,
},
},
},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{
{Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{Key: "service.name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
},
Limit: 10,
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
},
options: v3.QBOptions{
GraphLimitQtype: constants.FirstQueryGraphLimit,
},
},
want: "SELECT `function`,`service.name` from (SELECT `attribute_string_function` as `function`, `resource_string_service$$name` as `service.name`, toFloat64(count(distinct(name))) as value " +
"from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " +
"AND attributes_number['line'] = 100 AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE " +
"(seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%' AND " +
"( (simpleJSONHas(labels, 'service.name') AND labels like '%service.name%') ))) group by `function`,`service.name` order by value DESC) LIMIT 10",
},
{
name: "test with limit with resources - second",
args: args{
start: 1680066360726210000,
end: 1680066458000000000,
panelType: v3.PanelTypeTable,
mq: &v3.BuilderQuery{
StepInterval: 60,
AggregateOperator: v3.AggregateOperatorCountDistinct,
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag},
Value: 100,
Operator: v3.FilterOperatorEqual,
},
{
Key: v3.AttributeKey{Key: "hostname", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
Value: "server1",
Operator: v3.FilterOperatorEqual,
},
},
},
AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
GroupBy: []v3.AttributeKey{
{Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
{Key: "serviceName", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag},
},
OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}},
Limit: 10,
},
options: v3.QBOptions{
GraphLimitQtype: constants.SecondQueryGraphLimit,
},
},
want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
"AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := PrepareTracesQuery(tt.args.start, tt.args.end, tt.args.panelType, tt.args.mq, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("PrepareTracesQuery() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("PrepareTracesQuery() = %v, want %v", got, tt.want)
}
})
}
}
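The FirstQueryGraphLimit/SecondQueryGraphLimit cases above are designed to run as two passes: the first fetches only the top-N group-by values, and the second carries a GLOBAL IN (%s) placeholder for them. A hedged sketch of that wiring; the tuple formatting and the placeholder fill-in are assumptions for illustration, not taken from this diff:

	// First pass: top-N group values only.
	opts := v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit}
	firstQuery, _ := PrepareTracesQuery(start, end, v3.PanelTypeGraph, mq, opts)
	// ...execute firstQuery and render the returned groups as tuples...
	groupTuples := "('fn1'),('fn2')" // formatting assumed

	// Second pass: substitute the tuples into GLOBAL IN (%s).
	opts.GraphLimitQtype = constants.SecondQueryGraphLimit
	secondTmpl, _ := PrepareTracesQuery(start, end, v3.PanelTypeGraph, mq, opts)
	secondQuery := strings.Replace(secondTmpl, "%s", groupTuples, 1)
	_ = secondQuery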

View File

@ -239,6 +239,8 @@ const (
SIGNOZ_TRACE_DBNAME = "signoz_traces"
SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2"
SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2"
SIGNOZ_SPAN_INDEX_V3 = "distributed_signoz_index_v3"
SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME = "signoz_index_v3"
SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4"
SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs"
SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
@ -444,3 +446,147 @@ const MaxFilterSuggestionsExamplesLimit = 10
var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500")
var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000")
var StaticFieldsTraces = map[string]v3.AttributeKey{
"timestamp": {},
"traceID": {
Key: "traceID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"spanID": {
Key: "spanID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"parentSpanID": {
Key: "parentSpanID",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"name": {
Key: "name",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"serviceName": {
Key: "serviceName",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"kind": {
Key: "kind",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"spanKind": {
Key: "spanKind",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"durationNano": {
Key: "durationNano",
DataType: v3.AttributeKeyDataTypeFloat64,
IsColumn: true,
},
"statusCode": {
Key: "statusCode",
DataType: v3.AttributeKeyDataTypeFloat64,
IsColumn: true,
},
"hasError": {
Key: "hasError",
DataType: v3.AttributeKeyDataTypeBool,
IsColumn: true,
},
"statusMessage": {
Key: "statusMessage",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"statusCodeString": {
Key: "statusCodeString",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"externalHttpMethod": {
Key: "externalHttpMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"externalHttpUrl": {
Key: "externalHttpUrl",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbSystem": {
Key: "dbSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbName": {
Key: "dbName",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"dbOperation": {
Key: "dbOperation",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"peerService": {
Key: "peerService",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpMethod": {
Key: "httpMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpUrl": {
Key: "httpUrl",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpRoute": {
Key: "httpRoute",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"httpHost": {
Key: "httpHost",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"msgSystem": {
Key: "msgSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"msgOperation": {
Key: "msgOperation",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcSystem": {
Key: "rpcSystem",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcService": {
Key: "rpcService",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"rpcMethod": {
Key: "rpcMethod",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
"responseStatusCode": {
Key: "responseStatusCode",
DataType: v3.AttributeKeyDataTypeString,
IsColumn: true,
},
}
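The map above pairs with the "static column" behavior exercised in Test_getColumnName earlier in this diff; a minimal sketch of the kind of check a resolver might make (the resolver itself is assumed, not shown here):

	// Hypothetical check: static fields are addressed as bare columns,
	// not via the attributes_* maps or materialized attribute columns.
	if field, ok := constants.StaticFieldsTraces["spanKind"]; ok && field.IsColumn {
		column := field.Key // "spanKind", used directly in the generated SQL
		_ = column
	}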

View File

@ -173,3 +173,125 @@ type ClusterListRecord struct {
MemoryAllocatable float64 `json:"memoryAllocatable"`
Meta map[string]string `json:"meta"`
}
type DeploymentListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type DeploymentListResponse struct {
Type ResponseType `json:"type"`
Records []DeploymentListRecord `json:"records"`
Total int `json:"total"`
}
type DeploymentListRecord struct {
DeploymentName string `json:"deploymentName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
DesiredPods int `json:"desiredPods"`
AvailablePods int `json:"availablePods"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
Meta map[string]string `json:"meta"`
}
type DaemonSetListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type DaemonSetListResponse struct {
Type ResponseType `json:"type"`
Records []DaemonSetListRecord `json:"records"`
Total int `json:"total"`
}
type DaemonSetListRecord struct {
DaemonSetName string `json:"daemonSetName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredNodes int `json:"desiredNodes"`
AvailableNodes int `json:"availableNodes"`
Meta map[string]string `json:"meta"`
}
type StatefulSetListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type StatefulSetListResponse struct {
Type ResponseType `json:"type"`
Records []StatefulSetListRecord `json:"records"`
Total int `json:"total"`
}
type StatefulSetListRecord struct {
StatefulSetName string `json:"statefulSetName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredPods int `json:"desiredPods"`
AvailablePods int `json:"availablePods"`
Meta map[string]string `json:"meta"`
}
type JobListRequest struct {
Start int64 `json:"start"` // epoch time in ms
End int64 `json:"end"` // epoch time in ms
Filters *v3.FilterSet `json:"filters"`
GroupBy []v3.AttributeKey `json:"groupBy"`
OrderBy *v3.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type JobListResponse struct {
Type ResponseType `json:"type"`
Records []JobListRecord `json:"records"`
Total int `json:"total"`
}
type JobListRecord struct {
JobName string `json:"jobName"`
CPUUsage float64 `json:"cpuUsage"`
MemoryUsage float64 `json:"memoryUsage"`
CPURequest float64 `json:"cpuRequest"`
MemoryRequest float64 `json:"memoryRequest"`
CPULimit float64 `json:"cpuLimit"`
MemoryLimit float64 `json:"memoryLimit"`
Restarts int `json:"restarts"`
DesiredSuccessfulPods int `json:"desiredSuccessfulPods"`
ActivePods int `json:"activePods"`
FailedPods int `json:"failedPods"`
SuccessfulPods int `json:"successfulPods"`
Meta map[string]string `json:"meta"`
}
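The four request shapes above are identical apart from their record types; a minimal sketch of building one in Go, with the package qualifier and the filter/order values invented for illustration:

	// Illustrative payload; units follow the struct tags (epoch time in ms).
	req := model.DeploymentListRequest{
		Start: time.Now().Add(-time.Hour).UnixMilli(),
		End:   time.Now().UnixMilli(),
		Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
			{Key: v3.AttributeKey{Key: "k8s.namespace.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "default", Operator: v3.FilterOperatorEqual},
		}},
		OrderBy: &v3.OrderBy{ColumnName: "cpuUsage", Order: "desc"}, // column name assumed
		Limit:   10,
	}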

View File

@ -617,6 +617,7 @@ type AlertsInfo struct {
TotalAlerts int `json:"totalAlerts"`
LogsBasedAlerts int `json:"logsBasedAlerts"`
MetricBasedAlerts int `json:"metricBasedAlerts"`
AnomalyBasedAlerts int `json:"anomalyBasedAlerts"`
TracesBasedAlerts int `json:"tracesBasedAlerts"`
TotalChannels int `json:"totalChannels"`
SlackChannels int `json:"slackChannels"`

View File

@ -42,16 +42,6 @@ var (
// this file contains API requests and responses to be
// served over HTTP
// newApiErrorInternal returns a new api error object of type internal
func newApiErrorInternal(err error) *model.ApiError {
return &model.ApiError{Typ: model.ErrorInternal, Err: err}
}
// newApiErrorBadData returns a new api error object of bad request type
func newApiErrorBadData(err error) *model.ApiError {
return &model.ApiError{Typ: model.ErrorBadData, Err: err}
}
// PostableRule is used to create alerting rule from HTTP api
type PostableRule struct {
AlertName string `yaml:"alert,omitempty" json:"alert,omitempty"`

View File

@ -8,7 +8,7 @@ import (
func TestIsAllQueriesDisabled(t *testing.T) {
testCases := []*v3.CompositeQuery{
&v3.CompositeQuery{
{
BuilderQueries: map[string]*v3.BuilderQuery{
"query1": {
Disabled: true,
@ -20,10 +20,10 @@ func TestIsAllQueriesDisabled(t *testing.T) {
QueryType: v3.QueryTypeBuilder,
},
nil,
&v3.CompositeQuery{
{
QueryType: v3.QueryTypeBuilder,
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"query1": {
@ -34,10 +34,10 @@ func TestIsAllQueriesDisabled(t *testing.T) {
},
},
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypePromQL,
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypePromQL,
PromQueries: map[string]*v3.PromQuery{
"query3": {
@ -45,7 +45,7 @@ func TestIsAllQueriesDisabled(t *testing.T) {
},
},
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypePromQL,
PromQueries: map[string]*v3.PromQuery{
"query3": {
@ -53,10 +53,10 @@ func TestIsAllQueriesDisabled(t *testing.T) {
},
},
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypeClickHouseSQL,
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypeClickHouseSQL,
ClickHouseQueries: map[string]*v3.ClickHouseQuery{
"query4": {
@ -64,7 +64,7 @@ func TestIsAllQueriesDisabled(t *testing.T) {
},
},
},
&v3.CompositeQuery{
{
QueryType: v3.QueryTypeClickHouseSQL,
ClickHouseQueries: map[string]*v3.ClickHouseQuery{
"query4": {

View File

@ -599,6 +599,9 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
}
}
}
if rule.RuleType == RuleTypeAnomaly {
alertsInfo.AnomalyBasedAlerts = alertsInfo.AnomalyBasedAlerts + 1
}
} else if rule.AlertType == AlertTypeTraces {
alertsInfo.TracesBasedAlerts = alertsInfo.TracesBasedAlerts + 1
}

View File

@ -10,8 +10,6 @@ import (
"sync"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"errors"
@ -24,7 +22,6 @@ import (
"go.signoz.io/signoz/pkg/query-service/model"
pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
"go.signoz.io/signoz/pkg/query-service/telemetry"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
)
type PrepareTaskOptions struct {
@ -41,6 +38,19 @@ type PrepareTaskOptions struct {
UseLogsNewSchema bool
}
type PrepareTestRuleOptions struct {
Rule *PostableRule
RuleDB RuleDB
Logger *zap.Logger
Reader interfaces.Reader
Cache cache.Cache
FF interfaces.FeatureLookup
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
UseLogsNewSchema bool
}
const taskNamesuffix = "webAppEditor"
func RuleIdFromTaskName(n string) string {
@ -81,6 +91,8 @@ type ManagerOptions struct {
PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
UseLogsNewSchema bool
}
@ -99,10 +111,11 @@ type Manager struct {
logger *zap.Logger
featureFlags interfaces.FeatureLookup
reader interfaces.Reader
cache cache.Cache
prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
featureFlags interfaces.FeatureLookup
reader interfaces.Reader
cache cache.Cache
prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
UseLogsNewSchema bool
}
@ -123,6 +136,9 @@ func defaultOptions(o *ManagerOptions) *ManagerOptions {
if o.PrepareTaskFunc == nil {
o.PrepareTaskFunc = defaultPrepareTaskFunc
}
if o.PrepareTestRuleFunc == nil {
o.PrepareTestRuleFunc = defaultTestNotification
}
return o
}
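ManagerOptions now exposes PrepareTestRuleFunc alongside PrepareTaskFunc, with defaultTestNotification (shown later in this diff) as the nil fallback. A hedged sketch of supplying a custom implementation, assuming NewManager routes its options through defaultOptions as the fallback wiring above suggests:

	opts := &ManagerOptions{
		// ...other fields as before...
		PrepareTestRuleFunc: func(o PrepareTestRuleOptions) (int, *model.ApiError) {
			// e.g. an enterprise build could special-case anomaly rules here
			return defaultTestNotification(o)
		},
	}
	// defaultOptions only fills nil funcs, so the override stays in place.
	m, err := NewManager(opts)
	_, _ = m, err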
@ -203,17 +219,18 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
telemetry.GetInstance().SetAlertsInfoCallback(db.GetAlertsInfo)
m := &Manager{
tasks: map[string]Task{},
rules: map[string]Rule{},
notifier: notifier,
ruleDB: db,
opts: o,
block: make(chan struct{}),
logger: o.Logger,
featureFlags: o.FeatureFlags,
reader: o.Reader,
cache: o.Cache,
prepareTaskFunc: o.PrepareTaskFunc,
tasks: map[string]Task{},
rules: map[string]Rule{},
notifier: notifier,
ruleDB: db,
opts: o,
block: make(chan struct{}),
logger: o.Logger,
featureFlags: o.FeatureFlags,
reader: o.Reader,
cache: o.Cache,
prepareTaskFunc: o.PrepareTaskFunc,
prepareTestRuleFunc: o.PrepareTestRuleFunc,
}
return m, nil
}
@ -788,78 +805,20 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
parsedRule, err := ParsePostableRule([]byte(ruleStr))
if err != nil {
return 0, newApiErrorBadData(err)
return 0, model.BadRequest(err)
}
var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
alertname = uuid.New().String()
}
alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{
Rule: parsedRule,
RuleDB: m.ruleDB,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
FF: m.featureFlags,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
UseLogsNewSchema: m.opts.UseLogsNewSchema,
})
// append name to indicate this is test alert
parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
var rule Rule
if parsedRule.RuleType == RuleTypeThreshold {
// add special labels for test alerts
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
// create a threshold rule
rule, err = NewThresholdRule(
alertname,
parsedRule,
m.featureFlags,
m.reader,
m.opts.UseLogsNewSchema,
WithSendAlways(),
WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
} else if parsedRule.RuleType == RuleTypeProm {
// create promql rule
rule, err = NewPromRule(
alertname,
parsedRule,
m.logger,
m.reader,
m.opts.PqlEngine,
WithSendAlways(),
WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, newApiErrorBadData(err)
}
} else {
return 0, newApiErrorBadData(fmt.Errorf("failed to derive ruletype with given information"))
}
// set timestamp to current utc time
ts := time.Now().UTC()
count, err := rule.Eval(ctx, ts)
if err != nil {
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed"))
}
alertsFound, ok := count.(int)
if !ok {
return 0, newApiErrorInternal(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc())
return alertsFound, nil
return alertCount, apiErr
}

View File

@ -233,6 +233,7 @@ func AlertTemplateData(labels map[string]string, value string, threshold string)
// consistent across the platform.
// If there is a go template block, it won't be replaced.
// The example for existing go template block is: {{$threshold}} or {{$value}} or any other valid go template syntax.
// See templates_test.go for examples.
func (te *TemplateExpander) preprocessTemplate() {
// Handle the $variable syntax
reDollar := regexp.MustCompile(`({{.*?}})|(\$(\w+(?:\.\w+)*))`)
@ -256,6 +257,19 @@ func (te *TemplateExpander) preprocessTemplate() {
rest := submatches[2]
return fmt.Sprintf(`{{index .Labels "%s"%s}}`, path, rest)
})
// Handle the {{$variable}} syntax
// skip the special cases {{$threshold}} and {{$value}}
reVariable := regexp.MustCompile(`{{\s*\$\s*([a-zA-Z0-9_.]+)\s*}}`)
te.text = reVariable.ReplaceAllStringFunc(te.text, func(match string) string {
if strings.HasPrefix(match, "{{$threshold}}") || strings.HasPrefix(match, "{{$value}}") {
return match
}
// get the variable name from {{$variable}} syntax
variable := strings.TrimPrefix(match, "{{$")
variable = strings.TrimSuffix(variable, "}}")
return fmt.Sprintf(`{{index .Labels "%s"}}`, variable)
})
}
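For clarity, the net effect of the new rewrite on a sample template (input invented for illustration; compare TestTemplateExpander_WithVariableSyntax below, which asserts exactly this behavior):

	// before: "test {{$service.name}} exceeds {{$threshold}} and observed at {{$value}}"
	// after:  "test {{index .Labels \"service.name\"}} exceeds {{$threshold}} and observed at {{$value}}"
	// {{$threshold}} and {{$value}} are deliberately left intact for the
	// {{$threshold := .Threshold}}-style definitions evaluated at expand time.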
// Funcs adds the functions in fm to the Expander's function map.
@ -335,6 +349,7 @@ func (te TemplateExpander) ExpandHTML(templateFiles []string) (result string, re
// ParseTest parses the templates and returns the error if any.
func (te TemplateExpander) ParseTest() error {
te.preprocessTemplate()
_, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
if err != nil {
return err

View File

@ -74,3 +74,14 @@ func TestTemplateExpander_WithLablesDotSyntax(t *testing.T) {
}
require.Equal(t, "test my-service exceeds 100 and observed at 200", result)
}
func TestTemplateExpander_WithVariableSyntax(t *testing.T) {
defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}"
data := AlertTemplateData(map[string]string{"service.name": "my-service"}, "200", "100")
expander := NewTemplateExpander(context.Background(), defs+"test {{$service.name}} exceeds {{$threshold}} and observed at {{$value}}", "test", data, times.Time(time.Now().Unix()), nil)
result, err := expander.Expand()
if err != nil {
t.Fatal(err)
}
require.Equal(t, "test my-service exceeds 100 and observed at 200", result)
}

View File

@ -0,0 +1,97 @@
package rules
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/utils/labels"
"go.uber.org/zap"
)
// defaultTestNotification prepares a dummy rule for the given rule parameters
// and sends a test notification. Returns the alert count and an error (if any).
func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError) {
ctx := context.Background()
if opts.Rule == nil {
return 0, model.BadRequest(fmt.Errorf("rule is required"))
}
parsedRule := opts.Rule
var alertname = parsedRule.AlertName
if alertname == "" {
// alertname is not mandatory for testing, so picking
// a random string here
alertname = uuid.New().String()
}
// append suffix to indicate this is a test alert
parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix)
var rule Rule
var err error
if parsedRule.RuleType == RuleTypeThreshold {
// add special labels for test alerts
parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target)
parsedRule.Labels[labels.RuleSourceLabel] = ""
parsedRule.Labels[labels.AlertRuleIdLabel] = ""
// create a threshold rule
rule, err = NewThresholdRule(
alertname,
parsedRule,
opts.FF,
opts.Reader,
opts.UseLogsNewSchema,
WithSendAlways(),
WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, model.BadRequest(err)
}
} else if parsedRule.RuleType == RuleTypeProm {
// create promql rule
rule, err = NewPromRule(
alertname,
parsedRule,
opts.Logger,
opts.Reader,
opts.ManagerOpts.PqlEngine,
WithSendAlways(),
WithSendUnmatched(),
)
if err != nil {
zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err))
return 0, model.BadRequest(err)
}
} else {
return 0, model.BadRequest(fmt.Errorf("failed to derive ruletype with given information"))
}
// set timestamp to current utc time
ts := time.Now().UTC()
count, err := rule.Eval(ctx, ts)
if err != nil {
zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
return 0, model.InternalError(fmt.Errorf("rule evaluation failed"))
}
alertsFound, ok := count.(int)
if !ok {
return 0, model.InternalError(fmt.Errorf("something went wrong"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)
return alertsFound, nil
}
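A hedged invocation sketch; only Rule is validated up front, and the remaining fields mirror what TestNotification passes in the hunk earlier in this diff (the local variables here are placeholders, not part of the change):

	count, apiErr := defaultTestNotification(PrepareTestRuleOptions{
		Rule:        parsedRule, // *PostableRule; threshold rules need RuleCondition.Target set
		Reader:      reader,
		FF:          featureFlags,
		ManagerOpts: managerOpts, // PqlEngine is consulted only for RuleTypeProm
		NotifyFunc:  notifyFunc,
	})
	if apiErr != nil {
		// BadRequest for parse/ruletype problems, InternalError for eval failures
	}
	_ = count // alerts the dummy rule fired when evaluated at time.Now().UTC()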