From 2d732ae4a906105d1d1b0ee493cc6e1afbcb217e Mon Sep 17 00:00:00 2001
From: Akira Hayashi
Date: Mon, 21 Oct 2024 00:24:12 +0900
Subject: [PATCH] chore: run all query-service tests (#6098)

---
 Makefile                                           |   13 +-
 .../queryBuilderToExpr_test.go                     |   32 +-
 .../querycache/query_range_cache.go                |   13 +-
 pkg/query-service/tests/auth_test.go               |  126 --
 pkg/query-service/tests/cold_storage_test.go       |  223 ----
 pkg/query-service/tests/docker.go                  |  117 --
 .../tests/test-deploy/alertmanager.yml             |   35 -
 .../tests/test-deploy/alerts.yml                   |   11 -
 .../tests/test-deploy/clickhouse-cluster.xml       |   75 --
 .../tests/test-deploy/clickhouse-config.xml        | 1139 -----------------
 .../tests/test-deploy/clickhouse-storage.xml       |   29 -
 .../tests/test-deploy/clickhouse-users.xml         |  123 --
 .../tests/test-deploy/docker-compose.yaml          |  283 ----
 .../test-deploy/otel-collector-config.yaml         |  148 ---
 .../otel-collector-opamp-config.yaml               |    1 -
 .../tests/test-deploy/prometheus.yml               |   25 -
 pkg/query-service/utils/format_test.go             |    8 +-
 17 files changed, 33 insertions(+), 2368 deletions(-)
 delete mode 100644 pkg/query-service/tests/auth_test.go
 delete mode 100644 pkg/query-service/tests/cold_storage_test.go
 delete mode 100644 pkg/query-service/tests/docker.go
 delete mode 100644 pkg/query-service/tests/test-deploy/alertmanager.yml
 delete mode 100644 pkg/query-service/tests/test-deploy/alerts.yml
 delete mode 100644 pkg/query-service/tests/test-deploy/clickhouse-cluster.xml
 delete mode 100644 pkg/query-service/tests/test-deploy/clickhouse-config.xml
 delete mode 100644 pkg/query-service/tests/test-deploy/clickhouse-storage.xml
 delete mode 100644 pkg/query-service/tests/test-deploy/clickhouse-users.xml
 delete mode 100644 pkg/query-service/tests/test-deploy/docker-compose.yaml
 delete mode 100644 pkg/query-service/tests/test-deploy/otel-collector-config.yaml
 delete mode 100644 pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml
 delete mode 100644 pkg/query-service/tests/test-deploy/prometheus.yml

diff --git a/Makefile b/Makefile
index c110ebdaf2..cc8bf1cf0e 100644
--- a/Makefile
+++ b/Makefile
@@ -79,7 +79,7 @@ build-query-service-static:
 	@if [ $(DEV_BUILD) != "" ]; then \
 		cd $(QUERY_SERVICE_DIRECTORY) && \
 		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
+		-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
 	else \
 		cd $(QUERY_SERVICE_DIRECTORY) && \
 		CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
@@ -188,13 +188,4 @@ check-no-ee-references:
 	fi
 
 test:
-	go test ./pkg/query-service/app/metrics/...
-	go test ./pkg/query-service/cache/...
-	go test ./pkg/query-service/app/...
-	go test ./pkg/query-service/app/querier/...
-	go test ./pkg/query-service/converter/...
-	go test ./pkg/query-service/formatter/...
-	go test ./pkg/query-service/tests/integration/...
-	go test ./pkg/query-service/rules/...
-	go test ./pkg/query-service/collectorsimulator/...
-	go test ./pkg/query-service/postprocess/...
+	go test ./pkg/query-service/...
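Reviewer note: the Makefile target above now runs go test ./pkg/query-service/..., which covers every package in a single pass, and the query_range_cache.go hunk further down sorts the series-map keys before building the merged result. Go deliberately randomizes map iteration order, so iterating the map directly can return the merged series in a different order on every run — the kind of nondeterminism that surfaces once the whole tree is tested together. The sketch below shows the sort-the-keys pattern in isolation; the types and names are hypothetical stand-ins, not the actual querycache code.

// Illustrative sketch only: hypothetical types and names, not the actual
// querycache implementation from this patch.
package main

import (
	"fmt"
	"sort"
)

type series struct {
	Labels string
	Points []int64
}

// mergeSeries returns the grouped series in a deterministic order by sorting
// the map keys first. Iterating seriesByHash directly would yield a different
// order on different runs, because Go randomizes map iteration.
func mergeSeries(seriesByHash map[uint64]*series) []*series {
	hashes := make([]uint64, 0, len(seriesByHash))
	for h := range seriesByHash {
		hashes = append(hashes, h)
	}
	sort.Slice(hashes, func(i, j int) bool { return hashes[i] < hashes[j] })

	merged := make([]*series, 0, len(hashes))
	for _, h := range hashes {
		merged = append(merged, seriesByHash[h])
	}
	return merged
}

func main() {
	in := map[uint64]*series{
		42: {Labels: `{service="frontend"}`, Points: []int64{1, 2}},
		7:  {Labels: `{service="backend"}`, Points: []int64{3}},
	}
	// Always prints the series with hash 7 first, then 42, run after run.
	for _, s := range mergeSeries(in) {
		fmt.Println(s.Labels, s.Points)
	}
}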
diff --git a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go
index 2460f90485..e2b17bb0a3 100644
--- a/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go
+++ b/pkg/query-service/queryBuilderToExpr/queryBuilderToExpr_test.go
@@ -18,42 +18,42 @@ var testCases = []struct {
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "="},
 		}},
-		Expr: `attributes.key == "checkbody"`,
+		Expr: `attributes["key"] == "checkbody"`,
 	},
 	{
 		Name: "not equal",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "!="},
 		}},
-		Expr: `attributes.key != "checkbody"`,
+		Expr: `attributes["key"] != "checkbody"`,
 	},
 	{
 		Name: "less than",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<"},
 		}},
-		Expr: "attributes.key < 10",
+		Expr: `attributes["key"] != nil && attributes["key"] < 10`,
 	},
 	{
 		Name: "greater than",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">"},
 		}},
-		Expr: "attributes.key > 10",
+		Expr: `attributes["key"] != nil && attributes["key"] > 10`,
 	},
 	{
 		Name: "less than equal",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<="},
 		}},
-		Expr: "attributes.key <= 10",
+		Expr: `attributes["key"] != nil && attributes["key"] <= 10`,
 	},
 	{
 		Name: "greater than equal",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">="},
 		}},
-		Expr: "attributes.key >= 10",
+		Expr: `attributes["key"] != nil && attributes["key"] >= 10`,
 	},
 	// case sensitive
 	{
@@ -61,42 +61,42 @@ var testCases = []struct {
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "contains"},
 		}},
-		Expr: `body contains "checkbody"`,
+		Expr: `body != nil && lower(body) contains lower("checkbody")`,
 	},
 	{
 		Name: "body ncontains",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "ncontains"},
 		}},
-		Expr: `body not contains "checkbody"`,
+		Expr: `body != nil && lower(body) not contains lower("checkbody")`,
 	},
 	{
 		Name: "body regex",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "regex"},
 		}},
-		Expr: `body matches "[0-1]+regex$"`,
+		Expr: `body != nil && body matches "[0-1]+regex$"`,
 	},
 	{
 		Name: "body not regex",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"},
 		}},
-		Expr: `body not matches "[0-1]+regex$"`,
+		Expr: `body != nil && body not matches "[0-1]+regex$"`,
 	},
 	{
 		Name: "regex with escape characters",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: `^Executing \[\S+@\S+:[0-9]+\] \S+".*`, Operator: "regex"},
 		}},
-		Expr: `body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`,
+		Expr: `body != nil && body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`,
 	},
 	{
 		Name: "invalid regex",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"},
 		}},
-		Expr: `body not matches "[0-9]++"`,
+		Expr: `body != nil && lower(body) not matches "[0-9]++"`,
 		ExpectError: true,
 	},
 	{
@@ -104,14 +104,14 @@ var testCases = []struct {
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{1, 2, 3, 4}, Operator: "in"},
 		}},
-		Expr: "attributes.key in [1,2,3,4]",
+		Expr: `attributes["key"] != nil && attributes["key"] in [1,2,3,4]`,
 	},
 	{
 		Name: "not in",
 		Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"1", "2"}, Operator: "nin"},
 		}},
-		Expr: "attributes.key not in ['1','2']",
+		Expr: `attributes["key"] != nil && attributes["key"] not in ['1','2']`,
 	},
 	{
 		Name: "exists",
@@ -134,7 +134,7 @@ var testCases = []struct {
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"},
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"},
 		}},
-		Expr: `attributes.key <= 10 and body not matches "[0-1]+regex$" and "key" not in attributes`,
+		Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body != nil && body not matches "[0-1]+regex$" and "key" not in attributes`,
 	},
 	{
 		Name: "incorrect multi filter",
@@ -143,7 +143,7 @@ var testCases = []struct {
 			{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"},
 			{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"},
 		}},
-		Expr: `attributes.key <= 10 and body not matches "[0-9]++" and "key" not in attributes`,
+		Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body not matches "[0-9]++" and "key" not in attributes`,
 		ExpectError: true,
 	},
 }
diff --git a/pkg/query-service/querycache/query_range_cache.go b/pkg/query-service/querycache/query_range_cache.go
index 3b3e3be93c..b2bde35eb5 100644
--- a/pkg/query-service/querycache/query_range_cache.go
+++ b/pkg/query-service/querycache/query_range_cache.go
@@ -142,9 +142,18 @@ func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
 		}
 		seriesesByLabels[h].Points = append(seriesesByLabels[h].Points, series.Points...)
} + + hashes := make([]uint64, 0, len(seriesesByLabels)) + for h := range seriesesByLabels { + hashes = append(hashes, h) + } + sort.Slice(hashes, func(i, j int) bool { + return hashes[i] < hashes[j] + }) + // Sort the points in each series by timestamp - for idx := range seriesesByLabels { - series := seriesesByLabels[idx] + for _, h := range hashes { + series := seriesesByLabels[h] series.SortPoints() series.RemoveDuplicatePoints() mergedSeries = append(mergedSeries, series) diff --git a/pkg/query-service/tests/auth_test.go b/pkg/query-service/tests/auth_test.go deleted file mode 100644 index 7c7d5277b6..0000000000 --- a/pkg/query-service/tests/auth_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package tests - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - "go.signoz.io/signoz/pkg/query-service/auth" - "go.signoz.io/signoz/pkg/query-service/model" -) - -func invite(t *testing.T, email string) *model.InviteResponse { - q := endpoint + fmt.Sprintf("/api/v1/invite?email=%s", email) - resp, err := client.Get(q) - require.NoError(t, err) - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - var inviteResp model.InviteResponse - err = json.Unmarshal(b, &inviteResp) - require.NoError(t, err) - - return &inviteResp -} - -func register(email, password, token string) (string, error) { - q := endpoint + "/api/v1/register" - - req := auth.RegisterRequest{ - Email: email, - Password: password, - InviteToken: token, - } - - b, err := json.Marshal(req) - if err != nil { - return "", err - } - resp, err := client.Post(q, "application/json", bytes.NewBuffer(b)) - if err != nil { - return "", err - } - - defer resp.Body.Close() - b, err = io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(b), nil -} - -func login(email, password, refreshToken string) (*model.LoginResponse, error) { - q := endpoint + "/api/v1/login" - - req := model.LoginRequest{ - Email: email, - Password: password, - RefreshToken: refreshToken, - } - - b, err := json.Marshal(req) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal") - } - resp, err := client.Post(q, "application/json", bytes.NewBuffer(b)) - if err != nil { - return nil, errors.Wrap(err, "failed to post") - } - - defer resp.Body.Close() - b, err = io.ReadAll(resp.Body) - if err != nil { - return nil, errors.Wrap(err, "failed to read body") - } - - loginResp := &model.LoginResponse{} - err = json.Unmarshal(b, loginResp) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal") - } - - return loginResp, nil -} - -func TestAuthInviteAPI(t *testing.T) { - t.Skip() - email := "abc@signoz.io" - resp := invite(t, email) - require.Equal(t, email, resp.Email) - require.NotNil(t, resp.InviteToken) -} - -func TestAuthRegisterAPI(t *testing.T) { - email := "alice@signoz.io" - resp, err := register(email, "Password@123", "") - require.NoError(t, err) - require.Contains(t, resp, "user registered successfully") - -} - -func TestAuthLoginAPI(t *testing.T) { - t.Skip() - email := "abc-login@signoz.io" - password := "Password@123" - inv := invite(t, email) - - resp, err := register(email, password, inv.InviteToken) - require.NoError(t, err) - require.Contains(t, resp, "user registered successfully") - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - loginResp2, err := login("", "", loginResp.RefreshJwt) - require.NoError(t, err) - - require.NotNil(t, loginResp2.AccessJwt) -} diff 
--git a/pkg/query-service/tests/cold_storage_test.go b/pkg/query-service/tests/cold_storage_test.go deleted file mode 100644 index 87db1b6a93..0000000000 --- a/pkg/query-service/tests/cold_storage_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package tests - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.signoz.io/signoz/pkg/query-service/model" -) - -const ( - endpoint = "http://localhost:8180" -) - -var ( - client http.Client -) - -func setTTL(table, coldStorage, toColdTTL, deleteTTL string, jwtToken string) ([]byte, error) { - params := fmt.Sprintf("type=%s&duration=%s", table, deleteTTL) - if len(toColdTTL) > 0 { - params += fmt.Sprintf("&coldStorage=%s&toColdDuration=%s", coldStorage, toColdTTL) - } - var bearer = "Bearer " + jwtToken - req, err := http.NewRequest("POST", endpoint+"/api/v1/settings/ttl?"+params, nil) - if err != nil { - return nil, err - } - req.Header.Add("Authorization", bearer) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - return b, err - } - - return b, nil -} - -func TestListDisks(t *testing.T) { - t.Skip() - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - var bearer = "Bearer " + loginResp.AccessJwt - req, err := http.NewRequest("POST", endpoint+"/api/v1/disks", nil) - req.Header.Add("Authorization", bearer) - - resp, err := client.Do(req) - require.NoError(t, err) - - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, `[{"name":"default","type":"local"}, {"name":"s3","type":"s3"}]`, string(b)) -} - -func TestSetTTL(t *testing.T) { - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - testCases := []struct { - caseNo int - coldStorage string - table string - coldTTL string - deleteTTL string - expected string - }{ - { - 1, "s3", "traces", "100h", "60h", - "Delete TTL should be greater than cold storage move TTL.", - }, - { - 2, "s3", "traces", "100", "60s", - "Not a valid toCold TTL duration 100", - }, - { - 3, "s3", "traces", "100s", "100", - "Not a valid TTL duration 100", - }, - { - 4, "s3", "metrics", "1h", "2h", - "move ttl has been successfully set up", - }, - { - 5, "s3", "traces", "10s", "6h", - "move ttl has been successfully set up", - }, - } - - for _, tc := range testCases { - r, err := setTTL(tc.table, tc.coldStorage, tc.coldTTL, tc.deleteTTL, loginResp.AccessJwt) - require.NoErrorf(t, err, "Failed case: %d", tc.caseNo) - require.Containsf(t, string(r), tc.expected, "Failed case: %d", tc.caseNo) - } - - time.Sleep(20 * time.Second) - doneCh := make(chan struct{}) - defer close(doneCh) - - count := 0 - for range minioClient.ListObjects(bucketName, "", false, doneCh) { - count++ - } - - require.True(t, count > 0, "No objects are present in Minio") - fmt.Printf("=== Found %d objects in Minio\n", count) -} - -func getTTL(t *testing.T, table string, jwtToken string) *model.GetTTLResponseItem { - url := endpoint + fmt.Sprintf("/api/v1/settings/ttl?type=%s", table) - if len(table) == 0 { - url = endpoint + "/api/v1/settings/ttl" - } - - var bearer = "Bearer " + jwtToken - req, err := http.NewRequest("GET", url, nil) - require.NoError(t, err) - req.Header.Add("Authorization", bearer) - resp, err := client.Do(req) - - require.NoError(t, err) - - defer 
resp.Body.Close() - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - res := &model.GetTTLResponseItem{} - require.NoError(t, json.Unmarshal(b, res)) - return res -} - -func TestGetTTL(t *testing.T) { - email := "alice@signoz.io" - password := "Password@123" - - loginResp, err := login(email, password, "") - require.NoError(t, err) - - resp := getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - } - require.Equal(t, "success", resp.Status) - - r, err := setTTL("traces", "s3", "1h", "2h", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "successfully set up") - - resp = getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - resp = getTTL(t, "traces", loginResp.AccessJwt) - require.Equal(t, 1, resp.ExpectedTracesMoveTime) - require.Equal(t, 2, resp.ExpectedTracesTime) - } - resp = getTTL(t, "traces", loginResp.AccessJwt) - require.Equal(t, "success", resp.Status) - require.Equal(t, 1, resp.TracesMoveTime) - require.Equal(t, 2, resp.TracesTime) - - resp = getTTL(t, "metrics", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - } - require.Equal(t, "success", resp.Status) - - r, err = setTTL("traces", "s3", "10h", "20h", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "successfully set up") - - resp = getTTL(t, "traces", loginResp.AccessJwt) - for resp.Status == "pending" { - time.Sleep(time.Second) - resp = getTTL(t, "traces", loginResp.AccessJwt) - } - require.Equal(t, "success", resp.Status) - require.Equal(t, 10, resp.TracesMoveTime) - require.Equal(t, 20, resp.TracesTime) - - resp = getTTL(t, "metrics", loginResp.AccessJwt) - for resp.Status != "success" && resp.Status != "failed" { - time.Sleep(time.Second) - resp = getTTL(t, "metrics", loginResp.AccessJwt) - } - require.Equal(t, "success", resp.Status) - require.Equal(t, 1, resp.MetricsMoveTime) - require.Equal(t, 2, resp.MetricsTime) - - r, err = setTTL("metrics", "s3", "0s", "0s", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "Not a valid TTL duration 0s") - - r, err = setTTL("traces", "s3", "0s", "0s", loginResp.AccessJwt) - require.NoError(t, err) - require.Contains(t, string(r), "Not a valid TTL duration 0s") -} - -func TestMain(m *testing.M) { - if err := startCluster(); err != nil { - fmt.Println(err) - } - defer stopCluster() - - m.Run() -} diff --git a/pkg/query-service/tests/docker.go b/pkg/query-service/tests/docker.go deleted file mode 100644 index c65a627512..0000000000 --- a/pkg/query-service/tests/docker.go +++ /dev/null @@ -1,117 +0,0 @@ -package tests - -import ( - "context" - "fmt" - "net/http" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "log" - - minio "github.com/minio/minio-go/v6" -) - -const ( - prefix = "signoz_test" - minioEndpoint = "localhost:9100" - accessKey = "ash" - secretKey = "password" - bucketName = "test" -) - -var ( - minioClient *minio.Client - composeFile string -) - -func init() { - goArch := runtime.GOARCH - if goArch == "arm64" { - composeFile = "./test-deploy/docker-compose.arm.yaml" - } else if goArch == "amd64" { - composeFile = "./test-deploy/docker-compose.yaml" - } else { - log.Fatalf("Unsupported architecture: %s", goArch) - } -} - -func getCmd(args ...string) *exec.Cmd { - cmd := exec.CommandContext(context.Background(), args[0], args[1:]...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Env = os.Environ() - return cmd -} - -func startMinio() error { - log.Printf("Starting minio") - cmd := getCmd("docker", "run", "-d", "-p", "9100:9000", "-p", "9101:9001", - "--name", "signoz-minio-test", "-e", "MINIO_ROOT_USER=ash", - "-e", "MINIO_ROOT_PASSWORD=password", - "quay.io/minio/minio", "server", "/data", "--console-address", ":9001") - - if err := cmd.Run(); err != nil { - return err - } - - var err error - minioClient, err = minio.New(minioEndpoint, accessKey, secretKey, false) - if err != nil { - return err - } - if err = minioClient.MakeBucket(bucketName, ""); err != nil { - return err - } - return nil -} - -func startCluster() error { - if err := os.MkdirAll("./test-deploy/data/minio/test", 0777); err != nil { - return err - } - - if err := startMinio(); err != nil { - return err - } - - cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix, - "up", "--force-recreate", "--build", "--remove-orphans", "--detach") - - log.Printf("Starting signoz cluster...\n") - if err := cmd.Run(); err != nil { - log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err) - return err - } - - client := http.Client{} - for i := 0; i < 10; i++ { - if _, err := client.Get("http://localhost:8180/api/v1/health"); err != nil { - time.Sleep(2 * time.Second) - } else { - log.Printf("CLUSTER UP\n") - return nil - } - } - return fmt.Errorf("query-service is not healthy") -} - -func stopCluster() { - cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix, "down", "-v") - if err := cmd.Run(); err != nil { - log.Printf("Error while stopping the cluster. Error: %v\n", err) - } - if err := os.RemoveAll("./test-deploy/data"); err != nil { - log.Printf("Error while cleaning temporary dir. 
Error: %v\n", err) - } - - cmd = getCmd("docker", "container", "rm", "-f", "signoz-minio-test") - if err := cmd.Run(); err != nil { - log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err) - } - - log.Printf("CLUSTER DOWN: %s\n", prefix) -} diff --git a/pkg/query-service/tests/test-deploy/alertmanager.yml b/pkg/query-service/tests/test-deploy/alertmanager.yml deleted file mode 100644 index d69357f9dd..0000000000 --- a/pkg/query-service/tests/test-deploy/alertmanager.yml +++ /dev/null @@ -1,35 +0,0 @@ -global: - resolve_timeout: 1m - slack_api_url: 'https://hooks.slack.com/services/xxx' - -route: - receiver: 'slack-notifications' - -receivers: -- name: 'slack-notifications' - slack_configs: - - channel: '#alerts' - send_resolved: true - icon_url: https://avatars3.githubusercontent.com/u/3380462 - title: |- - [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }} - {{- if gt (len .CommonLabels) (len .GroupLabels) -}} - {{" "}}( - {{- with .CommonLabels.Remove .GroupLabels.Names }} - {{- range $index, $label := .SortedPairs -}} - {{ if $index }}, {{ end }} - {{- $label.Name }}="{{ $label.Value -}}" - {{- end }} - {{- end -}} - ) - {{- end }} - text: >- - {{ range .Alerts -}} - *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }} - - *Description:* {{ .Annotations.description }} - - *Details:* - {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` - {{ end }} - {{ end }} \ No newline at end of file diff --git a/pkg/query-service/tests/test-deploy/alerts.yml b/pkg/query-service/tests/test-deploy/alerts.yml deleted file mode 100644 index 810a20750c..0000000000 --- a/pkg/query-service/tests/test-deploy/alerts.yml +++ /dev/null @@ -1,11 +0,0 @@ -groups: -- name: ExampleCPULoadGroup - rules: - - alert: HighCpuLoad - expr: system_cpu_load_average_1m > 0.1 - for: 0m - labels: - severity: warning - annotations: - summary: High CPU load - description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" diff --git a/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml b/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml deleted file mode 100644 index 0e3ddcdde0..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-cluster.xml +++ /dev/null @@ -1,75 +0,0 @@ - - - - - - zookeeper-1 - 2181 - - - - - - - - - - - - - - - - clickhouse - 9000 - - - - - - - - \ No newline at end of file diff --git a/pkg/query-service/tests/test-deploy/clickhouse-config.xml b/pkg/query-service/tests/test-deploy/clickhouse-config.xml deleted file mode 100644 index 4e8dc00b30..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-config.xml +++ /dev/null @@ -1,1139 +0,0 @@ - - - - - - information - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - - 1000M - 10 - - - - - - - - - - - - - - - - - - 8123 - - - 9000 - - - 9004 - - - 9005 - - - - - - - - - - - - 9009 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 4096 - - - 3 - - - - - false - - - /path/to/ssl_cert_file - /path/to/ssl_key_file - - - false - - - /path/to/ssl_ca_cert_file - - - none - - - 0 - - - -1 - -1 - - - false - - - - - - - - - - - none - true - true - sslv2,sslv3 - true - - - - true - true - sslv2,sslv3 - true - - - - RejectCertificateHandler - - - - - - - - - 100 - - - 0 - - - - 10000 - - - - - - 0.9 - - - 4194304 - - - 0 - - - - - - 8589934592 - - - 5368709120 - - - - 
1000 - - - 134217728 - - - 10000 - - - /var/lib/clickhouse/ - - - /var/lib/clickhouse/tmp/ - - - - ` - - - - - - /var/lib/clickhouse/user_files/ - - - - - - - - - - - - - users.xml - - - - /var/lib/clickhouse/access/ - - - - - - - default - - - - - - - - - - - - default - - - - - - - - - true - - - false - - ' | sed -e 's|.*>\(.*\)<.*|\1|') - wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb - apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb - clickhouse-jdbc-bridge & - - * [CentOS/RHEL] - export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge - export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '' | sed -e 's|.*>\(.*\)<.*|\1|') - wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm - yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm - clickhouse-jdbc-bridge & - - Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information. - ]]> - - - - - - - - - - - - - - - 01 - example01-01-1 - - - - - - 3600 - - - - 3600 - - - 60 - - - - - - - - - - - - - system - query_log
- - toYYYYMM(event_date) - - - - - - 7500 -
- - - - system - trace_log
- - toYYYYMM(event_date) - 7500 -
- - - - system - query_thread_log
- toYYYYMM(event_date) - 7500 -
- - - - system - query_views_log
- toYYYYMM(event_date) - 7500 -
- - - - system - part_log
- toYYYYMM(event_date) - 7500 -
- - - - - - system - metric_log
- 7500 - 1000 -
- - - - system - asynchronous_metric_log
- - 7000 -
- - - - - - engine MergeTree - partition by toYYYYMM(finish_date) - order by (finish_date, finish_time_us, trace_id) - - system - opentelemetry_span_log
- 7500 -
- - - - - system - crash_log
- - - 1000 -
- - - - - - - system - processors_profile_log
- - toYYYYMM(event_date) - 7500 -
- - - - - - - - - *_dictionary.xml - - - *_function.xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /clickhouse/task_queue/ddl - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - click_cost - any - - 0 - 3600 - - - 86400 - 60 - - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - - - /var/lib/clickhouse/format_schemas/ - - - - - hide encrypt/decrypt arguments - ((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\) - - \1(???) - - - - - - - - - - false - - false - - - https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277 - - - - - - - - - - - 268435456 - true - -
diff --git a/pkg/query-service/tests/test-deploy/clickhouse-storage.xml b/pkg/query-service/tests/test-deploy/clickhouse-storage.xml deleted file mode 100644 index f444bf43b4..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-storage.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - 10485760 - - - s3 - http://172.17.0.1:9100/test// - ash - password - - - - - - - default - - - s3 - 0 - - - - - - diff --git a/pkg/query-service/tests/test-deploy/clickhouse-users.xml b/pkg/query-service/tests/test-deploy/clickhouse-users.xml deleted file mode 100644 index f18562071d..0000000000 --- a/pkg/query-service/tests/test-deploy/clickhouse-users.xml +++ /dev/null @@ -1,123 +0,0 @@ - - - - - - - - - - 10000000000 - - - random - - - - - 1 - - - - - - - - - - - - - ::/0 - - - - default - - - default - - - - - - - - - - - - - - 3600 - - - 0 - 0 - 0 - 0 - 0 - - - - diff --git a/pkg/query-service/tests/test-deploy/docker-compose.yaml b/pkg/query-service/tests/test-deploy/docker-compose.yaml deleted file mode 100644 index 562f19d83a..0000000000 --- a/pkg/query-service/tests/test-deploy/docker-compose.yaml +++ /dev/null @@ -1,283 +0,0 @@ -version: "2.4" - -x-clickhouse-defaults: &clickhouse-defaults - restart: on-failure - image: clickhouse/clickhouse-server:24.1.2-alpine - tty: true - depends_on: - - zookeeper-1 - # - zookeeper-2 - # - zookeeper-3 - logging: - options: - max-size: 50m - max-file: "3" - healthcheck: - # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" - test: - [ - "CMD", - "wget", - "--spider", - "-q", - "0.0.0.0:8123/ping" - ] - interval: 30s - timeout: 5s - retries: 3 - ulimits: - nproc: 65535 - nofile: - soft: 262144 - hard: 262144 - -x-db-depend: &db-depend - depends_on: - clickhouse: - condition: service_healthy - otel-collector-migrator: - condition: service_completed_successfully - # clickhouse-2: - # condition: service_healthy - # clickhouse-3: - # condition: service_healthy - -services: - zookeeper-1: - image: bitnami/zookeeper:3.7.1 - container_name: signoz-zookeeper-1 - user: root - ports: - - "2181:2181" - - "2888:2888" - - "3888:3888" - volumes: - - ./data/zookeeper-1:/bitnami/zookeeper - environment: - - ZOO_SERVER_ID=1 - - ZOO_SERVERS=0.0.0.0:2888:3888 - # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888 - - ALLOW_ANONYMOUS_LOGIN=yes - - ZOO_AUTOPURGE_INTERVAL=1 - - # zookeeper-2: - # image: bitnami/zookeeper:3.7.0 - # container_name: signoz-zookeeper-2 - # user: root - # ports: - # - "2182:2181" - # - "2889:2888" - # - "3889:3888" - # volumes: - # - ./data/zookeeper-2:/bitnami/zookeeper - # environment: - # - ZOO_SERVER_ID=2 - # - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888 - # - ALLOW_ANONYMOUS_LOGIN=yes - # - ZOO_AUTOPURGE_INTERVAL=1 - - # zookeeper-3: - # image: bitnami/zookeeper:3.7.0 - # container_name: signoz-zookeeper-3 - # user: root - # ports: - # - "2183:2181" - # - "2890:2888" - # - "3890:3888" - # volumes: - # - ./data/zookeeper-3:/bitnami/zookeeper - # environment: - # - ZOO_SERVER_ID=3 - # - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888 - # - ALLOW_ANONYMOUS_LOGIN=yes - # - ZOO_AUTOPURGE_INTERVAL=1 - - clickhouse: - <<: *clickhouse-defaults - container_name: signoz-clickhouse - hostname: clickhouse - ports: - - "9000:9000" - - "8123:8123" - - "9181:9181" - volumes: - - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - - 
./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - - ./data/clickhouse/:/var/lib/clickhouse/ - - # clickhouse-2: - # <<: *clickhouse-defaults - # container_name: signoz-clickhouse-2 - # hostname: clickhouse-2 - # ports: - # - "9001:9000" - # - "8124:8123" - # - "9182:9181" - # volumes: - # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - # - ./data/clickhouse-2/:/var/lib/clickhouse/ - - # clickhouse-3: - # <<: *clickhouse-defaults - # container_name: signoz-clickhouse-3 - # hostname: clickhouse-3 - # ports: - # - "9002:9000" - # - "8125:8123" - # - "9183:9181" - # volumes: - # - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml - # - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml - # - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - # # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml - # - ./data/clickhouse-3/:/var/lib/clickhouse/ - - alertmanager: - image: signoz/alertmanager:0.23.7 - container_name: signoz-alertmanager - volumes: - - ./data/alertmanager:/data - depends_on: - query-service: - condition: service_healthy - restart: on-failure - command: - - --queryService.url=http://query-service:8085 - - --storage.path=/data - - # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` - - query-service: - image: signoz/query-service:latest - container_name: signoz-query-service - command: - [ - "-config=/root/config/prometheus.yml", - "--use-logs-new-schema=true" - ] - # ports: - # - "6060:6060" # pprof port - # - "8080:8080" # query-service port - volumes: - - ./prometheus.yml:/root/config/prometheus.yml - - ../dashboards:/root/config/dashboards - - ./data/signoz/:/var/lib/signoz/ - environment: - - ClickHouseUrl=tcp://clickhouse:9000 - - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/ - - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db - - DASHBOARDS_PATH=/root/config/dashboards - - STORAGE=clickhouse - - GODEBUG=netdns=go - - TELEMETRY_ENABLED=true - - DEPLOYMENT_TYPE=docker-standalone-amd - restart: on-failure - healthcheck: - test: - [ - "CMD", - "wget", - "--spider", - "-q", - "localhost:8080/api/v1/health" - ] - interval: 30s - timeout: 5s - retries: 3 - <<: *db-depend - - otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10} - container_name: otel-migrator - command: - - "--dsn=tcp://clickhouse:9000" - depends_on: - clickhouse: - condition: service_healthy - # clickhouse-2: - # condition: service_healthy - # clickhouse-3: - # condition: service_healthy - - otel-collector: - image: signoz/signoz-otel-collector:0.102.12 - container_name: signoz-otel-collector - command: - [ - "--config=/etc/otel-collector-config.yaml", - "--manager-config=/etc/manager-config.yaml", - "--copy-path=/var/tmp/collector-config.yaml", - "--feature-gates=-pkg.translator.prometheus.NormalizeName" - ] - user: root # required for reading docker container logs - volumes: - - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml - - /var/lib/docker/containers:/var/lib/docker/containers:ro - - 
/:/hostfs:ro - environment: - - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux - - DOCKER_MULTI_NODE_CLUSTER=false - - LOW_CARDINAL_EXCEPTION_GROUPING=false - ports: - # - "1777:1777" # pprof extension - - "4317:4317" # OTLP gRPC receiver - - "4318:4318" # OTLP HTTP receiver - # - "8888:8888" # OtelCollector internal metrics - # - "8889:8889" # signoz spanmetrics exposed by the agent - # - "9411:9411" # Zipkin port - # - "13133:13133" # health check extension - # - "14250:14250" # Jaeger gRPC - # - "14268:14268" # Jaeger thrift HTTP - # - "55678:55678" # OpenCensus receiver - # - "55679:55679" # zPages extension - restart: on-failure - depends_on: - clickhouse: - condition: service_healthy - otel-collector-migrator: - condition: service_completed_successfully - query-service: - condition: service_healthy - - logspout: - image: "gliderlabs/logspout:v3.2.14" - container_name: signoz-logspout - volumes: - - /etc/hostname:/etc/host_hostname:ro - - /var/run/docker.sock:/var/run/docker.sock - command: syslog+tcp://otel-collector:2255 - depends_on: - - otel-collector - restart: on-failure - - hotrod: - image: jaegertracing/example-hotrod:1.30 - container_name: hotrod - logging: - options: - max-size: 50m - max-file: "3" - command: [ "all" ] - environment: - - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces - - load-hotrod: - image: "signoz/locust:1.2.3" - container_name: load-hotrod - hostname: load-hotrod - environment: - ATTACKED_HOST: http://hotrod:8080 - LOCUST_MODE: standalone - NO_PROXY: standalone - TASK_DELAY_FROM: 5 - TASK_DELAY_TO: 30 - QUIET_MODE: "${QUIET_MODE:-false}" - LOCUST_OPTS: "--headless -u 10 -r 1" - volumes: - - ../common/locust-scripts:/locust diff --git a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml b/pkg/query-service/tests/test-deploy/otel-collector-config.yaml deleted file mode 100644 index 580179bbd0..0000000000 --- a/pkg/query-service/tests/test-deploy/otel-collector-config.yaml +++ /dev/null @@ -1,148 +0,0 @@ -receivers: - tcplog/docker: - listen_address: "0.0.0.0:2255" - operators: - - type: regex_parser - regex: '^<([0-9]+)>[0-9]+ (?P[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P\S+) (?P\S+) [0-9]+ - -( (?P.*))?' 
- timestamp: - parse_from: attributes.timestamp - layout: '%Y-%m-%dT%H:%M:%S.%LZ' - - type: move - from: attributes["body"] - to: body - - type: remove - field: attributes.timestamp - # please remove names from below if you want to collect logs from them - - type: filter - id: signoz_logs_filter - expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"' - opencensus: - endpoint: 0.0.0.0:55678 - otlp/spanmetrics: - protocols: - grpc: - endpoint: localhost:12345 - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:4317 - http: - endpoint: 0.0.0.0:4318 - jaeger: - protocols: - grpc: - endpoint: 0.0.0.0:14250 - thrift_http: - endpoint: 0.0.0.0:14268 - # thrift_compact: - # endpoint: 0.0.0.0:6831 - # thrift_binary: - # endpoint: 0.0.0.0:6832 - hostmetrics: - collection_interval: 30s - root_path: /hostfs - scrapers: - cpu: {} - load: {} - memory: {} - disk: {} - filesystem: {} - network: {} - prometheus: - config: - global: - scrape_interval: 60s - scrape_configs: - # otel-collector internal metrics - - job_name: otel-collector - static_configs: - - targets: - - otel-collector:8888 - -processors: - batch: - send_batch_size: 10000 - send_batch_max_size: 11000 - timeout: 10s - signozspanmetrics/cumulative: - metrics_exporter: clickhousemetricswrite - latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] - dimensions_cache_size: 100000 - dimensions: - - name: service.namespace - default: default - - name: deployment.environment - default: default - # memory_limiter: - # # 80% of maximum memory up to 2G - # limit_mib: 1500 - # # 25% of limit up to 2G - # spike_limit_mib: 512 - # check_interval: 5s - # - # # 50% of the maximum memory - # limit_percentage: 50 - # # 20% of max memory usage spike expected - # spike_limit_percentage: 20 - # queued_retry: - # num_workers: 4 - # queue_size: 100 - # retry_on_failure: true - resourcedetection: - detectors: [env, system] - timeout: 2s - -extensions: - health_check: - endpoint: 0.0.0.0:13133 - zpages: - endpoint: 0.0.0.0:55679 - pprof: - endpoint: 0.0.0.0:1777 - -exporters: - clickhousetraces: - datasource: tcp://clickhouse:9000/signoz_traces - docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER} - low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING} - clickhousemetricswrite: - endpoint: tcp://clickhouse:9000/signoz_metrics - resource_to_telemetry_conversion: - enabled: true - prometheus: - endpoint: 0.0.0.0:8889 - clickhouselogsexporter: - dsn: tcp://clickhouse:9000/signoz_logs - docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER} - timeout: 10s - use_new_schema: true - # logging: {} - -service: - telemetry: - metrics: - address: 0.0.0.0:8888 - extensions: - - health_check - - zpages - - pprof - pipelines: - traces: - receivers: [jaeger, otlp] - processors: [signozspanmetrics/cumulative, batch] - exporters: [clickhousetraces] - metrics: - receivers: [otlp] - processors: [batch] - exporters: [clickhousemetricswrite] - metrics/generic: - receivers: [hostmetrics, prometheus] - processors: [resourcedetection, batch] - exporters: [clickhousemetricswrite] - metrics/spanmetrics: - receivers: [otlp/spanmetrics] - exporters: [prometheus] - logs: - receivers: [otlp, tcplog/docker] - processors: [batch] - exporters: [clickhouselogsexporter] diff --git a/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml 
b/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml
deleted file mode 100644
index e408b55ef6..0000000000
--- a/pkg/query-service/tests/test-deploy/otel-collector-opamp-config.yaml
+++ /dev/null
@@ -1 +0,0 @@
-server_endpoint: ws://query-service:4320/v1/opamp
diff --git a/pkg/query-service/tests/test-deploy/prometheus.yml b/pkg/query-service/tests/test-deploy/prometheus.yml
deleted file mode 100644
index d7c52893c5..0000000000
--- a/pkg/query-service/tests/test-deploy/prometheus.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# my global config
-global:
-  scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
-  # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
-  alertmanagers:
-    - static_configs:
-        - targets:
-            - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-  # - "first_rules.yml"
-  # - "second_rules.yml"
-  - 'alerts.yml'
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs: []
-
-remote_read:
-  - url: tcp://clickhouse:9000/signoz_metrics
diff --git a/pkg/query-service/utils/format_test.go b/pkg/query-service/utils/format_test.go
index 3a2a7f1265..e51d510a55 100644
--- a/pkg/query-service/utils/format_test.go
+++ b/pkg/query-service/utils/format_test.go
@@ -419,28 +419,28 @@ var testGetClickhouseColumnName = []struct {
 		typeName: string(v3.AttributeKeyTypeTag),
 		dataType: string(v3.AttributeKeyDataTypeInt64),
 		field:    "tag1",
-		want:     "attribute_int64_tag1",
+		want:     "`attribute_int64_tag1`",
 	},
 	{
 		name:     "resource",
 		typeName: string(v3.AttributeKeyTypeResource),
 		dataType: string(v3.AttributeKeyDataTypeInt64),
 		field:    "tag1",
-		want:     "resource_int64_tag1",
+		want:     "`resource_int64_tag1`",
 	},
 	{
 		name:     "attribute old parser",
 		typeName: constants.Attributes,
 		dataType: string(v3.AttributeKeyDataTypeInt64),
 		field:    "tag1",
-		want:     "attribute_int64_tag1",
+		want:     "`attribute_int64_tag1`",
 	},
 	{
 		name:     "resource old parser",
 		typeName: constants.Resources,
 		dataType: string(v3.AttributeKeyDataTypeInt64),
 		field:    "tag1",
-		want:     "resource_int64_tag1",
+		want:     "`resource_int64_tag1`",
 	},
 }
 
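Reviewer note: the updated queryBuilderToExpr expectations guard each attribute and body comparison with a nil check, e.g. attributes["key"] != nil && attributes["key"] < 10, so the comparison against a concrete value is skipped whenever the attribute is absent. The sketch below evaluates one of those guarded filter strings with the expr-lang/expr library; that library choice is an assumption made for illustration — the patch itself does not show which evaluator consumes these expressions.

// Illustrative sketch only: assumes the expr-lang/expr evaluator, which this
// patch does not itself reference.
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// One of the guarded filter expressions from the updated test expectations.
	code := `attributes["key"] != nil && attributes["key"] < 10`

	envs := []map[string]interface{}{
		// Attribute present: the guard passes and the comparison runs.
		{"attributes": map[string]interface{}{"key": 5}},
		// Attribute absent: the guard short-circuits, so the missing value is
		// never compared against 10.
		{"attributes": map[string]interface{}{}},
	}

	for _, env := range envs {
		out, err := expr.Eval(code, env)
		if err != nil {
			fmt.Println("eval error:", err)
			continue
		}
		fmt.Println(out) // true for the first env, false for the second
	}
}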