chore: run all query-service tests (#6098)

Akira Hayashi 2024-10-21 00:24:12 +09:00 committed by GitHub
parent 8466e31e02
commit 2d732ae4a9
17 changed files with 33 additions and 2368 deletions

View File

@@ -79,7 +79,7 @@ build-query-service-static:
@if [ $(DEV_BUILD) != "" ]; then \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
-ldflags "-linkmode external -extldflags '-static' -s -w ${LD_FLAGS} ${DEV_LD_FLAGS}"; \
else \
cd $(QUERY_SERVICE_DIRECTORY) && \
CGO_ENABLED=1 go build -tags timetzdata -a -o ./bin/query-service-${GOOS}-${GOARCH} \
@@ -188,13 +188,4 @@ check-no-ee-references:
fi
test:
go test ./pkg/query-service/app/metrics/...
go test ./pkg/query-service/cache/...
go test ./pkg/query-service/app/...
go test ./pkg/query-service/app/querier/...
go test ./pkg/query-service/converter/...
go test ./pkg/query-service/formatter/...
go test ./pkg/query-service/tests/integration/...
go test ./pkg/query-service/rules/...
go test ./pkg/query-service/collectorsimulator/...
go test ./pkg/query-service/postprocess/...
go test ./pkg/query-service/...

View File

@@ -18,42 +18,42 @@ var testCases = []struct {
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "="},
}},
Expr: `attributes.key == "checkbody"`,
Expr: `attributes["key"] == "checkbody"`,
},
{
Name: "not equal",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "checkbody", Operator: "!="},
}},
Expr: `attributes.key != "checkbody"`,
Expr: `attributes["key"] != "checkbody"`,
},
{
Name: "less than",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<"},
}},
Expr: "attributes.key < 10",
Expr: `attributes["key"] != nil && attributes["key"] < 10`,
},
{
Name: "greater than",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">"},
}},
Expr: "attributes.key > 10",
Expr: `attributes["key"] != nil && attributes["key"] > 10`,
},
{
Name: "less than equal",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: "<="},
}},
Expr: "attributes.key <= 10",
Expr: `attributes["key"] != nil && attributes["key"] <= 10`,
},
{
Name: "greater than equal",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: ">="},
}},
Expr: "attributes.key >= 10",
Expr: `attributes["key"] != nil && attributes["key"] >= 10`,
},
// case sensitive
{
@@ -61,42 +61,42 @@ var testCases = []struct {
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "contains"},
}},
Expr: `body contains "checkbody"`,
Expr: `body != nil && lower(body) contains lower("checkbody")`,
},
{
Name: "body ncontains",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "checkbody", Operator: "ncontains"},
}},
Expr: `body not contains "checkbody"`,
Expr: `body != nil && lower(body) not contains lower("checkbody")`,
},
{
Name: "body regex",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "regex"},
}},
Expr: `body matches "[0-1]+regex$"`,
Expr: `body != nil && body matches "[0-1]+regex$"`,
},
{
Name: "body not regex",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"},
}},
Expr: `body not matches "[0-1]+regex$"`,
Expr: `body != nil && body not matches "[0-1]+regex$"`,
},
{
Name: "regex with escape characters",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: `^Executing \[\S+@\S+:[0-9]+\] \S+".*`, Operator: "regex"},
}},
Expr: `body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`,
Expr: `body != nil && body matches "^Executing \\[\\S+@\\S+:[0-9]+\\] \\S+\".*"`,
},
{
Name: "invalid regex",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"},
}},
Expr: `body not matches "[0-9]++"`,
Expr: `body != nil && lower(body) not matches "[0-9]++"`,
ExpectError: true,
},
{
@@ -104,14 +104,14 @@ var testCases = []struct {
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{1, 2, 3, 4}, Operator: "in"},
}},
Expr: "attributes.key in [1,2,3,4]",
Expr: `attributes["key"] != nil && attributes["key"] in [1,2,3,4]`,
},
{
Name: "not in",
Query: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"1", "2"}, Operator: "nin"},
}},
Expr: "attributes.key not in ['1','2']",
Expr: `attributes["key"] != nil && attributes["key"] not in ['1','2']`,
},
{
Name: "exists",
@@ -134,7 +134,7 @@ var testCases = []struct {
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-1]+regex$", Operator: "nregex"},
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"},
}},
Expr: `attributes.key <= 10 and body not matches "[0-1]+regex$" and "key" not in attributes`,
Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body != nil && body not matches "[0-1]+regex$" and "key" not in attributes`,
},
{
Name: "incorrect multi filter",
@@ -143,7 +143,7 @@ var testCases = []struct {
{Key: v3.AttributeKey{Key: "body", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}, Value: "[0-9]++", Operator: "nregex"},
{Key: v3.AttributeKey{Key: "key", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: "nexists"},
}},
Expr: `attributes.key <= 10 and body not matches "[0-9]++" and "key" not in attributes`,
Expr: `attributes["key"] != nil && attributes["key"] <= 10 and body not matches "[0-9]++" and "key" not in attributes`,
ExpectError: true,
},
}
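The rewritten expectations above guard every comparison with attributes["key"] != nil. A minimal Go sketch of why that guard matters, assuming the strings are evaluated with the expr engine (github.com/expr-lang/expr); the environment contents here are illustrative only:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// A record that does not carry the "key" attribute at all.
	env := map[string]interface{}{
		"attributes": map[string]interface{}{"other": "value"},
	}

	// Unguarded: comparing the missing (nil) attribute against 10 fails at runtime.
	if _, err := expr.Eval(`attributes["key"] < 10`, env); err != nil {
		fmt.Println("unguarded:", err)
	}

	// Guarded: the nil check short-circuits and the whole filter evaluates to false.
	out, err := expr.Eval(`attributes["key"] != nil && attributes["key"] < 10`, env)
	fmt.Println("guarded:", out, err) // guarded: false <nil>
}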

View File

@@ -142,9 +142,18 @@ func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.Series) []*v3.
}
seriesesByLabels[h].Points = append(seriesesByLabels[h].Points, series.Points...)
}
hashes := make([]uint64, 0, len(seriesesByLabels))
for h := range seriesesByLabels {
hashes = append(hashes, h)
}
sort.Slice(hashes, func(i, j int) bool {
return hashes[i] < hashes[j]
})
// Sort the points in each series by timestamp
for idx := range seriesesByLabels {
series := seriesesByLabels[idx]
for _, h := range hashes {
series := seriesesByLabels[h]
series.SortPoints()
series.RemoveDuplicatePoints()
mergedSeries = append(mergedSeries, series)
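The hunk above replaces direct map iteration with iteration over a sorted slice of hashes, so merged series come back in a stable order; Go randomizes map iteration, which made the previous output (and its tests) non-deterministic. A stripped-down sketch of the pattern, with the series type simplified for illustration:

package main

import (
	"fmt"
	"sort"
)

type series struct{ points []int }

// mergeDeterministic returns the map's values in ascending key order.
func mergeDeterministic(byHash map[uint64]*series) []*series {
	// Collect and sort the keys first; ranging over the map directly
	// yields a different order on every run.
	hashes := make([]uint64, 0, len(byHash))
	for h := range byHash {
		hashes = append(hashes, h)
	}
	sort.Slice(hashes, func(i, j int) bool { return hashes[i] < hashes[j] })

	merged := make([]*series, 0, len(hashes))
	for _, h := range hashes {
		merged = append(merged, byHash[h])
	}
	return merged
}

func main() {
	m := map[uint64]*series{42: {points: []int{1}}, 7: {points: []int{2}}}
	for _, s := range mergeDeterministic(m) {
		fmt.Println(s.points) // always [2] then [1]
	}
}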

View File

@@ -1,126 +0,0 @@
package tests
import (
"bytes"
"encoding/json"
"fmt"
"io"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/auth"
"go.signoz.io/signoz/pkg/query-service/model"
)
func invite(t *testing.T, email string) *model.InviteResponse {
q := endpoint + fmt.Sprintf("/api/v1/invite?email=%s", email)
resp, err := client.Get(q)
require.NoError(t, err)
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
require.NoError(t, err)
var inviteResp model.InviteResponse
err = json.Unmarshal(b, &inviteResp)
require.NoError(t, err)
return &inviteResp
}
func register(email, password, token string) (string, error) {
q := endpoint + "/api/v1/register"
req := auth.RegisterRequest{
Email: email,
Password: password,
InviteToken: token,
}
b, err := json.Marshal(req)
if err != nil {
return "", err
}
resp, err := client.Post(q, "application/json", bytes.NewBuffer(b))
if err != nil {
return "", err
}
defer resp.Body.Close()
b, err = io.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(b), nil
}
func login(email, password, refreshToken string) (*model.LoginResponse, error) {
q := endpoint + "/api/v1/login"
req := model.LoginRequest{
Email: email,
Password: password,
RefreshToken: refreshToken,
}
b, err := json.Marshal(req)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal")
}
resp, err := client.Post(q, "application/json", bytes.NewBuffer(b))
if err != nil {
return nil, errors.Wrap(err, "failed to post")
}
defer resp.Body.Close()
b, err = io.ReadAll(resp.Body)
if err != nil {
return nil, errors.Wrap(err, "failed to read body")
}
loginResp := &model.LoginResponse{}
err = json.Unmarshal(b, loginResp)
if err != nil {
return nil, errors.Wrap(err, "failed to unmarshal")
}
return loginResp, nil
}
func TestAuthInviteAPI(t *testing.T) {
t.Skip()
email := "abc@signoz.io"
resp := invite(t, email)
require.Equal(t, email, resp.Email)
require.NotNil(t, resp.InviteToken)
}
func TestAuthRegisterAPI(t *testing.T) {
email := "alice@signoz.io"
resp, err := register(email, "Password@123", "")
require.NoError(t, err)
require.Contains(t, resp, "user registered successfully")
}
func TestAuthLoginAPI(t *testing.T) {
t.Skip()
email := "abc-login@signoz.io"
password := "Password@123"
inv := invite(t, email)
resp, err := register(email, password, inv.InviteToken)
require.NoError(t, err)
require.Contains(t, resp, "user registered successfully")
loginResp, err := login(email, password, "")
require.NoError(t, err)
loginResp2, err := login("", "", loginResp.RefreshJwt)
require.NoError(t, err)
require.NotNil(t, loginResp2.AccessJwt)
}

View File

@@ -1,223 +0,0 @@
package tests
import (
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.signoz.io/signoz/pkg/query-service/model"
)
const (
endpoint = "http://localhost:8180"
)
var (
client http.Client
)
func setTTL(table, coldStorage, toColdTTL, deleteTTL string, jwtToken string) ([]byte, error) {
params := fmt.Sprintf("type=%s&duration=%s", table, deleteTTL)
if len(toColdTTL) > 0 {
params += fmt.Sprintf("&coldStorage=%s&toColdDuration=%s", coldStorage, toColdTTL)
}
var bearer = "Bearer " + jwtToken
req, err := http.NewRequest("POST", endpoint+"/api/v1/settings/ttl?"+params, nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", bearer)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
if err != nil {
return b, err
}
return b, nil
}
func TestListDisks(t *testing.T) {
t.Skip()
email := "alice@signoz.io"
password := "Password@123"
loginResp, err := login(email, password, "")
require.NoError(t, err)
var bearer = "Bearer " + loginResp.AccessJwt
req, err := http.NewRequest("POST", endpoint+"/api/v1/disks", nil)
req.Header.Add("Authorization", bearer)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.JSONEq(t, `[{"name":"default","type":"local"}, {"name":"s3","type":"s3"}]`, string(b))
}
func TestSetTTL(t *testing.T) {
email := "alice@signoz.io"
password := "Password@123"
loginResp, err := login(email, password, "")
require.NoError(t, err)
testCases := []struct {
caseNo int
coldStorage string
table string
coldTTL string
deleteTTL string
expected string
}{
{
1, "s3", "traces", "100h", "60h",
"Delete TTL should be greater than cold storage move TTL.",
},
{
2, "s3", "traces", "100", "60s",
"Not a valid toCold TTL duration 100",
},
{
3, "s3", "traces", "100s", "100",
"Not a valid TTL duration 100",
},
{
4, "s3", "metrics", "1h", "2h",
"move ttl has been successfully set up",
},
{
5, "s3", "traces", "10s", "6h",
"move ttl has been successfully set up",
},
}
for _, tc := range testCases {
r, err := setTTL(tc.table, tc.coldStorage, tc.coldTTL, tc.deleteTTL, loginResp.AccessJwt)
require.NoErrorf(t, err, "Failed case: %d", tc.caseNo)
require.Containsf(t, string(r), tc.expected, "Failed case: %d", tc.caseNo)
}
time.Sleep(20 * time.Second)
doneCh := make(chan struct{})
defer close(doneCh)
count := 0
for range minioClient.ListObjects(bucketName, "", false, doneCh) {
count++
}
require.True(t, count > 0, "No objects are present in Minio")
fmt.Printf("=== Found %d objects in Minio\n", count)
}
func getTTL(t *testing.T, table string, jwtToken string) *model.GetTTLResponseItem {
url := endpoint + fmt.Sprintf("/api/v1/settings/ttl?type=%s", table)
if len(table) == 0 {
url = endpoint + "/api/v1/settings/ttl"
}
var bearer = "Bearer " + jwtToken
req, err := http.NewRequest("GET", url, nil)
require.NoError(t, err)
req.Header.Add("Authorization", bearer)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
require.NoError(t, err)
res := &model.GetTTLResponseItem{}
require.NoError(t, json.Unmarshal(b, res))
return res
}
func TestGetTTL(t *testing.T) {
email := "alice@signoz.io"
password := "Password@123"
loginResp, err := login(email, password, "")
require.NoError(t, err)
resp := getTTL(t, "traces", loginResp.AccessJwt)
for resp.Status == "pending" {
time.Sleep(time.Second)
}
require.Equal(t, "success", resp.Status)
r, err := setTTL("traces", "s3", "1h", "2h", loginResp.AccessJwt)
require.NoError(t, err)
require.Contains(t, string(r), "successfully set up")
resp = getTTL(t, "traces", loginResp.AccessJwt)
for resp.Status == "pending" {
time.Sleep(time.Second)
resp = getTTL(t, "traces", loginResp.AccessJwt)
require.Equal(t, 1, resp.ExpectedTracesMoveTime)
require.Equal(t, 2, resp.ExpectedTracesTime)
}
resp = getTTL(t, "traces", loginResp.AccessJwt)
require.Equal(t, "success", resp.Status)
require.Equal(t, 1, resp.TracesMoveTime)
require.Equal(t, 2, resp.TracesTime)
resp = getTTL(t, "metrics", loginResp.AccessJwt)
for resp.Status == "pending" {
time.Sleep(time.Second)
}
require.Equal(t, "success", resp.Status)
r, err = setTTL("traces", "s3", "10h", "20h", loginResp.AccessJwt)
require.NoError(t, err)
require.Contains(t, string(r), "successfully set up")
resp = getTTL(t, "traces", loginResp.AccessJwt)
for resp.Status == "pending" {
time.Sleep(time.Second)
resp = getTTL(t, "traces", loginResp.AccessJwt)
}
require.Equal(t, "success", resp.Status)
require.Equal(t, 10, resp.TracesMoveTime)
require.Equal(t, 20, resp.TracesTime)
resp = getTTL(t, "metrics", loginResp.AccessJwt)
for resp.Status != "success" && resp.Status != "failed" {
time.Sleep(time.Second)
resp = getTTL(t, "metrics", loginResp.AccessJwt)
}
require.Equal(t, "success", resp.Status)
require.Equal(t, 1, resp.MetricsMoveTime)
require.Equal(t, 2, resp.MetricsTime)
r, err = setTTL("metrics", "s3", "0s", "0s", loginResp.AccessJwt)
require.NoError(t, err)
require.Contains(t, string(r), "Not a valid TTL duration 0s")
r, err = setTTL("traces", "s3", "0s", "0s", loginResp.AccessJwt)
require.NoError(t, err)
require.Contains(t, string(r), "Not a valid TTL duration 0s")
}
func TestMain(m *testing.M) {
if err := startCluster(); err != nil {
fmt.Println(err)
}
defer stopCluster()
m.Run()
}

View File

@@ -1,117 +0,0 @@
package tests
import (
"context"
"fmt"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"time"
"log"
minio "github.com/minio/minio-go/v6"
)
const (
prefix = "signoz_test"
minioEndpoint = "localhost:9100"
accessKey = "ash"
secretKey = "password"
bucketName = "test"
)
var (
minioClient *minio.Client
composeFile string
)
func init() {
goArch := runtime.GOARCH
if goArch == "arm64" {
composeFile = "./test-deploy/docker-compose.arm.yaml"
} else if goArch == "amd64" {
composeFile = "./test-deploy/docker-compose.yaml"
} else {
log.Fatalf("Unsupported architecture: %s", goArch)
}
}
func getCmd(args ...string) *exec.Cmd {
cmd := exec.CommandContext(context.Background(), args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = os.Environ()
return cmd
}
func startMinio() error {
log.Printf("Starting minio")
cmd := getCmd("docker", "run", "-d", "-p", "9100:9000", "-p", "9101:9001",
"--name", "signoz-minio-test", "-e", "MINIO_ROOT_USER=ash",
"-e", "MINIO_ROOT_PASSWORD=password",
"quay.io/minio/minio", "server", "/data", "--console-address", ":9001")
if err := cmd.Run(); err != nil {
return err
}
var err error
minioClient, err = minio.New(minioEndpoint, accessKey, secretKey, false)
if err != nil {
return err
}
if err = minioClient.MakeBucket(bucketName, ""); err != nil {
return err
}
return nil
}
func startCluster() error {
if err := os.MkdirAll("./test-deploy/data/minio/test", 0777); err != nil {
return err
}
if err := startMinio(); err != nil {
return err
}
cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix,
"up", "--force-recreate", "--build", "--remove-orphans", "--detach")
log.Printf("Starting signoz cluster...\n")
if err := cmd.Run(); err != nil {
log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err)
return err
}
client := http.Client{}
for i := 0; i < 10; i++ {
if _, err := client.Get("http://localhost:8180/api/v1/health"); err != nil {
time.Sleep(2 * time.Second)
} else {
log.Printf("CLUSTER UP\n")
return nil
}
}
return fmt.Errorf("query-service is not healthy")
}
func stopCluster() {
cmd := getCmd("docker-compose", "-f", composeFile, "-p", prefix, "down", "-v")
if err := cmd.Run(); err != nil {
log.Printf("Error while stopping the cluster. Error: %v\n", err)
}
if err := os.RemoveAll("./test-deploy/data"); err != nil {
log.Printf("Error while cleaning temporary dir. Error: %v\n", err)
}
cmd = getCmd("docker", "container", "rm", "-f", "signoz-minio-test")
if err := cmd.Run(); err != nil {
log.Printf("While running command: %q Error: %v\n", strings.Join(cmd.Args, " "), err)
}
log.Printf("CLUSTER DOWN: %s\n", prefix)
}

View File

@@ -1,35 +0,0 @@
global:
resolve_timeout: 1m
slack_api_url: 'https://hooks.slack.com/services/xxx'
route:
receiver: 'slack-notifications'
receivers:
- name: 'slack-notifications'
slack_configs:
- channel: '#alerts'
send_resolved: true
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{" "}}(
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
{{- end }}
{{- end -}}
)
{{- end }}
text: >-
{{ range .Alerts -}}
*Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}

View File

@@ -1,11 +0,0 @@
groups:
- name: ExampleCPULoadGroup
rules:
- alert: HighCpuLoad
expr: system_cpu_load_average_1m > 0.1
for: 0m
labels:
severity: warning
annotations:
summary: High CPU load
description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -1,75 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

File diff suppressed because it is too large.

View File

@@ -1,29 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>
<default>
<keep_free_space_bytes>10485760</keep_free_space_bytes>
</default>
<s3>
<type>s3</type>
<endpoint>http://172.17.0.1:9100/test//</endpoint>
<access_key_id>ash</access_key_id>
<secret_access_key>password</secret_access_key>
</s3>
</disks>
<policies>
<tiered>
<volumes>
<default>
<disk>default</disk>
</default>
<s3>
<disk>s3</disk>
<perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
</s3>
</volumes>
</tiered>
</policies>
</storage_configuration>
</clickhouse>

View File

@@ -1,123 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
Strongly recommended that regexp is ends with $
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>

View File

@@ -1,283 +0,0 @@
version: "2.4"
x-clickhouse-defaults: &clickhouse-defaults
restart: on-failure
image: clickhouse/clickhouse-server:24.1.2-alpine
tty: true
depends_on:
- zookeeper-1
# - zookeeper-2
# - zookeeper-3
logging:
options:
max-size: 50m
max-file: "3"
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test:
[
"CMD",
"wget",
"--spider",
"-q",
"0.0.0.0:8123/ping"
]
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
services:
zookeeper-1:
image: bitnami/zookeeper:3.7.1
container_name: signoz-zookeeper-1
user: root
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
volumes:
- ./data/zookeeper-1:/bitnami/zookeeper
environment:
- ZOO_SERVER_ID=1
- ZOO_SERVERS=0.0.0.0:2888:3888
# - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-2:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-2
# user: root
# ports:
# - "2182:2181"
# - "2889:2888"
# - "3889:3888"
# volumes:
# - ./data/zookeeper-2:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=2
# - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
# zookeeper-3:
# image: bitnami/zookeeper:3.7.0
# container_name: signoz-zookeeper-3
# user: root
# ports:
# - "2183:2181"
# - "2890:2888"
# - "3890:3888"
# volumes:
# - ./data/zookeeper-3:/bitnami/zookeeper
# environment:
# - ZOO_SERVER_ID=3
# - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
# - ALLOW_ANONYMOUS_LOGIN=yes
# - ZOO_AUTOPURGE_INTERVAL=1
clickhouse:
<<: *clickhouse-defaults
container_name: signoz-clickhouse
hostname: clickhouse
ports:
- "9000:9000"
- "8123:8123"
- "9181:9181"
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
- ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
- ./data/clickhouse/:/var/lib/clickhouse/
# clickhouse-2:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-2
# hostname: clickhouse-2
# ports:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-2/:/var/lib/clickhouse/
# clickhouse-3:
# <<: *clickhouse-defaults
# container_name: signoz-clickhouse-3
# hostname: clickhouse-3
# ports:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
# volumes:
# - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
# - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
# - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
# # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
# - ./data/clickhouse-3/:/var/lib/clickhouse/
alertmanager:
image: signoz/alertmanager:0.23.7
container_name: signoz-alertmanager
volumes:
- ./data/alertmanager:/data
depends_on:
query-service:
condition: service_healthy
restart: on-failure
command:
- --queryService.url=http://query-service:8085
- --storage.path=/data
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:latest
container_name: signoz-query-service
command:
[
"-config=/root/config/prometheus.yml",
"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port
# - "8080:8080" # query-service port
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
- ./data/signoz/:/var/lib/signoz/
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
restart: on-failure
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"-q",
"localhost:8080/api/v1/health"
]
interval: 30s
timeout: 5s
retries: 3
<<: *db-depend
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.10}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
depends_on:
clickhouse:
condition: service_healthy
# clickhouse-2:
# condition: service_healthy
# clickhouse-3:
# condition: service_healthy
otel-collector:
image: signoz/signoz-otel-collector:0.102.12
container_name: signoz-otel-collector
command:
[
"--config=/etc/otel-collector-config.yaml",
"--manager-config=/etc/manager-config.yaml",
"--copy-path=/var/tmp/collector-config.yaml",
"--feature-gates=-pkg.translator.prometheus.NormalizeName"
]
user: root # required for reading docker container logs
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /:/hostfs:ro
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- DOCKER_MULTI_NODE_CLUSTER=false
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
# - "8888:8888" # OtelCollector internal metrics
# - "8889:8889" # signoz spanmetrics exposed by the agent
# - "9411:9411" # Zipkin port
# - "13133:13133" # health check extension
# - "14250:14250" # Jaeger gRPC
# - "14268:14268" # Jaeger thrift HTTP
# - "55678:55678" # OpenCensus receiver
# - "55679:55679" # zPages extension
restart: on-failure
depends_on:
clickhouse:
condition: service_healthy
otel-collector-migrator:
condition: service_completed_successfully
query-service:
condition: service_healthy
logspout:
image: "gliderlabs/logspout:v3.2.14"
container_name: signoz-logspout
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
command: syslog+tcp://otel-collector:2255
depends_on:
- otel-collector
restart: on-failure
hotrod:
image: jaegertracing/example-hotrod:1.30
container_name: hotrod
logging:
options:
max-size: 50m
max-file: "3"
command: [ "all" ]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "signoz/locust:1.2.3"
container_name: load-hotrod
hostname: load-hotrod
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust

View File

@@ -1,148 +0,0 @@
receivers:
tcplog/docker:
listen_address: "0.0.0.0:2255"
operators:
- type: regex_parser
regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?'
timestamp:
parse_from: attributes.timestamp
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- type: move
from: attributes["body"]
to: body
- type: remove
field: attributes.timestamp
# please remove names from below if you want to collect logs from them
- type: filter
id: signoz_logs_filter
expr: 'attributes.container_name matches "^signoz-(logspout|frontend|alertmanager|query-service|otel-collector|clickhouse|zookeeper)"'
opencensus:
endpoint: 0.0.0.0:55678
otlp/spanmetrics:
protocols:
grpc:
endpoint: localhost:12345
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
# thrift_compact:
# endpoint: 0.0.0.0:6831
# thrift_binary:
# endpoint: 0.0.0.0:6832
hostmetrics:
collection_interval: 30s
root_path: /hostfs
scrapers:
cpu: {}
load: {}
memory: {}
disk: {}
filesystem: {}
network: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
# otel-collector internal metrics
- job_name: otel-collector
static_configs:
- targets:
- otel-collector:8888
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
signozspanmetrics/cumulative:
metrics_exporter: clickhousemetricswrite
latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
dimensions_cache_size: 100000
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
# memory_limiter:
# # 80% of maximum memory up to 2G
# limit_mib: 1500
# # 25% of limit up to 2G
# spike_limit_mib: 512
# check_interval: 5s
#
# # 50% of the maximum memory
# limit_percentage: 50
# # 20% of max memory usage spike expected
# spike_limit_percentage: 20
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
resourcedetection:
detectors: [env, system]
timeout: 2s
extensions:
health_check:
endpoint: 0.0.0.0:13133
zpages:
endpoint: 0.0.0.0:55679
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:
enabled: true
prometheus:
endpoint: 0.0.0.0:8889
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
docker_multi_node_cluster: ${env:DOCKER_MULTI_NODE_CLUSTER}
timeout: 10s
use_new_schema: true
# logging: {}
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions:
- health_check
- zpages
- pprof
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [signozspanmetrics/cumulative, batch]
exporters: [clickhousetraces]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [clickhousemetricswrite]
metrics/generic:
receivers: [hostmetrics, prometheus]
processors: [resourcedetection, batch]
exporters: [clickhousemetricswrite]
metrics/spanmetrics:
receivers: [otlp/spanmetrics]
exporters: [prometheus]
logs:
receivers: [otlp, tcplog/docker]
processors: [batch]
exporters: [clickhouselogsexporter]

View File

@@ -1 +0,0 @@
server_endpoint: ws://query-service:4320/v1/opamp

View File

@@ -1,25 +0,0 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/signoz_metrics

View File

@@ -419,28 +419,28 @@ var testGetClickhouseColumnName = []struct {
typeName: string(v3.AttributeKeyTypeTag),
dataType: string(v3.AttributeKeyDataTypeInt64),
field: "tag1",
want: "attribute_int64_tag1",
want: "`attribute_int64_tag1`",
},
{
name: "resource",
typeName: string(v3.AttributeKeyTypeResource),
dataType: string(v3.AttributeKeyDataTypeInt64),
field: "tag1",
want: "resource_int64_tag1",
want: "`resource_int64_tag1`",
},
{
name: "attribute old parser",
typeName: constants.Attributes,
dataType: string(v3.AttributeKeyDataTypeInt64),
field: "tag1",
want: "attribute_int64_tag1",
want: "`attribute_int64_tag1`",
},
{
name: "resource old parser",
typeName: constants.Resources,
dataType: string(v3.AttributeKeyDataTypeInt64),
field: "tag1",
want: "resource_int64_tag1",
want: "`resource_int64_tag1`",
},
}
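The expected column names above are now wrapped in backticks. A hypothetical helper sketching the quoting these expectations imply; the function name and the backslash escaping are assumptions, not the code under test:

package main

import (
	"fmt"
	"strings"
)

// quoteClickhouseIdentifier wraps a derived column name in backticks so that
// characters such as '.' or '-' in attribute keys cannot break the generated SQL.
func quoteClickhouseIdentifier(name string) string {
	// Escape any backtick already present in the name before wrapping it.
	return "`" + strings.ReplaceAll(name, "`", "\\`") + "`"
}

func main() {
	fmt.Println(quoteClickhouseIdentifier("attribute_int64_tag1")) // `attribute_int64_tag1`
	fmt.Println(quoteClickhouseIdentifier("resource_int64_tag1"))  // `resource_int64_tag1`
}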