mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz
Chore/analytics (#1922)
* fix: reduced rate limit to 2 for each event in 1 min
* feat: added new event for length of filters in logs search page
* feat: added distributed cluster info
* fix: length of filters in logs
* feat: dashboard metadata with no rateLimit
* feat: active user

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
This commit is contained in:
parent
895c721b37
commit
b11f79b4c7
@@ -45,6 +45,7 @@ import (
    am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
    "go.signoz.io/signoz/pkg/query-service/interfaces"
    "go.signoz.io/signoz/pkg/query-service/model"
+    "go.signoz.io/signoz/pkg/query-service/telemetry"
    "go.signoz.io/signoz/pkg/query-service/utils"
    "go.uber.org/zap"
)
@@ -3088,6 +3089,20 @@ func (r *ClickHouseReader) GetSamplesInfoInLastHeartBeatInterval(ctx context.Con

    return totalSamples, nil
}

+func (r *ClickHouseReader) GetDistributedInfoInLastHeartBeatInterval(ctx context.Context) (map[string]interface{}, error) {
+
+    clusterInfo := []model.ClusterInfo{}
+
+    queryStr := `SELECT shard_num, shard_weight, replica_num, errors_count, slowdowns_count, estimated_recovery_time FROM system.clusters where cluster='cluster';`
+    r.db.Select(ctx, &clusterInfo, queryStr)
+    if len(clusterInfo) == 1 {
+        return clusterInfo[0].GetMapFromStruct(), nil
+    }
+
+    return nil, nil
+}
+
func (r *ClickHouseReader) GetLogsInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error) {

    var totalLogLines uint64
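For context, a minimal illustrative sketch (not part of this commit) of the payload this new reader method hands to the telemetry heartbeat when system.clusters holds exactly one row for cluster='cluster'; the keys mirror the ClusterInfo JSON tags added further below, and the numeric values here are invented:

package main

import "fmt"

func main() {
	// Hypothetical "Distributed" event payload; GetMapFromStruct round-trips
	// ClusterInfo through encoding/json, so numbers arrive as float64.
	distributedInfo := map[string]interface{}{
		"shard_num":               float64(1),
		"shard_weight":            float64(1),
		"replica_num":             float64(1),
		"errors_count":            float64(0),
		"slowdowns_count":         float64(0),
		"estimated_recovery_time": float64(0),
	}
	fmt.Println(distributedInfo)
}

When system.clusters returns zero rows or more than one, the method returns nil and the heartbeat still emits the Distributed event, just with a nil payload.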
@@ -3233,11 +3248,16 @@ func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilter
    }

    isPaginatePrev := logs.CheckIfPrevousPaginateAndModifyOrder(params)
-    filterSql, err := logs.GenerateSQLWhere(fields, params)
+    filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, params)
    if err != nil {
        return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
    }

+    data := map[string]interface{}{
+        "lenFilters": lenFilters,
+    }
+    telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
    query := fmt.Sprintf("%s from %s.%s", constants.LogsSQLSelect, r.logsDB, r.logsTable)

    if filterSql != "" {
@@ -3267,10 +3287,15 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
        return
    }

-    filterSql, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
+    filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
        Query: client.Filter.Query,
    })

+    data := map[string]interface{}{
+        "lenFilters": lenFilters,
+    }
+    telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
    if err != nil {
        client.Error <- err
        return
@@ -3347,13 +3372,18 @@ func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.Logs
        return nil, apiErr
    }

-    filterSql, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
+    filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
        Query: params.Query,
    })
    if err != nil {
        return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
    }

+    data := map[string]interface{}{
+        "lenFilters": lenFilters,
+    }
+    telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
+
    query := ""
    if params.GroupBy != "" {
        query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, toString(%s) as groupBy, "+
@@ -1333,6 +1333,9 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
    }

    telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data)
+    if (data["number"] != 0) || (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
+        telemetry.GetInstance().AddActiveTracesUser()
+    }

    aH.WriteJSON(w, r, result)
}
@@ -279,20 +279,23 @@ func CheckIfPrevousPaginateAndModifyOrder(params *model.LogsFilterParams) (isPag
    return
}

-func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilterParams) (string, error) {
+func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilterParams) (string, int, error) {
    var tokens []string
    var err error
    var sqlWhere string
+    var lenTokens = 0
    if params.Query != "" {
        tokens, err = parseLogQuery(params.Query)

        if err != nil {
-            return sqlWhere, err
+            return sqlWhere, -1, err
        }
+        lenTokens = len(tokens)
    }

    tokens, err = replaceInterestingFields(allFields, tokens)
    if err != nil {
-        return sqlWhere, err
+        return sqlWhere, -1, err
    }

    filterTokens := []string{}
@@ -342,5 +345,5 @@ func GenerateSQLWhere(allFields *model.GetFieldsResponse, params *model.LogsFilt

    sqlWhere = strings.Join(tokens, "")

-    return sqlWhere, nil
+    return sqlWhere, lenTokens, nil
}
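A minimal sketch (not part of this diff) of the new three-value contract: the WHERE fragment, the token count that feeds the "Logs Filters" event, and an error, with -1 reported as the count on parse failure. The tokenizer below is a crude stand-in for parseLogQuery and the query string is hypothetical:

package main

import (
	"fmt"
	"strings"
)

// generateSQLWhereStub mimics the shape of logs.GenerateSQLWhere after this
// change: (whereClause, lenTokens, err), with -1 as the count when parsing
// fails. The tokenizing here is only a rough stand-in for parseLogQuery.
func generateSQLWhereStub(query string) (string, int, error) {
	if query == "" {
		return "", 0, nil
	}
	if strings.Count(query, "'")%2 != 0 {
		return "", -1, fmt.Errorf("unbalanced quotes in query")
	}
	tokens := strings.Split(query, " AND ")
	return " WHERE " + strings.Join(tokens, " AND "), len(tokens), nil
}

func main() {
	// Hypothetical logs search query with two filters.
	where, lenFilters, err := generateSQLWhereStub("status gte 200 AND operation in ('list')")
	if err != nil {
		return
	}
	fmt.Println(where)
	fmt.Println("lenFilters:", lenFilters) // 2 – the count reported by the new "Logs Filters" event
}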
@@ -1,8 +1,11 @@
package app

import (
+    "bytes"
    "context"
+    "encoding/json"
    "fmt"
+    "io/ioutil"
    "net"
    "net/http"
    _ "net/http/pprof" // http profiler
@@ -235,15 +238,77 @@ func (lrw *loggingResponseWriter) Flush() {
    lrw.ResponseWriter.(http.Flusher).Flush()
}

+func extractDashboardMetaData(path string, r *http.Request) (map[string]interface{}, bool) {
+    pathToExtractBodyFrom := "/api/v2/metrics/query_range"
+    var requestBody map[string]interface{}
+    data := map[string]interface{}{}
+
+    if path == pathToExtractBodyFrom && (r.Method == "POST") {
+        bodyBytes, _ := ioutil.ReadAll(r.Body)
+        r.Body.Close() // must close
+        r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+
+        json.Unmarshal(bodyBytes, &requestBody)
+
+    } else {
+        return nil, false
+    }
+
+    compositeMetricQuery, compositeMetricQueryExists := requestBody["compositeMetricQuery"]
+    compositeMetricQueryMap := compositeMetricQuery.(map[string]interface{})
+    if compositeMetricQueryExists {
+        queryType, queryTypeExists := compositeMetricQueryMap["queryType"]
+        if queryTypeExists {
+            data["queryType"] = queryType
+        }
+        panelType, panelTypeExists := compositeMetricQueryMap["panelType"]
+        if panelTypeExists {
+            data["panelType"] = panelType
+        }
+    }
+
+    datasource, datasourceExists := requestBody["dataSource"]
+    if datasourceExists {
+        data["datasource"] = datasource
+    }
+
+    telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_DASHBOARDS_METADATA, data, false)
+
+    return data, true
+}
+
+func getActiveMetricsOrLogs(path string, r *http.Request) {
+    if path == "/api/v1/dashboards/{uuid}" {
+        telemetry.GetInstance().AddActiveMetricsUser()
+    }
+    if path == "/api/v1/logs" {
+        hasFilters := len(r.URL.Query().Get("q"))
+        if hasFilters > 0 {
+            telemetry.GetInstance().AddActiveLogsUser()
+        }
+    }
+}
+
func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        route := mux.CurrentRoute(r)
        path, _ := route.GetPathTemplate()

+        dashboardMetadata, metadataExists := extractDashboardMetaData(path, r)
+        getActiveMetricsOrLogs(path, r)
+
        lrw := NewLoggingResponseWriter(w)
        next.ServeHTTP(lrw, r)

        data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
+        if metadataExists {
+            for key, value := range dashboardMetadata {
+                data[key] = value
+            }
+        }
+
        if telemetry.GetInstance().IsSampled() {
            if _, ok := telemetry.IgnoredPaths()[path]; !ok {
                telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
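To make the extraction above concrete, a minimal sketch (not part of this commit) of a hypothetical /api/v2/metrics/query_range body and the metadata map pulled out of it; only the field names come from the code above, the values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical request body; "dataSource", "compositeMetricQuery",
	// "queryType" and "panelType" are the keys the middleware looks for.
	body := []byte(`{
		"dataSource": 1,
		"compositeMetricQuery": {"queryType": 3, "panelType": "TIME_SERIES"}
	}`)

	var requestBody map[string]interface{}
	json.Unmarshal(body, &requestBody)

	// Same lookups as extractDashboardMetaData, written with checked assertions.
	data := map[string]interface{}{}
	if cmq, ok := requestBody["compositeMetricQuery"].(map[string]interface{}); ok {
		if v, ok := cmq["queryType"]; ok {
			data["queryType"] = v
		}
		if v, ok := cmq["panelType"]; ok {
			data["panelType"] = v
		}
	}
	if v, ok := requestBody["dataSource"]; ok {
		data["datasource"] = v
	}
	fmt.Println(data)
}

The resulting map is sent on its own as "Dashboards Metadata" with rate limiting disabled, and is also merged into the per-request TELEMETRY_EVENT_PATH event by analyticsMiddleware.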
@@ -63,6 +63,7 @@ type Reader interface {
    GetSamplesInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error)
    GetLogsInfoInLastHeartBeatInterval(ctx context.Context) (uint64, error)
    GetTagsInfoInLastHeartBeatInterval(ctx context.Context) (*model.TagsInfo, error)
+    GetDistributedInfoInLastHeartBeatInterval(ctx context.Context) (map[string]interface{}, error)
    // Logs
    GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
    UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError
@@ -569,3 +569,19 @@ type TagTelemetryData struct {
    Env      string `json:"env" ch:"env"`
    Language string `json:"language" ch:"language"`
}
+
+type ClusterInfo struct {
+    ShardNum              uint32 `json:"shard_num" ch:"shard_num"`
+    ShardWeight           uint32 `json:"shard_weight" ch:"shard_weight"`
+    ReplicaNum            uint32 `json:"replica_num" ch:"replica_num"`
+    ErrorsCount           uint32 `json:"errors_count" ch:"errors_count"`
+    SlowdownsCount        uint32 `json:"slowdowns_count" ch:"slowdowns_count"`
+    EstimatedRecoveryTime uint32 `json:"estimated_recovery_time" ch:"estimated_recovery_time"`
+}
+
+func (ci *ClusterInfo) GetMapFromStruct() map[string]interface{} {
+    var clusterInfoMap map[string]interface{}
+    data, _ := json.Marshal(*ci)
+    json.Unmarshal(data, &clusterInfoMap)
+    return clusterInfoMap
+}
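A self-contained sketch of the marshal/unmarshal pattern GetMapFromStruct relies on, using a local stand-in struct rather than the real model package; note that numeric fields come back as float64 in the resulting map:

package main

import (
	"encoding/json"
	"fmt"
)

// clusterInfo is a local stand-in mirroring a slice of model.ClusterInfo,
// purely for illustration.
type clusterInfo struct {
	ShardNum    uint32 `json:"shard_num"`
	ErrorsCount uint32 `json:"errors_count"`
}

func main() {
	ci := clusterInfo{ShardNum: 1, ErrorsCount: 0}

	// Same technique as GetMapFromStruct: marshal the struct, then unmarshal
	// into a generic map so it can be attached to a telemetry event.
	var m map[string]interface{}
	data, _ := json.Marshal(ci)
	json.Unmarshal(data, &m)

	fmt.Printf("%T %v\n", m["shard_num"], m) // float64 after the round trip
}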
@@ -32,19 +32,24 @@ const (
    TELEMETRY_LICENSE_ACT_FAILED = "License Activation Failed"
    TELEMETRY_EVENT_ENVIRONMENT = "Environment"
    TELEMETRY_EVENT_LANGUAGE = "Language"
+    TELEMETRY_EVENT_LOGS_FILTERS = "Logs Filters"
+    TELEMETRY_EVENT_DISTRIBUTED = "Distributed"
+    TELEMETRY_EVENT_DASHBOARDS_METADATA = "Dashboards Metadata"
+    TELEMETRY_EVENT_ACTIVE_USER = "Active User"
)

const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz"
const ph_api_key = "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"

const IP_NOT_FOUND_PLACEHOLDER = "NA"
const DEFAULT_NUMBER_OF_SERVICES = 6

const HEART_BEAT_DURATION = 6 * time.Hour

// const HEART_BEAT_DURATION = 10 * time.Second

const RATE_LIMIT_CHECK_DURATION = 1 * time.Minute
-const RATE_LIMIT_VALUE = 10
+const RATE_LIMIT_VALUE = 2

// const RATE_LIMIT_CHECK_DURATION = 20 * time.Second
// const RATE_LIMIT_VALUE = 5
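A minimal sketch of what the lowered limit means in practice: at most RATE_LIMIT_VALUE sends per event name per window, with the counter reset assumed to happen elsewhere on a RATE_LIMIT_CHECK_DURATION ticker (the reset itself is not part of this diff):

package main

import (
	"fmt"
	"time"
)

const (
	RATE_LIMIT_CHECK_DURATION = 1 * time.Minute
	RATE_LIMIT_VALUE          = 2
)

// allow mimics the guard inside SendEvent: each event name may be recorded at
// most RATE_LIMIT_VALUE times per window; clearing the counters is assumed to
// be driven by a RATE_LIMIT_CHECK_DURATION ticker elsewhere.
func allow(rateLimits map[string]int8, event string) bool {
	if rateLimits[event] < RATE_LIMIT_VALUE {
		rateLimits[event] += 1
		return true
	}
	return false
}

func main() {
	rateLimits := map[string]int8{}
	for i := 0; i < 4; i++ {
		fmt.Println("send attempt", i, "allowed:", allow(rateLimits, "Logs Filters"))
	}
	// Attempts 0 and 1 go through; 2 and 3 are dropped until the window resets.
}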
@@ -64,6 +69,16 @@ func (a *Telemetry) IsSampled() bool {

}

+func (telemetry *Telemetry) AddActiveTracesUser() {
+    telemetry.activeUser["traces"] = 1
+}
+func (telemetry *Telemetry) AddActiveMetricsUser() {
+    telemetry.activeUser["metrics"] = 1
+}
+func (telemetry *Telemetry) AddActiveLogsUser() {
+    telemetry.activeUser["logs"] = 1
+}
+
type Telemetry struct {
    operator analytics.Client
    phOperator ph.Client
@@ -76,6 +91,7 @@ type Telemetry struct {
    minRandInt int
    maxRandInt int
    rateLimits map[string]int8
+    activeUser map[string]int8
}

func createTelemetry() {
@@ -111,6 +127,13 @@ func createTelemetry() {
        for {
            select {
            case <-ticker.C:

+                if (telemetry.activeUser["traces"] != 0) || (telemetry.activeUser["metrics"] != 0) || (telemetry.activeUser["logs"] != 0) {
+                    telemetry.activeUser["any"] = 1
+                }
+                telemetry.SendEvent(TELEMETRY_EVENT_ACTIVE_USER, map[string]interface{}{"traces": telemetry.activeUser["traces"], "metrics": telemetry.activeUser["metrics"], "logs": telemetry.activeUser["logs"], "any": telemetry.activeUser["any"]})
+                telemetry.activeUser = map[string]int8{"traces": 0, "metrics": 0, "logs": 0, "any": 0}
+
                tagsInfo, _ := telemetry.reader.GetTagsInfoInLastHeartBeatInterval(context.Background())

                if len(tagsInfo.Env) != 0 {
@@ -138,6 +161,10 @@ func createTelemetry() {
                    data[key] = value
                }
                telemetry.SendEvent(TELEMETRY_EVENT_HEART_BEAT, data)

+                getDistributedInfoInLastHeartBeatInterval, _ := telemetry.reader.GetDistributedInfoInLastHeartBeatInterval(context.Background())
+                telemetry.SendEvent(TELEMETRY_EVENT_DISTRIBUTED, getDistributedInfoInLastHeartBeatInterval)
+
            }
        }
    }()
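For reference, a minimal sketch (not part of the commit) of the "Active User" payload for a heartbeat window in which only the logs explorer was touched; the flag values are invented:

package main

import "fmt"

func main() {
	// 0/1 flags set by AddActiveTracesUser / AddActiveMetricsUser / AddActiveLogsUser.
	activeUser := map[string]int8{"traces": 0, "metrics": 0, "logs": 1, "any": 0}
	if activeUser["traces"] != 0 || activeUser["metrics"] != 0 || activeUser["logs"] != 0 {
		activeUser["any"] = 1
	}
	payload := map[string]interface{}{
		"traces":  activeUser["traces"],
		"metrics": activeUser["metrics"],
		"logs":    activeUser["logs"],
		"any":     activeUser["any"],
	}
	fmt.Println("Active User", payload)
	// After sending, the map is reset to all zeros for the next HEART_BEAT_DURATION window.
}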
@@ -207,7 +234,12 @@ func (a *Telemetry) checkEvents(event string) bool {
    return sendEvent
}

-func (a *Telemetry) SendEvent(event string, data map[string]interface{}) {
+func (a *Telemetry) SendEvent(event string, data map[string]interface{}, opts ...bool) {
+
+    rateLimitFlag := true
+    if len(opts) > 0 {
+        rateLimitFlag = opts[0]
+    }

    if !a.isTelemetryEnabled() {
        return
@@ -218,10 +250,12 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}) {
        return
    }

-    if a.rateLimits[event] < RATE_LIMIT_VALUE {
-        a.rateLimits[event] += 1
-    } else {
-        return
+    if rateLimitFlag {
+        if a.rateLimits[event] < RATE_LIMIT_VALUE {
+            a.rateLimits[event] += 1
+        } else {
+            return
+        }
    }

    // zap.S().Info(data)
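A small sketch of how call sites use the new variadic flag; the local sendEvent below only mirrors the signature and default behaviour, it is not the real telemetry client, and the data values are placeholders:

package main

import "fmt"

// sendEvent mirrors the new SendEvent signature: an optional trailing bool
// controls whether the per-event rate limit applies (default true).
func sendEvent(event string, data map[string]interface{}, opts ...bool) {
	rateLimitFlag := true
	if len(opts) > 0 {
		rateLimitFlag = opts[0]
	}
	fmt.Println(event, data, "rate limited:", rateLimitFlag)
}

func main() {
	// Default call sites stay rate limited.
	sendEvent("Logs Filters", map[string]interface{}{"lenFilters": 3})
	// Dashboard metadata opts out, matching the explicit false passed in extractDashboardMetaData.
	sendEvent("Dashboards Metadata", map[string]interface{}{"queryType": 3}, false)
}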