From 190767fd0a75ae9abddd8e65d8aa9449667468b5 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Thu, 24 Oct 2024 12:17:24 +0530 Subject: [PATCH] chore: add k8s nodes, namespaces, and cluster list (#6230) --- pkg/query-service/app/http_handler.go | 30 +- pkg/query-service/app/infra.go | 159 ++++++++ .../app/inframetrics/clusters.go | 342 +++++++++++++++++ pkg/query-service/app/inframetrics/common.go | 12 + .../app/inframetrics/namespaces.go | 329 +++++++++++++++++ pkg/query-service/app/inframetrics/nodes.go | 349 ++++++++++++++++++ .../app/inframetrics/nodes_query.go | 118 ++++++ pkg/query-service/app/inframetrics/pods.go | 3 +- pkg/query-service/model/infra.go | 73 ++++ 9 files changed, 1411 insertions(+), 4 deletions(-) create mode 100644 pkg/query-service/app/inframetrics/clusters.go create mode 100644 pkg/query-service/app/inframetrics/namespaces.go create mode 100644 pkg/query-service/app/inframetrics/nodes.go create mode 100644 pkg/query-service/app/inframetrics/nodes_query.go diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 51198f65fa..0adc8a735f 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -112,9 +112,12 @@ type APIHandler struct { UseLogsNewSchema bool - hostsRepo *inframetrics.HostsRepo - processesRepo *inframetrics.ProcessesRepo - podsRepo *inframetrics.PodsRepo + hostsRepo *inframetrics.HostsRepo + processesRepo *inframetrics.ProcessesRepo + podsRepo *inframetrics.PodsRepo + nodesRepo *inframetrics.NodesRepo + namespacesRepo *inframetrics.NamespacesRepo + clustersRepo *inframetrics.ClustersRepo } type APIHandlerOpts struct { @@ -187,6 +190,9 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { hostsRepo := inframetrics.NewHostsRepo(opts.Reader, querierv2) processesRepo := inframetrics.NewProcessesRepo(opts.Reader, querierv2) podsRepo := inframetrics.NewPodsRepo(opts.Reader, querierv2) + nodesRepo := inframetrics.NewNodesRepo(opts.Reader, 
querierv2) + namespacesRepo := inframetrics.NewNamespacesRepo(opts.Reader, querierv2) + clustersRepo := inframetrics.NewClustersRepo(opts.Reader, querierv2) aH := &APIHandler{ reader: opts.Reader, @@ -208,6 +214,9 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { hostsRepo: hostsRepo, processesRepo: processesRepo, podsRepo: podsRepo, + nodesRepo: nodesRepo, + namespacesRepo: namespacesRepo, + clustersRepo: clustersRepo, } logsQueryBuilder := logsv3.PrepareLogsQuery @@ -371,6 +380,21 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid podsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getPodAttributeKeys)).Methods(http.MethodGet) podsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPodAttributeValues)).Methods(http.MethodGet) podsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPodList)).Methods(http.MethodPost) + + nodesSubRouter := router.PathPrefix("/api/v1/nodes").Subrouter() + nodesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNodeAttributeKeys)).Methods(http.MethodGet) + nodesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNodeAttributeValues)).Methods(http.MethodGet) + nodesSubRouter.HandleFunc("/list", am.ViewAccess(aH.getNodeList)).Methods(http.MethodPost) + + namespacesSubRouter := router.PathPrefix("/api/v1/namespaces").Subrouter() + namespacesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNamespaceAttributeKeys)).Methods(http.MethodGet) + namespacesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNamespaceAttributeValues)).Methods(http.MethodGet) + namespacesSubRouter.HandleFunc("/list", am.ViewAccess(aH.getNamespaceList)).Methods(http.MethodPost) + + clustersSubRouter := router.PathPrefix("/api/v1/clusters").Subrouter() + clustersSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getClusterAttributeKeys)).Methods(http.MethodGet) + clustersSubRouter.HandleFunc("/attribute_values", 
am.ViewAccess(aH.getClusterAttributeValues)).Methods(http.MethodGet) + clustersSubRouter.HandleFunc("/list", am.ViewAccess(aH.getClusterList)).Methods(http.MethodPost) } func (aH *APIHandler) RegisterWebSocketPaths(router *mux.Router, am *AuthMiddleware) { diff --git a/pkg/query-service/app/infra.go b/pkg/query-service/app/infra.go index 413c1f0810..73d10bdddb 100644 --- a/pkg/query-service/app/infra.go +++ b/pkg/query-service/app/infra.go @@ -175,3 +175,162 @@ func (aH *APIHandler) getPodList(w http.ResponseWriter, r *http.Request) { aH.Respond(w, podList) } + +func (aH *APIHandler) getNodeAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.nodesRepo.GetNodeAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getNodeAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.nodesRepo.GetNodeAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getNodeList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.NodeListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + nodeList, err := aH.nodesRepo.GetNodeList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, nodeList) +} + +func (aH 
*APIHandler) getNamespaceAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.namespacesRepo.GetNamespaceAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getNamespaceAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.namespacesRepo.GetNamespaceAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getNamespaceList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.NamespaceListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + namespaceList, err := aH.namespacesRepo.GetNamespaceList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, namespaceList) +} + +func (aH *APIHandler) getClusterAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.clustersRepo.GetClusterAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getClusterAttributeValues(w 
http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.clustersRepo.GetClusterAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.ClusterListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + clusterList, err := aH.clustersRepo.GetClusterList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, clusterList) +} diff --git a/pkg/query-service/app/inframetrics/clusters.go b/pkg/query-service/app/inframetrics/clusters.go new file mode 100644 index 0000000000..70ef5ca95a --- /dev/null +++ b/pkg/query-service/app/inframetrics/clusters.go @@ -0,0 +1,342 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForClusters = "k8s_node_cpu_utilization" + + clusterAttrsToEnrich = []string{"k8s_cluster_name"} + + k8sClusterUIDAttrKey = "k8s_cluster_uid" + + queryNamesForClusters = map[string][]string{ + "cpu": {"A"}, + "cpu_allocatable": {"B"}, + "memory": {"C"}, + "memory_allocatable": {"D"}, + } + clusterQueryNames = []string{"A", "B", "C", "D"} +) + +type ClustersRepo struct { + reader 
interfaces.Reader + querierV2 interfaces.Querier +} + +func NewClustersRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *ClustersRepo { + return &ClustersRepo{reader: reader, querierV2: querierV2} +} + +func (n *ClustersRepo) GetClusterAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForClusters + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := n.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + return attributeKeysResponse, nil +} + +func (n *ClustersRepo) GetClusterAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForClusters + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := n.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (p *ClustersRepo) getMetadataAttributes(ctx context.Context, req model.ClusterListRequest) (map[string]map[string]string, error) { + clusterAttrs := map[string]map[string]string{} + + for _, key := range clusterAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForClusters, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := 
p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + clusterUID := stringData[k8sClusterUIDAttrKey] + if _, ok := clusterAttrs[clusterUID]; !ok { + clusterAttrs[clusterUID] = map[string]string{} + } + + for _, key := range req.GroupBy { + clusterAttrs[clusterUID][key.Key] = stringData[key.Key] + } + } + + return clusterAttrs, nil +} + +func (p *ClustersRepo) getTopClusterGroups(ctx context.Context, req model.ClusterListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopClusters(req) + + queryNames := queryNamesForClusters[req.OrderBy.ColumnName] + topClusterGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topClusterGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topClusterGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topClusterGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + max := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopClusterGroupsSeries := formattedResponse[0].Series[req.Offset:int(max)] + + topClusterGroups := []map[string]string{} + for _, series := range paginatedTopClusterGroupsSeries { + topClusterGroups = append(topClusterGroups, series.Labels) + } + allClusterGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allClusterGroups = append(allClusterGroups, series.Labels) + } + + return topClusterGroups, allClusterGroups, nil +} + +func (p *ClustersRepo) GetClusterList(ctx context.Context, req model.ClusterListRequest) (model.ClusterListResponse, error) { + resp := model.ClusterListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sClusterUIDAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := 
int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := NodesTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + query.GroupBy = req.GroupBy + } + + clusterAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topClusterGroups, allClusterGroups, err := p.getTopClusterGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topClusterGroup := range topClusterGroups { + for k, v := range topClusterGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.ClusterListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.ClusterListRecord{ + CPUUsage: -1, + CPUAllocatable: -1, + MemoryUsage: -1, + MemoryAllocatable: -1, + } + + if 
clusterUID, ok := row.Data[k8sClusterUIDAttrKey].(string); ok { + record.ClusterUID = clusterUID + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + + if cpuAllocatable, ok := row.Data["B"].(float64); ok { + record.CPUAllocatable = cpuAllocatable + } + + if mem, ok := row.Data["C"].(float64); ok { + record.MemoryUsage = mem + } + + if memoryAllocatable, ok := row.Data["D"].(float64); ok { + record.MemoryAllocatable = memoryAllocatable + } + + record.Meta = map[string]string{} + if _, ok := clusterAttrs[record.ClusterUID]; ok { + record.Meta = clusterAttrs[record.ClusterUID] + } + + for k, v := range row.Data { + if slices.Contains(clusterQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allClusterGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/common.go b/pkg/query-service/app/inframetrics/common.go index da9b047462..6f83a6d46c 100644 --- a/pkg/query-service/app/inframetrics/common.go +++ b/pkg/query-service/app/inframetrics/common.go @@ -57,6 +57,18 @@ func getParamsForTopPods(req model.PodListRequest) (int64, string, string) { return getParamsForTopItems(req.Start, req.End) } +func getParamsForTopNodes(req model.NodeListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopNamespaces(req model.NamespaceListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopClusters(req model.ClusterListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + // TODO(srikanthccv): remove this // What is happening here? 
// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint diff --git a/pkg/query-service/app/inframetrics/namespaces.go b/pkg/query-service/app/inframetrics/namespaces.go new file mode 100644 index 0000000000..3e336672c5 --- /dev/null +++ b/pkg/query-service/app/inframetrics/namespaces.go @@ -0,0 +1,329 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForNamespaces = "k8s_pod_cpu_utilization" + + namespaceAttrsToEnrich = []string{ + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForNamespaces = map[string][]string{ + "cpu": {"A"}, + "memory": {"D"}, + } + namespaceQueryNames = []string{"A", "D"} + + attributesKeysForNamespaces = []v3.AttributeKey{ + {Key: "k8s_namespace_name"}, + {Key: "k8s_cluster_name"}, + } + + k8sNamespaceNameAttrKey = "k8s_namespace_name" +) + +type NamespacesRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewNamespacesRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *NamespacesRepo { + return &NamespacesRepo{reader: reader, querierV2: querierV2} +} + +func (p *NamespacesRepo) GetNamespaceAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + return &v3.FilterAttributeKeyResponse{AttributeKeys: attributesKeysForNamespaces}, nil +} + +func (p *NamespacesRepo) GetNamespaceAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNamespaces + if 
req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + return attributeValuesResponse, nil +} + +func (p *NamespacesRepo) getMetadataAttributes(ctx context.Context, req model.NamespaceListRequest) (map[string]map[string]string, error) { + namespaceAttrs := map[string]map[string]string{} + + for _, key := range namespaceAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForNamespaces, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + namespaceName := stringData[k8sNamespaceNameAttrKey] + if _, ok := namespaceAttrs[namespaceName]; !ok { + namespaceAttrs[namespaceName] = map[string]string{} + } + + for _, key := range req.GroupBy { + namespaceAttrs[namespaceName][key.Key] = stringData[key.Key] + } + } + + return namespaceAttrs, nil +} + +func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.NamespaceListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := 
getParamsForTopNamespaces(req) + + queryNames := queryNamesForNamespaces[req.OrderBy.ColumnName] + topNamespaceGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + topNamespaceGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topNamespaceGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topNamespaceGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + paginatedTopNamespaceGroupsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit] + + topNamespaceGroups := []map[string]string{} + for _, series := range paginatedTopNamespaceGroupsSeries { + 
topNamespaceGroups = append(topNamespaceGroups, series.Labels) + } + allNamespaceGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allNamespaceGroups = append(allNamespaceGroups, series.Labels) + } + + return topNamespaceGroups, allNamespaceGroups, nil +} + +func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.NamespaceListRequest) (model.NamespaceListResponse, error) { + resp := model.NamespaceListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sNamespaceNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := PodsTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, q := range query.CompositeQuery.BuilderQueries { + + if !slices.Contains(namespaceQueryNames, q.QueryName) { + delete(query.CompositeQuery.BuilderQueries, q.QueryName) + } + + q.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if q.Filters == nil { + q.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + q.Filters.Items = append(q.Filters.Items, req.Filters.Items...) 
+ } + q.GroupBy = req.GroupBy + } + + namespaceAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topNamespaceGroups, allNamespaceGroups, err := p.getTopNamespaceGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topNamespaceGroup := range topNamespaceGroups { + for k, v := range topNamespaceGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.NamespaceListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.NamespaceListRecord{ + CPUUsage: -1, + MemoryUsage: -1, + } + + if name, ok := row.Data[k8sNamespaceNameAttrKey].(string); ok { + record.NamespaceName = name + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + + if memory, ok := row.Data["D"].(float64); ok { + record.MemoryUsage = memory + } + + record.Meta = map[string]string{} + if _, ok := namespaceAttrs[record.NamespaceName]; ok { + record.Meta = namespaceAttrs[record.NamespaceName] + } + + for k, v := range row.Data { + if slices.Contains(namespaceQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = 
labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allNamespaceGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/nodes.go b/pkg/query-service/app/inframetrics/nodes.go new file mode 100644 index 0000000000..796d6acb73 --- /dev/null +++ b/pkg/query-service/app/inframetrics/nodes.go @@ -0,0 +1,349 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForNodes = "k8s_node_cpu_utilization" + + nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid"} + + k8sNodeUIDAttrKey = "k8s_node_uid" + + queryNamesForNodes = map[string][]string{ + "cpu": {"A"}, + "cpu_allocatable": {"B"}, + "memory": {"C"}, + "memory_allocatable": {"D"}, + } + nodeQueryNames = []string{"A", "B", "C", "D"} + + metricNamesForNodes = map[string]string{ + "cpu": "k8s_node_cpu_utilization", + "cpu_allocatable": "k8s_node_allocatable_cpu", + "memory": "k8s_node_memory_usage", + "memory_allocatable": "k8s_node_allocatable_memory", + } +) + +type NodesRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewNodesRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *NodesRepo { + return &NodesRepo{reader: reader, querierV2: querierV2} +} + +func (n *NodesRepo) GetNodeAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNodes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := n.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { 
+ return nil, err + } + + return attributeKeysResponse, nil +} + +func (n *NodesRepo) GetNodeAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForNodes + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := n.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeListRequest) (map[string]map[string]string, error) { + nodeAttrs := map[string]map[string]string{} + + for _, key := range nodeAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForNodes, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := p.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + nodeUID := stringData[k8sNodeUIDAttrKey] + if _, ok := nodeAttrs[nodeUID]; !ok { + nodeAttrs[nodeUID] = map[string]string{} + } + + for _, key := range req.GroupBy { + nodeAttrs[nodeUID][key.Key] = stringData[key.Key] + } + } + + return nodeAttrs, nil +} + +func (p *NodesRepo) 
getTopNodeGroups(ctx context.Context, req model.NodeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopNodes(req) + + queryNames := queryNamesForNodes[req.OrderBy.ColumnName] + topNodeGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topNodeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, topNodeGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topNodeGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + max := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopNodeGroupsSeries := formattedResponse[0].Series[req.Offset:int(max)] + + topNodeGroups := []map[string]string{} + for _, series := range paginatedTopNodeGroupsSeries { + topNodeGroups = append(topNodeGroups, series.Labels) + } + allNodeGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allNodeGroups = append(allNodeGroups, series.Labels) + } + + return topNodeGroups, allNodeGroups, nil +} + +func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest) (model.NodeListResponse, error) { + resp := model.NodeListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sNodeUIDAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 
60)) + + query := NodesTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + query.GroupBy = req.GroupBy + } + + nodeAttrs, err := p.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topNodeGroups, allNodeGroups, err := p.getTopNodeGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topNodeGroup := range topNodeGroups { + for k, v := range topNodeGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := p.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.NodeListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.NodeListRecord{ + NodeCPUUsage: -1, + NodeCPUAllocatable: -1, + NodeMemoryUsage: -1, + NodeMemoryAllocatable: -1, + } + + if nodeUID, ok := row.Data[k8sNodeUIDAttrKey].(string); ok { + record.NodeUID = nodeUID + } + + if 
cpu, ok := row.Data["A"].(float64); ok { + record.NodeCPUUsage = cpu + } + + if cpuAllocatable, ok := row.Data["B"].(float64); ok { + record.NodeCPUAllocatable = cpuAllocatable + } + + if mem, ok := row.Data["C"].(float64); ok { + record.NodeMemoryUsage = mem + } + + if memory, ok := row.Data["D"].(float64); ok { + record.NodeMemoryAllocatable = memory + } + + record.Meta = map[string]string{} + if _, ok := nodeAttrs[record.NodeUID]; ok { + record.Meta = nodeAttrs[record.NodeUID] + } + + for k, v := range row.Data { + if slices.Contains(nodeQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allNodeGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/nodes_query.go b/pkg/query-service/app/inframetrics/nodes_query.go new file mode 100644 index 0000000000..36503e4bd9 --- /dev/null +++ b/pkg/query-service/app/inframetrics/nodes_query.go @@ -0,0 +1,118 @@ +package inframetrics + +import v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + +var NodesTableListQuery = v3.QueryRangeParamsV3{ + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{ + // node cpu utilization + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["cpu"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "A", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node cpu allocatable + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + 
AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["cpu_allocatable"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "B", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node memory utilization + "C": { + QueryName: "C", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["memory"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "C", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // node memory allocatable + "D": { + QueryName: "D", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForNodes["memory_allocatable"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{ + { + Key: k8sNodeUIDAttrKey, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + Expression: "D", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git 
// NodeListRequest is the request body for the k8s node list API.
// Start and End bound the query window; Filters/GroupBy/OrderBy use the
// v3 query-builder types; Offset and Limit page the result.
type NodeListRequest struct {
	Start   int64             `json:"start"` // epoch time in ms
	End     int64             `json:"end"`   // epoch time in ms
	Filters *v3.FilterSet     `json:"filters"`
	GroupBy []v3.AttributeKey `json:"groupBy"`
	OrderBy *v3.OrderBy       `json:"orderBy"`
	Offset  int               `json:"offset"`
	Limit   int               `json:"limit"`
}

// NodeListResponse is the paginated node list result.
type NodeListResponse struct {
	// Type is list vs grouped-list, depending on whether GroupBy was set.
	Type ResponseType `json:"type"`
	// Records holds the requested page of node records.
	Records []NodeListRecord `json:"records"`
	// Total is the number of matching node groups before pagination.
	Total int `json:"total"`
}

// NodeListRecord is one row of the node list: usage/allocatable values
// plus metadata attributes keyed by attribute name.
// NOTE(review): the inframetrics repo initializes the float fields to -1
// when no data point is present for that column.
type NodeListRecord struct {
	NodeUID               string            `json:"nodeUID,omitempty"`
	NodeCPUUsage          float64           `json:"nodeCPUUsage"`
	NodeCPUAllocatable    float64           `json:"nodeCPUAllocatable"`
	NodeMemoryUsage       float64           `json:"nodeMemoryUsage"`
	NodeMemoryAllocatable float64           `json:"nodeMemoryAllocatable"`
	Meta                  map[string]string `json:"meta"`
}

// NamespaceListRequest is the request body for the k8s namespace list API.
// Same shape as NodeListRequest.
type NamespaceListRequest struct {
	Start   int64             `json:"start"` // epoch time in ms
	End     int64             `json:"end"`   // epoch time in ms
	Filters *v3.FilterSet     `json:"filters"`
	GroupBy []v3.AttributeKey `json:"groupBy"`
	OrderBy *v3.OrderBy       `json:"orderBy"`
	Offset  int               `json:"offset"`
	Limit   int               `json:"limit"`
}

// NamespaceListResponse is the paginated namespace list result.
type NamespaceListResponse struct {
	Type ResponseType `json:"type"`
	// Records holds the requested page of namespace records.
	Records []NamespaceListRecord `json:"records"`
	// Total is the number of matching namespace groups before pagination.
	Total int `json:"total"`
}

// NamespaceListRecord is one row of the namespace list. Namespaces carry
// only usage (no allocatable) values.
type NamespaceListRecord struct {
	NamespaceName string            `json:"namespaceName"`
	CPUUsage      float64           `json:"cpuUsage"`
	MemoryUsage   float64           `json:"memoryUsage"`
	Meta          map[string]string `json:"meta"`
}

// ClusterListRequest is the request body for the k8s cluster list API.
// Same shape as NodeListRequest.
type ClusterListRequest struct {
	Start   int64             `json:"start"` // epoch time in ms
	End     int64             `json:"end"`   // epoch time in ms
	Filters *v3.FilterSet     `json:"filters"`
	GroupBy []v3.AttributeKey `json:"groupBy"`
	OrderBy *v3.OrderBy       `json:"orderBy"`
	Offset  int               `json:"offset"`
	Limit   int               `json:"limit"`
}

// ClusterListResponse is the paginated cluster list result.
type ClusterListResponse struct {
	Type ResponseType `json:"type"`
	// Records holds the requested page of cluster records.
	Records []ClusterListRecord `json:"records"`
	// Total is the number of matching cluster groups before pagination.
	Total int `json:"total"`
}

// ClusterListRecord is one row of the cluster list: aggregate usage and
// allocatable values for the cluster plus metadata attributes.
type ClusterListRecord struct {
	ClusterUID        string            `json:"clusterUID"`
	CPUUsage          float64           `json:"cpuUsage"`
	CPUAllocatable    float64           `json:"cpuAllocatable"`
	MemoryUsage       float64           `json:"memoryUsage"`
	MemoryAllocatable float64           `json:"memoryAllocatable"`
	Meta              map[string]string `json:"meta"`
}