diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..028b1e410b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+.git
+.github
+.vscode
+README.md
+deploy
+sample-apps
\ No newline at end of file
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 8f346bf882..a7bcbd1ad2 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -32,7 +32,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v2
- - name: Build query-service image
+ - name: Build query-service image
shell: bash
run: |
make build-query-service-amd64
+
+ build-ee-query-service:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - name: Build EE query-service image
+ shell: bash
+ run: |
+ make build-ee-query-service-amd64
diff --git a/.github/workflows/e2e-k3s.yaml b/.github/workflows/e2e-k3s.yaml
index f25b8afdbe..6b14a9c975 100644
--- a/.github/workflows/e2e-k3s.yaml
+++ b/.github/workflows/e2e-k3s.yaml
@@ -16,7 +16,7 @@ jobs:
uses: actions/checkout@v2
- name: Build query-service image
- run: make build-query-service-amd64
+ run: make build-ee-query-service-amd64
- name: Build frontend image
run: make build-frontend-amd64
diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml
index 2f39f28af1..b497db5001 100644
--- a/.github/workflows/push.yaml
+++ b/.github/workflows/push.yaml
@@ -11,6 +11,41 @@ on:
jobs:
image-build-and-push-query-service:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+ with:
+ version: latest
+ - name: Login to DockerHub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+ - uses: benjlevesque/short-sha@v1.2
+ id: short-sha
+ - name: Get branch name
+ id: branch-name
+ uses: tj-actions/branch-names@v5.1
+ - name: Set docker tag environment
+ run: |
+ if [ '${{ steps.branch-name.outputs.is_tag }}' == 'true' ]; then
+ tag="${{ steps.branch-name.outputs.tag }}"
+ tag="${tag:1}"
+ echo "DOCKER_TAG=${tag}-oss" >> $GITHUB_ENV
+ elif [ '${{ steps.branch-name.outputs.current_branch }}' == 'main' ]; then
+ echo "DOCKER_TAG=latest-oss" >> $GITHUB_ENV
+ else
+ echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}-oss" >> $GITHUB_ENV
+ fi
+ - name: Build and push docker image
+ run: make build-push-query-service
+
+ image-build-and-push-ee-query-service:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -43,7 +78,7 @@ jobs:
echo "DOCKER_TAG=${{ steps.branch-name.outputs.current_branch }}" >> $GITHUB_ENV
fi
- name: Build and push docker image
- run: make build-push-query-service
+ run: make build-push-ee-query-service
image-build-and-push-frontend:
runs-on: ubuntu-latest
diff --git a/.gitignore b/.gitignore
index f584e2c656..9e422ac336 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+
node_modules
yarn.lock
package.json
@@ -43,8 +44,12 @@ pkg/query-service/signoz.db
pkg/query-service/tests/test-deploy/data/
+ee/query-service/signoz.db
+
+ee/query-service/tests/test-deploy/data/
# local data
-
+*.db
/deploy/docker/clickhouse-setup/data/
/deploy/docker-swarm/clickhouse-setup/data/
+bin/
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 686bcdba58..5fd438eda0 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -207,7 +207,7 @@ If you don't want to install the SigNoz backend just for doing frontend developm
Please ping us in the [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) channel or ask `@Prashant Shahi` in our [Slack Community](https://signoz.io/slack) and we will DM you with `
-
@@ -15,10 +14,10 @@
Dokumentation •
- ReadMe auf Chinesisch •
- ReadMe auf Portugiesisch •
+ ReadMe auf Chinesisch •
+ ReadMe auf Portugiesisch •
Slack Community •
- Twitter
+ Twitter
##
diff --git a/README.md b/README.md
index 094415aadb..6b45706c44 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,6 @@
-
@@ -15,9 +14,9 @@
Documentation •
- ReadMe in Chinese •
- ReadMe in German •
- ReadMe in Portuguese •
+ ReadMe in Chinese •
+ ReadMe in German •
+ ReadMe in Portuguese •
Slack Community •
Twitter
@@ -117,7 +116,7 @@ Our goal is to provide an integrated UI between metrics & traces - similar to wh
### SigNoz vs Jaeger
-Jaeger only does distributed tracing. SigNoz does both metrics and traces, and we also have log management in our roadmap.
+Jaeger only does distributed tracing. SigNoz supports metrics, traces and logs - all the 3 pillars of observability.
Moreover, SigNoz has few more advanced features wrt Jaeger:
@@ -146,7 +145,6 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
-- [Pranshu Chittora](https://github.com/pranshuchittora)
#### DevOps
diff --git a/README.pt-br.md b/README.pt-br.md
index e8113cd7ca..ce168b4101 100644
--- a/README.pt-br.md
+++ b/README.pt-br.md
@@ -5,7 +5,6 @@
-
diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
index 88b022d34a..5bc37de791 100644
--- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
+++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml
@@ -40,7 +40,7 @@ services:
condition: on-failure
query-service:
- image: signoz/query-service:0.11.1
+ image: signoz/query-service:0.11.2
command: ["-config=/root/config/prometheus.yml"]
# ports:
# - "6060:6060" # pprof port
@@ -52,12 +52,12 @@ services:
environment:
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
-
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
interval: 30s
@@ -70,7 +70,7 @@ services:
- clickhouse
frontend:
- image: signoz/frontend:0.11.1
+ image: signoz/frontend:0.11.2
deploy:
restart_policy:
condition: on-failure
@@ -83,7 +83,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
- image: signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -111,7 +111,7 @@ services:
- clickhouse
otel-collector-metrics:
- image: signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
diff --git a/deploy/docker-swarm/common/nginx-config.conf b/deploy/docker-swarm/common/nginx-config.conf
index d822e68c40..738805f89f 100644
--- a/deploy/docker-swarm/common/nginx-config.conf
+++ b/deploy/docker-swarm/common/nginx-config.conf
@@ -13,7 +13,7 @@ server {
# to handle uri issue 414 from nginx
client_max_body_size 24M;
- large_client_header_buffers 8 16k;
+ large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml
index da338d1dd7..a7d265d3f5 100644
--- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml
+++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml
@@ -41,7 +41,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: otel-collector
- image: signoz/signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-config.yaml"]
# user: root # required for reading docker container logs
volumes:
@@ -67,7 +67,7 @@ services:
otel-collector-metrics:
container_name: otel-collector-metrics
- image: signoz/signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
diff --git a/deploy/docker/clickhouse-setup/docker-compose-local.yaml b/deploy/docker/clickhouse-setup/docker-compose-local.yaml
index 1a17da83d1..d5747c8e9d 100644
--- a/deploy/docker/clickhouse-setup/docker-compose-local.yaml
+++ b/deploy/docker/clickhouse-setup/docker-compose-local.yaml
@@ -13,10 +13,11 @@ services:
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
volumes:
- ./prometheus.yml:/root/config/prometheus.yml
- ../dashboards:/root/config/dashboards
diff --git a/deploy/docker/clickhouse-setup/docker-compose-prod.yaml b/deploy/docker/clickhouse-setup/docker-compose-prod.yaml
index f2a4d73269..2aa522026e 100644
--- a/deploy/docker/clickhouse-setup/docker-compose-prod.yaml
+++ b/deploy/docker/clickhouse-setup/docker-compose-prod.yaml
@@ -2,7 +2,7 @@ version: "2.4"
services:
query-service:
- image: signoz/query-service:0.11.1
+ image: signoz/query-service:0.11.2
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@@ -15,11 +15,12 @@ services:
environment:
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
+ - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
restart: on-failure
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/version"]
@@ -31,7 +32,7 @@ services:
condition: service_healthy
frontend:
- image: signoz/frontend:0.11.1
+ image: signoz/frontend:0.11.2
container_name: frontend
restart: on-failure
depends_on:
diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml
index ae330e363f..964a835e26 100644
--- a/deploy/docker/clickhouse-setup/docker-compose.yaml
+++ b/deploy/docker/clickhouse-setup/docker-compose.yaml
@@ -39,7 +39,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
- image: signoz/query-service:0.11.1
+ image: signoz/query-service:0.11.2
container_name: query-service
command: ["-config=/root/config/prometheus.yml"]
# ports:
@@ -53,6 +53,7 @@ services:
- ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
+ - DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
@@ -68,7 +69,7 @@ services:
condition: service_healthy
frontend:
- image: signoz/frontend:0.11.1
+ image: signoz/frontend:0.11.2
container_name: frontend
restart: on-failure
depends_on:
@@ -80,7 +81,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
- image: signoz/signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-config.yaml"]
user: root # required for reading docker container logs
volumes:
@@ -106,7 +107,7 @@ services:
condition: service_healthy
otel-collector-metrics:
- image: signoz/signoz-otel-collector:0.55.1
+ image: signoz/signoz-otel-collector:0.55.3
command: ["--config=/etc/otel-collector-metrics-config.yaml"]
volumes:
- ./otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
diff --git a/deploy/docker/common/nginx-config.conf b/deploy/docker/common/nginx-config.conf
index d822e68c40..738805f89f 100644
--- a/deploy/docker/common/nginx-config.conf
+++ b/deploy/docker/common/nginx-config.conf
@@ -13,7 +13,7 @@ server {
# to handle uri issue 414 from nginx
client_max_body_size 24M;
- large_client_header_buffers 8 16k;
+ large_client_header_buffers 8 128k;
location / {
if ( $uri = '/index.html' ) {
diff --git a/ee/LICENSE b/ee/LICENSE
new file mode 100644
index 0000000000..c024dbdaf4
--- /dev/null
+++ b/ee/LICENSE
@@ -0,0 +1,37 @@
+
+The SigNoz Enterprise license (the "Enterprise License")
+Copyright (c) 2020 - present SigNoz Inc.
+
+With regard to the SigNoz Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the SigNoz Subscription Terms of Service, available
+via email (hello@signoz.io) (the "Enterprise Terms"), or other
+agreement governing the use of the Software, as agreed by you and SigNoz,
+and otherwise have a valid SigNoz Enterprise license for the
+correct number of user seats. Subject to the foregoing sentence, you are free to
+modify this Software and publish patches to the Software. You agree that SigNoz
+and/or its licensors (as applicable) retain all right, title and interest in and
+to all such modifications and/or patches, and all such modifications and/or
+patches may only be used, copied, modified, displayed, distributed, or otherwise
+exploited with a valid SigNoz Enterprise license for the correct
+number of user seats. Notwithstanding the foregoing, you may copy and modify
+the Software for development and testing purposes, without requiring a
+subscription. You agree that SigNoz and/or its licensors (as applicable) retain
+all right, title and interest in and to all such modifications. You are not
+granted any other rights beyond what is expressly stated herein. Subject to the
+foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
+and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the SigNoz Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
\ No newline at end of file
diff --git a/ee/query-service/.dockerignore b/ee/query-service/.dockerignore
new file mode 100644
index 0000000000..9521c5060b
--- /dev/null
+++ b/ee/query-service/.dockerignore
@@ -0,0 +1,4 @@
+.vscode
+README.md
+signoz.db
+bin
\ No newline at end of file
diff --git a/ee/query-service/Dockerfile b/ee/query-service/Dockerfile
new file mode 100644
index 0000000000..7def5c0982
--- /dev/null
+++ b/ee/query-service/Dockerfile
@@ -0,0 +1,48 @@
+FROM golang:1.17-buster AS builder
+
+# LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
+ARG LD_FLAGS
+ARG TARGETPLATFORM
+
+ENV CGO_ENABLED=1
+ENV GOPATH=/go
+
+RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
+ export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2)
+
+# Prepare and enter src directory
+WORKDIR /go/src/github.com/signoz/signoz
+
+# Add the sources and proceed with build
+ADD . .
+RUN cd ee/query-service \
+ && go build -tags timetzdata -a -o ./bin/query-service \
+ -ldflags "-linkmode external -extldflags '-static' -s -w $LD_FLAGS" \
+ && chmod +x ./bin/query-service
+
+
+# use a minimal alpine image
+FROM alpine:3.7
+
+# Add Maintainer Info
+LABEL maintainer="signoz"
+
+# add ca-certificates in case you need them
+RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
+
+# set working directory
+WORKDIR /root
+
+# copy the binary from builder
+COPY --from=builder /go/src/github.com/signoz/signoz/ee/query-service/bin/query-service .
+
+# copy prometheus YAML config
+COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
+
+# run the binary
+ENTRYPOINT ["./query-service"]
+
+CMD ["-config", "../config/prometheus.yml"]
+# CMD ["./query-service -config /root/config/prometheus.yml"]
+
+EXPOSE 8080
diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go
new file mode 100644
index 0000000000..a6497b615e
--- /dev/null
+++ b/ee/query-service/app/api/api.go
@@ -0,0 +1,124 @@
+package api
+
+import (
+ "net/http"
+
+ "github.com/gorilla/mux"
+ "go.signoz.io/signoz/ee/query-service/dao"
+ "go.signoz.io/signoz/ee/query-service/interfaces"
+ "go.signoz.io/signoz/ee/query-service/license"
+ baseapp "go.signoz.io/signoz/pkg/query-service/app"
+ baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+ rules "go.signoz.io/signoz/pkg/query-service/rules"
+ "go.signoz.io/signoz/pkg/query-service/version"
+)
+
+type APIHandlerOptions struct {
+ DataConnector interfaces.DataConnector
+ AppDao dao.ModelDao
+ RulesManager *rules.Manager
+ FeatureFlags baseint.FeatureLookup
+ LicenseManager *license.Manager
+}
+
+type APIHandler struct {
+ opts APIHandlerOptions
+ baseapp.APIHandler
+}
+
+// NewAPIHandler returns an APIHandler
+func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) {
+
+ baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
+ Reader: opts.DataConnector,
+ AppDao: opts.AppDao,
+ RuleManager: opts.RulesManager,
+ FeatureFlags: opts.FeatureFlags})
+
+ if err != nil {
+ return nil, err
+ }
+
+ ah := &APIHandler{
+ opts: opts,
+ APIHandler: *baseHandler,
+ }
+ return ah, nil
+}
+
+func (ah *APIHandler) FF() baseint.FeatureLookup {
+ return ah.opts.FeatureFlags
+}
+
+func (ah *APIHandler) RM() *rules.Manager {
+ return ah.opts.RulesManager
+}
+
+func (ah *APIHandler) LM() *license.Manager {
+ return ah.opts.LicenseManager
+}
+
+func (ah *APIHandler) AppDao() dao.ModelDao {
+ return ah.opts.AppDao
+}
+
+func (ah *APIHandler) CheckFeature(f string) bool {
+ err := ah.FF().CheckFeature(f)
+ return err == nil
+}
+
+// RegisterRoutes registers routes for this handler on the given router
+func (ah *APIHandler) RegisterRoutes(router *mux.Router) {
+ // note: add ee override methods first
+
+ // routes available only in ee version
+ router.HandleFunc("/api/v1/licenses",
+ baseapp.AdminAccess(ah.listLicenses)).
+ Methods(http.MethodGet)
+
+ router.HandleFunc("/api/v1/licenses",
+ baseapp.AdminAccess(ah.applyLicense)).
+ Methods(http.MethodPost)
+
+ router.HandleFunc("/api/v1/featureFlags",
+ baseapp.OpenAccess(ah.getFeatureFlags)).
+ Methods(http.MethodGet)
+
+ router.HandleFunc("/api/v1/loginPrecheck",
+ baseapp.OpenAccess(ah.precheckLogin)).
+ Methods(http.MethodGet)
+
+ // paid plans specific routes
+ router.HandleFunc("/api/v1/complete/saml",
+ baseapp.OpenAccess(ah.receiveSAML)).
+ Methods(http.MethodPost)
+
+ router.HandleFunc("/api/v1/orgs/{orgId}/domains",
+ baseapp.AdminAccess(ah.listDomainsByOrg)).
+ Methods(http.MethodGet)
+
+ router.HandleFunc("/api/v1/domains",
+ baseapp.AdminAccess(ah.postDomain)).
+ Methods(http.MethodPost)
+
+ router.HandleFunc("/api/v1/domains/{id}",
+ baseapp.AdminAccess(ah.putDomain)).
+ Methods(http.MethodPut)
+
+ router.HandleFunc("/api/v1/domains/{id}",
+ baseapp.AdminAccess(ah.deleteDomain)).
+ Methods(http.MethodDelete)
+
+ // base overrides
+ router.HandleFunc("/api/v1/version", baseapp.OpenAccess(ah.getVersion)).Methods(http.MethodGet)
+ router.HandleFunc("/api/v1/invite/{token}", baseapp.OpenAccess(ah.getInvite)).Methods(http.MethodGet)
+ router.HandleFunc("/api/v1/register", baseapp.OpenAccess(ah.registerUser)).Methods(http.MethodPost)
+ router.HandleFunc("/api/v1/login", baseapp.OpenAccess(ah.loginUser)).Methods(http.MethodPost)
+ ah.APIHandler.RegisterRoutes(router)
+
+}
+
+func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
+ version := version.GetVersion()
+ ah.WriteJSON(w, r, map[string]string{"version": version, "ee": "Y"})
+}
diff --git a/ee/query-service/app/api/auth.go b/ee/query-service/app/api/auth.go
new file mode 100644
index 0000000000..0c99edfc36
--- /dev/null
+++ b/ee/query-service/app/api/auth.go
@@ -0,0 +1,297 @@
+package api
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/gorilla/mux"
+ "go.signoz.io/signoz/ee/query-service/constants"
+ "go.signoz.io/signoz/ee/query-service/model"
+ "go.signoz.io/signoz/pkg/query-service/auth"
+ baseauth "go.signoz.io/signoz/pkg/query-service/auth"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+ "go.uber.org/zap"
+)
+
+func parseRequest(r *http.Request, req interface{}) error {
+ defer r.Body.Close()
+ requestBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(requestBody, &req)
+ return err
+}
+
+// loginUser overrides base handler and considers SSO case.
+func (ah *APIHandler) loginUser(w http.ResponseWriter, r *http.Request) {
+
+ req := basemodel.LoginRequest{}
+ err := parseRequest(r, &req)
+ if err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ ctx := context.Background()
+
+ if req.Email != "" && ah.CheckFeature(model.SSO) {
+ var apierr basemodel.BaseApiError
+ _, apierr = ah.AppDao().CanUsePassword(ctx, req.Email)
+ if apierr != nil && !apierr.IsNil() {
+ RespondError(w, apierr, nil)
+ }
+ }
+
+ // if all looks good, call auth
+ resp, err := auth.Login(ctx, &req)
+ if ah.HandleError(w, err, http.StatusUnauthorized) {
+ return
+ }
+
+ ah.WriteJSON(w, r, resp)
+}
+
+// registerUser registers a user and responds with a precheck
+// so the front-end can decide the login method
+func (ah *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
+
+ if !ah.CheckFeature(model.SSO) {
+ ah.APIHandler.Register(w, r)
+ return
+ }
+
+ ctx := context.Background()
+ var req *baseauth.RegisterRequest
+
+ defer r.Body.Close()
+ requestBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ zap.S().Errorf("received no input in api: %v", err)
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ err = json.Unmarshal(requestBody, &req)
+
+ if err != nil {
+ zap.S().Errorf("received invalid user registration request", zap.Error(err))
+ RespondError(w, model.BadRequest(fmt.Errorf("failed to register user")), nil)
+ return
+ }
+
+ // get invite object
+ invite, err := baseauth.ValidateInvite(ctx, req)
+ if err != nil || invite == nil {
+ zap.S().Errorf("failed to validate invite token", err)
+ RespondError(w, model.BadRequest(basemodel.ErrSignupFailed{}), nil)
+ }
+
+ // get auth domain from email domain
+ domain, apierr := ah.AppDao().GetDomainByEmail(ctx, invite.Email)
+ if apierr != nil {
+ zap.S().Errorf("failed to get domain from email", apierr)
+ RespondError(w, model.InternalError(basemodel.ErrSignupFailed{}), nil)
+ }
+
+ precheckResp := &model.PrecheckResponse{
+ SSO: false,
+ IsUser: false,
+ }
+
+ if domain != nil && domain.SsoEnabled {
+ // sso is enabled, create user and respond precheck data
+ user, apierr := baseauth.RegisterInvitedUser(ctx, req, true)
+ if apierr != nil {
+ RespondError(w, apierr, nil)
+ return
+ }
+
+ var precheckError basemodel.BaseApiError
+
+ precheckResp, precheckError = ah.AppDao().PrecheckLogin(ctx, user.Email, req.SourceUrl)
+ if precheckError != nil {
+ RespondError(w, precheckError, precheckResp)
+ }
+
+ } else {
+ // no-sso, validate password
+ if err := auth.ValidatePassword(req.Password); err != nil {
+ RespondError(w, model.InternalError(fmt.Errorf("password is not in a valid format")), nil)
+ return
+ }
+
+ _, registerError := baseauth.Register(ctx, req)
+ if !registerError.IsNil() {
+ RespondError(w, registerError, nil)
+ return
+ }
+
+ precheckResp.IsUser = true
+ }
+
+ ah.Respond(w, precheckResp)
+}
+
+// getInvite returns the invite object details for the given invite token. We do not need to
+// protect this API because invite token itself is meant to be private.
+func (ah *APIHandler) getInvite(w http.ResponseWriter, r *http.Request) {
+ token := mux.Vars(r)["token"]
+ sourceUrl := r.URL.Query().Get("ref")
+ ctx := context.Background()
+
+ inviteObject, err := baseauth.GetInvite(context.Background(), token)
+ if err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ resp := model.GettableInvitation{
+ InvitationResponseObject: inviteObject,
+ }
+
+ precheck, apierr := ah.AppDao().PrecheckLogin(ctx, inviteObject.Email, sourceUrl)
+ resp.Precheck = precheck
+
+ if apierr != nil {
+ RespondError(w, apierr, resp)
+ }
+
+ ah.WriteJSON(w, r, resp)
+}
+
+// PrecheckLogin enables browser login page to display appropriate
+// login methods
+func (ah *APIHandler) precheckLogin(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+
+ email := r.URL.Query().Get("email")
+ sourceUrl := r.URL.Query().Get("ref")
+
+ resp, apierr := ah.AppDao().PrecheckLogin(ctx, email, sourceUrl)
+ if apierr != nil {
+ RespondError(w, apierr, resp)
+ }
+
+ ah.Respond(w, resp)
+}
+
+func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
+ // this is the source url that initiated the login request
+ redirectUri := constants.GetDefaultSiteURL()
+ ctx := context.Background()
+
+ var apierr basemodel.BaseApiError
+
+ redirectOnError := func() {
+ ssoError := []byte("Login failed. Please contact your system administrator")
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
+ base64.StdEncoding.Encode(dst, ssoError)
+
+ http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, string(dst)), http.StatusMovedPermanently)
+ }
+
+ if !ah.CheckFeature(model.SSO) {
+ zap.S().Errorf("[ReceiveSAML] sso requested but feature unavailable: %s", model.SSO)
+ http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
+ return
+ }
+
+ err := r.ParseForm()
+ if err != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
+ redirectOnError()
+ return
+ }
+
+ // the relay state is sent when a login request is submitted to
+ // Idp.
+ relayState := r.FormValue("RelayState")
+ zap.S().Debug("[ReceiveSAML] relay state", zap.String("relayState", relayState))
+
+ parsedState, err := url.Parse(relayState)
+ if err != nil || relayState == "" {
+ zap.S().Errorf("[ReceiveSAML] failed to process response - invalid response from IDP", err, r)
+ redirectOnError()
+ return
+ }
+
+ // upgrade redirect url from the relay state for better accuracy
+ redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")
+
+ // derive domain id from relay state now
+ var domainIdStr string
+ for k, v := range parsedState.Query() {
+ if k == "domainId" && len(v) > 0 {
+ domainIdStr = strings.Replace(v[0], ":", "-", -1)
+ }
+ }
+
+ domainId, err := uuid.Parse(domainIdStr)
+ if err != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to process request - failed to parse domain id from relay state", zap.Error(err))
+ redirectOnError()
+ return
+ }
+
+ domain, apierr := ah.AppDao().GetDomain(ctx, domainId)
+ if (apierr != nil) || domain == nil {
+ zap.S().Errorf("[ReceiveSAML] failed to process request- invalid domain", domainIdStr, zap.Error(apierr))
+ redirectOnError()
+ return
+ }
+
+ sp, err := domain.PrepareSamlRequest(parsedState)
+ if err != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to prepare saml request for domain (%s): %v", domainId, err)
+ redirectOnError()
+ return
+ }
+
+ assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
+ if err != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to retrieve assertion info from saml response for organization (%s): %v", domainId, err)
+ redirectOnError()
+ return
+ }
+
+ if assertionInfo.WarningInfo.InvalidTime {
+ zap.S().Errorf("[ReceiveSAML] expired saml response for organization (%s): %v", domainId, err)
+ redirectOnError()
+ return
+ }
+
+ email := assertionInfo.NameID
+
+ // user email found, now start preparing jwt response
+ userPayload, baseapierr := ah.AppDao().GetUserByEmail(ctx, email)
+ if baseapierr != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to find or register a new user for email %s and org %s", email, domainId, zap.Error(baseapierr.Err))
+ redirectOnError()
+ return
+ }
+
+ tokenStore, err := baseauth.GenerateJWTForUser(&userPayload.User)
+ if err != nil {
+ zap.S().Errorf("[ReceiveSAML] failed to generate access token for email %s and org %s", email, domainId, zap.Error(err))
+ redirectOnError()
+ return
+ }
+
+ userID := userPayload.User.Id
+ nextPage := fmt.Sprintf("%s?jwt=%s&usr=%s&refreshjwt=%s",
+ redirectUri,
+ tokenStore.AccessJwt,
+ userID,
+ tokenStore.RefreshJwt)
+
+ http.Redirect(w, r, nextPage, http.StatusMovedPermanently)
+}
diff --git a/ee/query-service/app/api/domains.go b/ee/query-service/app/api/domains.go
new file mode 100644
index 0000000000..6456928c75
--- /dev/null
+++ b/ee/query-service/app/api/domains.go
@@ -0,0 +1,90 @@
+package api
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/google/uuid"
+ "github.com/gorilla/mux"
+ "go.signoz.io/signoz/ee/query-service/model"
+)
+
+func (ah *APIHandler) listDomainsByOrg(w http.ResponseWriter, r *http.Request) {
+ orgId := mux.Vars(r)["orgId"]
+ domains, apierr := ah.AppDao().ListDomains(context.Background(), orgId)
+ if apierr != nil {
+ RespondError(w, apierr, domains)
+ return
+ }
+ ah.Respond(w, domains)
+}
+
+func (ah *APIHandler) postDomain(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+
+ req := model.OrgDomain{}
+
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ if err := req.ValidNew(); err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ if apierr := ah.AppDao().CreateDomain(ctx, &req); apierr != nil {
+ RespondError(w, apierr, nil)
+ return
+ }
+
+ ah.Respond(w, &req)
+}
+
+func (ah *APIHandler) putDomain(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+
+ domainIdStr := mux.Vars(r)["id"]
+ domainId, err := uuid.Parse(domainIdStr)
+ if err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ req := model.OrgDomain{Id: domainId}
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+ req.Id = domainId
+ if err := req.Valid(nil); err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ }
+
+ if apierr := ah.AppDao().UpdateDomain(ctx, &req); apierr != nil {
+ RespondError(w, apierr, nil)
+ return
+ }
+
+ ah.Respond(w, &req)
+}
+
+func (ah *APIHandler) deleteDomain(w http.ResponseWriter, r *http.Request) {
+ domainIdStr := mux.Vars(r)["id"]
+
+ domainId, err := uuid.Parse(domainIdStr)
+ if err != nil {
+ RespondError(w, model.BadRequest(fmt.Errorf("invalid domain id")), nil)
+ return
+ }
+
+ apierr := ah.AppDao().DeleteDomain(context.Background(), domainId)
+ if apierr != nil {
+ RespondError(w, apierr, nil)
+ return
+ }
+ ah.Respond(w, nil)
+}
diff --git a/ee/query-service/app/api/featureFlags.go b/ee/query-service/app/api/featureFlags.go
new file mode 100644
index 0000000000..9c979d17ba
--- /dev/null
+++ b/ee/query-service/app/api/featureFlags.go
@@ -0,0 +1,10 @@
+package api
+
+import (
+ "net/http"
+)
+
+func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
+ featureSet := ah.FF().GetFeatureFlags()
+ ah.Respond(w, featureSet)
+}
diff --git a/ee/query-service/app/api/license.go b/ee/query-service/app/api/license.go
new file mode 100644
index 0000000000..e5f5b0ca0a
--- /dev/null
+++ b/ee/query-service/app/api/license.go
@@ -0,0 +1,40 @@
+package api
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go.signoz.io/signoz/ee/query-service/model"
+ "net/http"
+)
+
+func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) {
+	if licenses, apiError := ah.LM().GetLicenses(context.Background()); apiError != nil {
+		RespondError(w, apiError, nil) // reply with the error only; never write a second response
+	} else {
+		ah.Respond(w, licenses)
+	}
+}
+
+func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) {
+ ctx := context.Background()
+ var l model.License
+
+ if err := json.NewDecoder(r.Body).Decode(&l); err != nil {
+ RespondError(w, model.BadRequest(err), nil)
+ return
+ }
+
+ if l.Key == "" {
+ RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil)
+ return
+ }
+
+ license, apiError := ah.LM().Activate(ctx, l.Key)
+ if apiError != nil {
+ RespondError(w, apiError, nil)
+ return
+ }
+
+ ah.Respond(w, license)
+}
diff --git a/ee/query-service/app/api/response.go b/ee/query-service/app/api/response.go
new file mode 100644
index 0000000000..fef5f89798
--- /dev/null
+++ b/ee/query-service/app/api/response.go
@@ -0,0 +1,12 @@
+package api
+
+import (
+ "net/http"
+
+ baseapp "go.signoz.io/signoz/pkg/query-service/app"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+func RespondError(w http.ResponseWriter, apiErr basemodel.BaseApiError, data interface{}) {
+ baseapp.RespondError(w, apiErr, data)
+}
diff --git a/ee/query-service/app/db/reader.go b/ee/query-service/app/db/reader.go
new file mode 100644
index 0000000000..e948ee430b
--- /dev/null
+++ b/ee/query-service/app/db/reader.go
@@ -0,0 +1,28 @@
+package db
+
+import (
+ "github.com/ClickHouse/clickhouse-go/v2"
+
+ "github.com/jmoiron/sqlx"
+
+ basechr "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader"
+)
+
+type ClickhouseReader struct {
+ conn clickhouse.Conn
+ appdb *sqlx.DB
+ *basechr.ClickHouseReader
+}
+
+func NewDataConnector(localDB *sqlx.DB, promConfigPath string) *ClickhouseReader {
+ ch := basechr.NewReader(localDB, promConfigPath)
+ return &ClickhouseReader{
+ conn: ch.GetConn(),
+ appdb: localDB,
+ ClickHouseReader: ch,
+ }
+}
+
+func (r *ClickhouseReader) Start(readerReady chan bool) {
+ r.ClickHouseReader.Start(readerReady)
+}
diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go
new file mode 100644
index 0000000000..7002af3f41
--- /dev/null
+++ b/ee/query-service/app/server.go
@@ -0,0 +1,431 @@
+package app
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ _ "net/http/pprof" // http profiler
+ "os"
+ "time"
+
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+
+ "github.com/rs/cors"
+ "github.com/soheilhy/cmux"
+ "go.signoz.io/signoz/ee/query-service/app/api"
+ "go.signoz.io/signoz/ee/query-service/app/db"
+ "go.signoz.io/signoz/ee/query-service/dao"
+ "go.signoz.io/signoz/ee/query-service/interfaces"
+ licensepkg "go.signoz.io/signoz/ee/query-service/license"
+
+ "go.signoz.io/signoz/pkg/query-service/app/dashboards"
+ baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+ "go.signoz.io/signoz/pkg/query-service/healthcheck"
+ basealm "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
+ baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+ pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine"
+ rules "go.signoz.io/signoz/pkg/query-service/rules"
+ "go.signoz.io/signoz/pkg/query-service/telemetry"
+ "go.signoz.io/signoz/pkg/query-service/utils"
+ "go.uber.org/zap"
+)
+
+type ServerOptions struct {
+ PromConfigPath string
+ HTTPHostPort string
+ PrivateHostPort string
+ // alert specific params
+ DisableRules bool
+ RuleRepoURL string
+}
+
+// Server runs HTTP api service
+type Server struct {
+ serverOptions *ServerOptions
+ conn net.Listener
+ ruleManager *rules.Manager
+ separatePorts bool
+
+ // public http router
+ httpConn net.Listener
+ httpServer *http.Server
+
+ // private http
+ privateConn net.Listener
+ privateHTTP *http.Server
+
+ // feature flags
+ featureLookup baseint.FeatureLookup
+
+ unavailableChannel chan healthcheck.Status
+}
+
+// HealthCheckStatus returns health check status channel a client can subscribe to
+func (s Server) HealthCheckStatus() chan healthcheck.Status {
+ return s.unavailableChannel
+}
+
+// NewServer creates and initializes Server
+func NewServer(serverOptions *ServerOptions) (*Server, error) {
+
+ modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH)
+ if err != nil {
+ return nil, err
+ }
+
+ localDB, err := dashboards.InitDB(baseconst.RELATIONAL_DATASOURCE_PATH)
+
+ if err != nil {
+ return nil, err
+ }
+
+ localDB.SetMaxOpenConns(10)
+
+ // initiate license manager
+ lm, err := licensepkg.StartManager("sqlite", localDB)
+ if err != nil {
+ return nil, err
+ }
+
+ // set license manager as feature flag provider in dao
+ modelDao.SetFlagProvider(lm)
+ readerReady := make(chan bool)
+
+ var reader interfaces.DataConnector
+ storage := os.Getenv("STORAGE")
+ if storage == "clickhouse" {
+ zap.S().Info("Using ClickHouse as datastore ...")
+ qb := db.NewDataConnector(localDB, serverOptions.PromConfigPath)
+ go qb.Start(readerReady)
+ reader = qb
+ } else {
+ return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
+ }
+
+ <-readerReady
+ rm, err := makeRulesManager(serverOptions.PromConfigPath,
+ baseconst.GetAlertManagerApiPrefix(),
+ serverOptions.RuleRepoURL,
+ localDB,
+ reader,
+ serverOptions.DisableRules)
+
+ if err != nil {
+ return nil, err
+ }
+
+ telemetry.GetInstance().SetReader(reader)
+
+ apiOpts := api.APIHandlerOptions{
+ DataConnector: reader,
+ AppDao: modelDao,
+ RulesManager: rm,
+ FeatureFlags: lm,
+ LicenseManager: lm,
+ }
+
+ apiHandler, err := api.NewAPIHandler(apiOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &Server{
+ // logger: logger,
+ // tracer: tracer,
+ ruleManager: rm,
+ serverOptions: serverOptions,
+ unavailableChannel: make(chan healthcheck.Status),
+ }
+
+ httpServer, err := s.createPublicServer(apiHandler)
+
+ if err != nil {
+ return nil, err
+ }
+
+ s.httpServer = httpServer
+
+ privateServer, err := s.createPrivateServer(apiHandler)
+ if err != nil {
+ return nil, err
+ }
+
+ s.privateHTTP = privateServer
+
+ return s, nil
+}
+
+func (s *Server) createPrivateServer(apiHandler *api.APIHandler) (*http.Server, error) {
+
+ r := mux.NewRouter()
+
+ r.Use(setTimeoutMiddleware)
+ r.Use(s.analyticsMiddleware)
+ r.Use(loggingMiddlewarePrivate)
+
+ apiHandler.RegisterPrivateRoutes(r)
+
+ c := cors.New(cors.Options{
+ //todo(amol): find out a way to add exact domain or
+ // ip here for alert manager
+ AllowedOrigins: []string{"*"},
+ AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH"},
+ AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
+ })
+
+ handler := c.Handler(r)
+ handler = handlers.CompressHandler(handler)
+
+ return &http.Server{
+ Handler: handler,
+ }, nil
+}
+
+func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, error) {
+
+ r := mux.NewRouter()
+
+ r.Use(setTimeoutMiddleware)
+ r.Use(s.analyticsMiddleware)
+ r.Use(loggingMiddleware)
+
+ apiHandler.RegisterRoutes(r)
+ apiHandler.RegisterMetricsRoutes(r)
+ apiHandler.RegisterLogsRoutes(r)
+
+ c := cors.New(cors.Options{
+ AllowedOrigins: []string{"*"},
+ AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
+ AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "cache-control"},
+ })
+
+ handler := c.Handler(r)
+
+ handler = handlers.CompressHandler(handler)
+
+ return &http.Server{
+ Handler: handler,
+ }, nil
+}
+
+// loggingMiddleware is used for logging public api calls
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.S().Info(path, "\ttimeTaken: ", time.Since(startTime))
+	})
+}
+
+// loggingMiddlewarePrivate is used for logging private api calls
+// from internal services like alert manager
+func loggingMiddlewarePrivate(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		route := mux.CurrentRoute(r)
+		path, _ := route.GetPathTemplate()
+		startTime := time.Now()
+		next.ServeHTTP(w, r)
+		zap.S().Info(path, "\tprivatePort: true", "\ttimeTaken: ", time.Since(startTime))
+	})
+}
+
+type loggingResponseWriter struct {
+ http.ResponseWriter
+ statusCode int
+}
+
+func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
+ // WriteHeader(int) is not called if our response implicitly returns 200 OK, so
+ // we default to that status code.
+ return &loggingResponseWriter{w, http.StatusOK}
+}
+
+func (lrw *loggingResponseWriter) WriteHeader(code int) {
+ lrw.statusCode = code
+ lrw.ResponseWriter.WriteHeader(code)
+}
+
+// Flush implements the http.Flush interface.
+func (lrw *loggingResponseWriter) Flush() {
+ lrw.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (s *Server) analyticsMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ route := mux.CurrentRoute(r)
+ path, _ := route.GetPathTemplate()
+
+ lrw := NewLoggingResponseWriter(w)
+ next.ServeHTTP(lrw, r)
+
+ data := map[string]interface{}{"path": path, "statusCode": lrw.statusCode}
+
+ if _, ok := telemetry.IgnoredPaths()[path]; !ok {
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_PATH, data)
+ }
+
+ })
+}
+
+func setTimeoutMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ var cancel context.CancelFunc
+ // check if route is not excluded
+ url := r.URL.Path
+ if _, ok := baseconst.TimeoutExcludedRoutes[url]; !ok {
+ ctx, cancel = context.WithTimeout(r.Context(), baseconst.ContextTimeout*time.Second)
+ defer cancel()
+ }
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+ })
+}
+
+// initListeners initialises listeners of the server
+func (s *Server) initListeners() error {
+ // listen on public port
+ var err error
+ publicHostPort := s.serverOptions.HTTPHostPort
+ if publicHostPort == "" {
+ return fmt.Errorf("baseconst.HTTPHostPort is required")
+ }
+
+ s.httpConn, err = net.Listen("tcp", publicHostPort)
+ if err != nil {
+ return err
+ }
+
+ zap.S().Info(fmt.Sprintf("Query server started listening on %s...", s.serverOptions.HTTPHostPort))
+
+ // listen on private port to support internal services
+ privateHostPort := s.serverOptions.PrivateHostPort
+
+ if privateHostPort == "" {
+ return fmt.Errorf("baseconst.PrivateHostPort is required")
+ }
+
+ s.privateConn, err = net.Listen("tcp", privateHostPort)
+ if err != nil {
+ return err
+ }
+ zap.S().Info(fmt.Sprintf("Query server started listening on private port %s...", s.serverOptions.PrivateHostPort))
+
+ return nil
+}
+
+// Start listening on http and private http port concurrently
+func (s *Server) Start() error {
+
+	// initiate rule manager first
+	if !s.serverOptions.DisableRules {
+		s.ruleManager.Start()
+	} else {
+		zap.S().Info("msg: Rules disabled as rules.disable is set to TRUE")
+	}
+
+	err := s.initListeners()
+	if err != nil {
+		return err
+	}
+
+	var httpPort int
+	if port, err := utils.GetPort(s.httpConn.Addr()); err == nil {
+		httpPort = port
+	}
+
+	go func() {
+		zap.S().Infow("Starting HTTP server", "port", httpPort, "addr", s.serverOptions.HTTPHostPort)
+
+		switch err := s.httpServer.Serve(s.httpConn); err {
+		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
+			// normal exit, nothing to do
+		default:
+			zap.S().Errorw("Could not start HTTP server", "error", err)
+		}
+		s.unavailableChannel <- healthcheck.Unavailable
+	}()
+
+	go func() {
+		zap.S().Infow("Starting pprof server", "addr", baseconst.DebugHttpPort)
+
+		// use a goroutine-local err instead of racing on the outer err
+		if err := http.ListenAndServe(baseconst.DebugHttpPort, nil); err != nil {
+			zap.S().Errorw("Could not start pprof server", "error", err)
+		}
+	}()
+
+	var privatePort int
+	if port, err := utils.GetPort(s.privateConn.Addr()); err == nil {
+		privatePort = port
+	}
+	zap.S().Info("starting private http")
+	go func() {
+		zap.S().Infow("Starting Private HTTP server", "port", privatePort, "addr", s.serverOptions.PrivateHostPort)
+
+		switch err := s.privateHTTP.Serve(s.privateConn); err {
+		case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
+			// normal exit, nothing to do
+			zap.S().Info("private http server closed")
+		default:
+			zap.S().Errorw("Could not start private HTTP server", "error", err)
+		}
+
+		s.unavailableChannel <- healthcheck.Unavailable
+
+	}()
+
+	return nil
+}
+
+func makeRulesManager(
+ promConfigPath,
+ alertManagerURL string,
+ ruleRepoURL string,
+ db *sqlx.DB,
+ ch baseint.Reader,
+ disableRules bool) (*rules.Manager, error) {
+
+ // create engine
+ pqle, err := pqle.FromConfigPath(promConfigPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create pql engine : %v", err)
+ }
+
+ // notifier opts
+ notifierOpts := basealm.NotifierOptions{
+ QueueCapacity: 10000,
+ Timeout: 1 * time.Second,
+ AlertManagerURLs: []string{alertManagerURL},
+ }
+
+ // create manager opts
+ managerOpts := &rules.ManagerOptions{
+ NotifierOpts: notifierOpts,
+ Queriers: &rules.Queriers{
+ PqlEngine: pqle,
+ Ch: ch.GetConn(),
+ },
+ RepoURL: ruleRepoURL,
+ DBConn: db,
+ Context: context.Background(),
+ Logger: nil,
+ DisableRules: disableRules,
+ }
+
+ // create Manager
+ manager, err := rules.NewManager(managerOpts)
+ if err != nil {
+ return nil, fmt.Errorf("rule manager error: %v", err)
+ }
+
+ zap.S().Info("rules manager is ready")
+
+ return manager, nil
+}
diff --git a/ee/query-service/constants/constants.go b/ee/query-service/constants/constants.go
new file mode 100644
index 0000000000..ba9bb141a5
--- /dev/null
+++ b/ee/query-service/constants/constants.go
@@ -0,0 +1,28 @@
+package constants
+
+import (
+ "os"
+)
+
+const (
+ DefaultSiteURL = "https://localhost:3301"
+)
+
+var LicenseSignozIo = "https://license.signoz.io/api/v1"
+
+func GetOrDefaultEnv(key string, fallback string) string {
+ v := os.Getenv(key)
+ if len(v) == 0 {
+ return fallback
+ }
+ return v
+}
+
+// constant functions that override env vars
+
+// GetDefaultSiteURL returns default site url, primarily
+// used to send saml request and allowing backend to
+// handle http redirect
+func GetDefaultSiteURL() string {
+ return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
+}
diff --git a/ee/query-service/dao/factory.go b/ee/query-service/dao/factory.go
new file mode 100644
index 0000000000..f623e17783
--- /dev/null
+++ b/ee/query-service/dao/factory.go
@@ -0,0 +1,18 @@
+package dao
+
+import (
+ "fmt"
+
+ "go.signoz.io/signoz/ee/query-service/dao/sqlite"
+)
+
+func InitDao(engine, path string) (ModelDao, error) {
+
+ switch engine {
+ case "sqlite":
+ return sqlite.InitDB(path)
+ default:
+ return nil, fmt.Errorf("qsdb type: %s is not supported in query service", engine)
+ }
+
+}
diff --git a/ee/query-service/dao/interface.go b/ee/query-service/dao/interface.go
new file mode 100644
index 0000000000..7e17dcb635
--- /dev/null
+++ b/ee/query-service/dao/interface.go
@@ -0,0 +1,33 @@
+package dao
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
+ "go.signoz.io/signoz/ee/query-service/model"
+ basedao "go.signoz.io/signoz/pkg/query-service/dao"
+ baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+type ModelDao interface {
+ basedao.ModelDao
+
+ // SetFlagProvider sets the feature lookup provider
+ SetFlagProvider(flags baseint.FeatureLookup)
+
+ DB() *sqlx.DB
+
+ // auth methods
+ PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError)
+ CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError)
+
+ // org domain (auth domains) CRUD ops
+ ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError)
+ GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError)
+ CreateDomain(ctx context.Context, d *model.OrgDomain) basemodel.BaseApiError
+ UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError
+ DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError
+ GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError)
+}
diff --git a/ee/query-service/dao/sqlite/auth.go b/ee/query-service/dao/sqlite/auth.go
new file mode 100644
index 0000000000..13fd57259f
--- /dev/null
+++ b/ee/query-service/dao/sqlite/auth.go
@@ -0,0 +1,112 @@
+package sqlite
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "go.signoz.io/signoz/ee/query-service/constants"
+ "go.signoz.io/signoz/ee/query-service/model"
+ baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+ "go.uber.org/zap"
+)
+
+func (m *modelDao) CanUsePassword(ctx context.Context, email string) (bool, basemodel.BaseApiError) {
+ domain, apierr := m.GetDomainByEmail(ctx, email)
+ if apierr != nil {
+ return false, apierr
+ }
+
+ if domain != nil && domain.SsoEnabled {
+ // sso is enabled, check if the user has admin role
+ userPayload, baseapierr := m.GetUserByEmail(ctx, email)
+
+ if baseapierr != nil || userPayload == nil {
+ return false, baseapierr
+ }
+
+ if userPayload.Role != baseconst.AdminGroup {
+ return false, model.BadRequest(fmt.Errorf("auth method not supported"))
+ }
+
+ }
+
+ return true, nil
+}
+
+// PrecheckLogin is called when the login or signup page is loaded
+// to check sso login is to be prompted
+func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (*model.PrecheckResponse, basemodel.BaseApiError) {
+
+	// assume user is valid unless proven otherwise
+	resp := &model.PrecheckResponse{IsUser: true, CanSelfRegister: false}
+
+	// check if email is a valid user
+	userPayload, baseApiErr := m.GetUserByEmail(ctx, email)
+	if baseApiErr != nil {
+		return resp, baseApiErr
+	}
+
+	if userPayload == nil {
+		resp.IsUser = false
+	}
+	ssoAvailable := true
+	err := m.checkFeature(model.SSO)
+	if err != nil {
+		switch err.(type) {
+		case basemodel.ErrFeatureUnavailable:
+			// do nothing, just skip sso
+			ssoAvailable = false
+		default:
+			zap.S().Errorf("feature check failed", zap.String("featureKey", model.SSO), zap.Error(err))
+			return resp, model.BadRequest(err)
+		}
+	}
+
+	if ssoAvailable {
+
+		// find domain from email
+		orgDomain, apierr := m.GetDomainByEmail(ctx, email)
+		if apierr != nil {
+			var emailDomain string
+			emailComponents := strings.Split(email, "@")
+			if len(emailComponents) > 1 { // a single component means no "@"; indexing [1] would panic
+				emailDomain = emailComponents[1]
+			}
+			zap.S().Errorf("failed to get org domain from email", zap.String("emailDomain", emailDomain), apierr.ToError())
+			return resp, apierr
+		}
+
+		if orgDomain != nil && orgDomain.SsoEnabled {
+			// saml is enabled for this domain, lets prepare sso url
+
+			if sourceUrl == "" {
+				sourceUrl = constants.GetDefaultSiteURL()
+			}
+
+			// parse source url that generated the login request
+			var err error
+			escapedUrl, _ := url.QueryUnescape(sourceUrl)
+			siteUrl, err := url.Parse(escapedUrl)
+			if err != nil {
+				zap.S().Errorf("failed to parse referer", err)
+				return resp, model.InternalError(fmt.Errorf("failed to generate login request"))
+			}
+
+			// build Idp URL that will authenticate the user
+			// the front-end will redirect user to this url
+			resp.SsoUrl, err = orgDomain.BuildSsoUrl(siteUrl)
+
+			if err != nil {
+				zap.S().Errorf("failed to prepare saml request for domain", zap.String("domain", orgDomain.Name), err)
+				return resp, model.InternalError(err)
+			}
+
+			// set SSO to true, as the url is generated correctly
+			resp.SSO = true
+		}
+	}
+	return resp, nil
+}
diff --git a/ee/query-service/dao/sqlite/domain.go b/ee/query-service/dao/sqlite/domain.go
new file mode 100644
index 0000000000..b98bc70cdb
--- /dev/null
+++ b/ee/query-service/dao/sqlite/domain.go
@@ -0,0 +1,183 @@
+package sqlite
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "go.signoz.io/signoz/ee/query-service/model"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+ "go.uber.org/zap"
+)
+
+// StoredDomain represents stored database record for org domain
+
+type StoredDomain struct {
+ Id uuid.UUID `db:"id"`
+ Name string `db:"name"`
+ OrgId string `db:"org_id"`
+ Data string `db:"data"`
+ CreatedAt int64 `db:"created_at"`
+ UpdatedAt int64 `db:"updated_at"`
+}
+
+// GetDomain returns org domain for a given domain id
+func (m *modelDao) GetDomain(ctx context.Context, id uuid.UUID) (*model.OrgDomain, basemodel.BaseApiError) {
+
+	stored := StoredDomain{}
+	// use GetContext so the request context is honored, consistent with ListDomains
+	err := m.DB().GetContext(ctx, &stored, `SELECT * FROM org_domains WHERE id=$1 LIMIT 1`, id)
+
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, model.BadRequest(fmt.Errorf("invalid domain id"))
+		}
+		return nil, model.InternalError(err)
+	}
+	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
+	if err := domain.LoadConfig(stored.Data); err != nil {
+		return domain, model.InternalError(err)
+	}
+	return domain, nil
+}
+
+// ListDomains gets the list of auth domains by org id
+func (m *modelDao) ListDomains(ctx context.Context, orgId string) ([]model.OrgDomain, basemodel.BaseApiError) {
+ domains := []model.OrgDomain{}
+
+ stored := []StoredDomain{}
+ err := m.DB().SelectContext(ctx, &stored, `SELECT * FROM org_domains WHERE org_id=$1`, orgId)
+
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []model.OrgDomain{}, nil
+ }
+ return nil, model.InternalError(err)
+ }
+
+ for _, s := range stored {
+ domain := model.OrgDomain{Id: s.Id, Name: s.Name, OrgId: s.OrgId}
+ if err := domain.LoadConfig(s.Data); err != nil {
+ zap.S().Errorf("ListDomains() failed", zap.Error(err))
+ }
+ domains = append(domains, domain)
+ }
+
+ return domains, nil
+}
+
+// CreateDomain creates a new auth domain
+func (m *modelDao) CreateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
+
+	if domain.Id == uuid.Nil {
+		domain.Id = uuid.New()
+	}
+
+	if domain.OrgId == "" || domain.Name == "" {
+		return model.BadRequest(fmt.Errorf("domain creation failed, missing fields: OrgId, Name "))
+	}
+
+	configJson, err := json.Marshal(domain)
+	if err != nil {
+		zap.S().Errorf("failed to marshal domain config", zap.Error(err))
+		return model.InternalError(fmt.Errorf("domain creation failed"))
+	}
+
+	_, err = m.DB().ExecContext(ctx,
+		"INSERT INTO org_domains (id, name, org_id, data, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6)",
+		domain.Id,
+		domain.Name,
+		domain.OrgId,
+		configJson,
+		time.Now().Unix(),
+		time.Now().Unix())
+
+	if err != nil {
+		zap.S().Errorf("failed to insert domain in db", zap.Error(err))
+		return model.InternalError(fmt.Errorf("domain creation failed"))
+	}
+
+	return nil
+}
+
+// UpdateDomain updates stored config params for a domain
+func (m *modelDao) UpdateDomain(ctx context.Context, domain *model.OrgDomain) basemodel.BaseApiError {
+
+ if domain.Id == uuid.Nil {
+ zap.S().Errorf("domain update failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+ return model.InternalError(fmt.Errorf("domain update failed"))
+ }
+
+ configJson, err := json.Marshal(domain)
+ if err != nil {
+ zap.S().Errorf("domain update failed", zap.Error(err))
+ return model.InternalError(fmt.Errorf("domain update failed"))
+ }
+
+ _, err = m.DB().ExecContext(ctx,
+ "UPDATE org_domains SET data = $1, updated_at = $2 WHERE id = $3",
+ configJson,
+ time.Now().Unix(),
+ domain.Id)
+
+ if err != nil {
+ zap.S().Errorf("domain update failed", zap.Error(err))
+ return model.InternalError(fmt.Errorf("domain update failed"))
+ }
+
+ return nil
+}
+
+// DeleteDomain deletes an org domain
+func (m *modelDao) DeleteDomain(ctx context.Context, id uuid.UUID) basemodel.BaseApiError {
+
+ if id == uuid.Nil {
+ zap.S().Errorf("domain delete failed", zap.Error(fmt.Errorf("OrgDomain.Id is null")))
+ return model.InternalError(fmt.Errorf("domain delete failed"))
+ }
+
+ _, err := m.DB().ExecContext(ctx,
+ "DELETE FROM org_domains WHERE id = $1",
+ id)
+
+ if err != nil {
+ zap.S().Errorf("domain delete failed", zap.Error(err))
+ return model.InternalError(fmt.Errorf("domain delete failed"))
+ }
+
+ return nil
+}
+
+func (m *modelDao) GetDomainByEmail(ctx context.Context, email string) (*model.OrgDomain, basemodel.BaseApiError) {
+
+	if email == "" {
+		return nil, model.BadRequest(fmt.Errorf("could not find auth domain, missing fields: email "))
+	}
+
+	components := strings.Split(email, "@")
+	if len(components) < 2 {
+		return nil, model.BadRequest(fmt.Errorf("invalid email address"))
+	}
+
+	parsedDomain := components[1]
+
+	stored := StoredDomain{}
+	// use GetContext so the request context is honored, consistent with ListDomains
+	err := m.DB().GetContext(ctx, &stored, `SELECT * FROM org_domains WHERE name=$1 LIMIT 1`, parsedDomain)
+
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, nil
+		}
+		return nil, model.InternalError(err)
+	}
+
+	domain := &model.OrgDomain{Id: stored.Id, Name: stored.Name, OrgId: stored.OrgId}
+	if err := domain.LoadConfig(stored.Data); err != nil {
+		return domain, model.InternalError(err)
+	}
+	return domain, nil
+}
diff --git a/ee/query-service/dao/sqlite/modelDao.go b/ee/query-service/dao/sqlite/modelDao.go
new file mode 100644
index 0000000000..156f6b30e7
--- /dev/null
+++ b/ee/query-service/dao/sqlite/modelDao.go
@@ -0,0 +1,63 @@
+package sqlite
+
+import (
+ "fmt"
+
+ "github.com/jmoiron/sqlx"
+ basedao "go.signoz.io/signoz/pkg/query-service/dao"
+ basedsql "go.signoz.io/signoz/pkg/query-service/dao/sqlite"
+ baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+)
+
+type modelDao struct {
+ *basedsql.ModelDaoSqlite
+ flags baseint.FeatureLookup
+}
+
+// SetFlagProvider sets the feature lookup provider
+func (m *modelDao) SetFlagProvider(flags baseint.FeatureLookup) {
+ m.flags = flags
+}
+
+// CheckFeature confirms if a feature is available
+func (m *modelDao) checkFeature(key string) error {
+ if m.flags == nil {
+ return fmt.Errorf("flag provider not set")
+ }
+
+ return m.flags.CheckFeature(key)
+}
+
+// InitDB creates and extends base model DB repository
+func InitDB(dataSourceName string) (*modelDao, error) {
+ dao, err := basedsql.InitDB(dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ // set package variable so dependent base methods (e.g. AuthCache) will work
+ basedao.SetDB(dao)
+ m := &modelDao{ModelDaoSqlite: dao}
+
+ table_schema := `
+ PRAGMA foreign_keys = ON;
+ CREATE TABLE IF NOT EXISTS org_domains(
+ id TEXT PRIMARY KEY,
+ org_id TEXT NOT NULL,
+ name VARCHAR(50) NOT NULL UNIQUE,
+ created_at INTEGER NOT NULL,
+ updated_at INTEGER,
+ data TEXT NOT NULL,
+ FOREIGN KEY(org_id) REFERENCES organizations(id)
+ );`
+
+ _, err = m.DB().Exec(table_schema)
+ if err != nil {
+ return nil, fmt.Errorf("error in creating tables: %v", err.Error())
+ }
+
+ return m, nil
+}
+
+func (m *modelDao) DB() *sqlx.DB {
+ return m.ModelDaoSqlite.DB()
+}
diff --git a/ee/query-service/integrations/signozio/response.go b/ee/query-service/integrations/signozio/response.go
new file mode 100644
index 0000000000..c8812105f1
--- /dev/null
+++ b/ee/query-service/integrations/signozio/response.go
@@ -0,0 +1,20 @@
+package signozio
+
+type status string
+
+const (
+ statusSuccess status = "success"
+ statusError status = "error"
+)
+
+type ActivationResult struct {
+ Status status `json:"status"`
+ Data *ActivationResponse `json:"data,omitempty"`
+ ErrorType string `json:"errorType,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+type ActivationResponse struct {
+ ActivationId string `json:"ActivationId"`
+ PlanDetails string `json:"PlanDetails"`
+}
diff --git a/ee/query-service/integrations/signozio/signozio.go b/ee/query-service/integrations/signozio/signozio.go
new file mode 100644
index 0000000000..ac9d4128ab
--- /dev/null
+++ b/ee/query-service/integrations/signozio/signozio.go
@@ -0,0 +1,159 @@
+package signozio
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/pkg/errors"
+ "go.signoz.io/signoz/ee/query-service/constants"
+ "go.signoz.io/signoz/ee/query-service/model"
+ "go.uber.org/zap"
+)
+
+var C *Client
+
+const (
+ POST = "POST"
+ APPLICATION_JSON = "application/json"
+)
+
+type Client struct {
+ Prefix string
+}
+
+func New() *Client {
+ return &Client{
+ Prefix: constants.LicenseSignozIo,
+ }
+}
+
+func init() {
+ C = New()
+}
+
+// ActivateLicense sends key to license.signoz.io and gets activation data
+func ActivateLicense(key, siteId string) (*ActivationResponse, *model.ApiError) {
+	licenseReq := map[string]string{
+		"key":    key,
+		"siteId": siteId,
+	}
+
+	reqString, _ := json.Marshal(licenseReq)
+	httpResponse, err := http.Post(C.Prefix+"/licenses/activate", APPLICATION_JSON, bytes.NewBuffer(reqString))
+	if err != nil {
+		zap.S().Errorf("failed to connect to license.signoz.io", err)
+		return nil, model.BadRequest(fmt.Errorf("unable to connect with license.signoz.io, please check your network connection"))
+	}
+	// close the body on every path below, including read failures
+	defer httpResponse.Body.Close()
+
+	httpBody, err := ioutil.ReadAll(httpResponse.Body)
+	if err != nil {
+		zap.S().Errorf("failed to read activation response from license.signoz.io", err)
+		return nil, model.BadRequest(fmt.Errorf("failed to read activation response from license.signoz.io"))
+	}
+
+	// read api request result
+	result := ActivationResult{}
+	err = json.Unmarshal(httpBody, &result)
+	if err != nil {
+		zap.S().Errorf("failed to unmarshal activation response from license.signoz.io", err)
+		return nil, model.InternalError(errors.Wrap(err, "failed to unmarshal license activation response"))
+	}
+
+	// map the HTTP status to an API error kind
+	switch httpResponse.StatusCode {
+	case 200, 201:
+		return result.Data, nil
+	case 400, 401:
+		return nil, model.BadRequest(fmt.Errorf("failed to activate: %s", result.Error))
+	default:
+		return nil, model.InternalError(fmt.Errorf("failed to activate: %s", result.Error))
+	}
+
+}
+
+// ValidateLicense validates the license key
+func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError) {
+	validReq := map[string]string{
+		"activationId": activationId,
+	}
+
+	reqString, _ := json.Marshal(validReq)
+	response, err := http.Post(C.Prefix+"/licenses/validate", APPLICATION_JSON, bytes.NewBuffer(reqString))
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
+	}
+	// close the body on every path below, including read failures
+	defer response.Body.Close()
+
+	body, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, "failed to read validation response from license.signoz.io"))
+	}
+
+	// map the HTTP status to an API error kind
+	switch response.StatusCode {
+	case 200, 201:
+		a := ActivationResult{}
+		err = json.Unmarshal(body, &a)
+		if err != nil {
+			return nil, model.BadRequest(errors.Wrap(err, "failed to unmarshal license validation response"))
+		}
+		return a.Data, nil
+	case 400, 401:
+		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+			"bad request error received from license.signoz.io"))
+	default:
+		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+			"internal error received from license.signoz.io"))
+	}
+
+}
+
+func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
+	// build a POST request bound to ctx with the given content type
+	req, err := http.NewRequestWithContext(ctx, POST, url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Content-Type", contentType)
+	return req, nil
+}
+
+// SendUsage reports the usage of signoz to license server
+func SendUsage(ctx context.Context, usage *model.UsagePayload) *model.ApiError {
+ reqString, _ := json.Marshal(usage)
+ req, err := NewPostRequestWithCtx(ctx, C.Prefix+"/usage", APPLICATION_JSON, bytes.NewBuffer(reqString))
+ if err != nil {
+ return model.BadRequest(errors.Wrap(err, "unable to create http request"))
+ }
+
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return model.BadRequest(errors.Wrap(err, "unable to connect with license.signoz.io, please check your network connection"))
+ }
+
+ body, err := io.ReadAll(res.Body)
+ if err != nil {
+ return model.BadRequest(errors.Wrap(err, "failed to read usage response from license.signoz.io"))
+ }
+
+ defer res.Body.Close()
+
+ switch res.StatusCode {
+ case 200, 201:
+ return nil
+ case 400, 401:
+ return model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+ "bad request error received from license.signoz.io"))
+ default:
+ return model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+ "internal error received from license.signoz.io"))
+ }
+}
diff --git a/ee/query-service/interfaces/connector.go b/ee/query-service/interfaces/connector.go
new file mode 100644
index 0000000000..5428e421fa
--- /dev/null
+++ b/ee/query-service/interfaces/connector.go
@@ -0,0 +1,12 @@
+package interfaces
+
+import (
+ baseint "go.signoz.io/signoz/pkg/query-service/interfaces"
+)
+
+// DataConnector defines methods for interaction
+// with o11y data. for example - clickhouse
+type DataConnector interface {
+ Start(readerReady chan bool)
+ baseint.Reader
+}
diff --git a/ee/query-service/license/db.go b/ee/query-service/license/db.go
new file mode 100644
index 0000000000..a82f0377e2
--- /dev/null
+++ b/ee/query-service/license/db.go
@@ -0,0 +1,127 @@
+package license
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+
+ "go.signoz.io/signoz/ee/query-service/license/sqlite"
+ "go.signoz.io/signoz/ee/query-service/model"
+ "go.uber.org/zap"
+)
+
+// Repo is license repo. stores license keys in a secured DB
+type Repo struct {
+ db *sqlx.DB
+}
+
+// NewLicenseRepo initiates a new license repo
+func NewLicenseRepo(db *sqlx.DB) Repo {
+ return Repo{
+ db: db,
+ }
+}
+
+func (r *Repo) InitDB(engine string) error {
+ switch engine {
+ case "sqlite3", "sqlite":
+ return sqlite.InitDB(r.db)
+ default:
+ return fmt.Errorf("unsupported db")
+ }
+}
+
+func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
+ licenses := []model.License{}
+
+ query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
+
+ err := r.db.Select(&licenses, query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get licenses from db: %v", err)
+ }
+
+ return licenses, nil
+}
+
+// GetActiveLicense fetches the latest active license from DB.
+// A license is considered active when it has a start date and has either
+// no end date (-1, subscription) or an end date in the future; among the
+// active ones, the license with the latest ValidFrom wins.
+func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, error) {
+	licenses := []model.License{}
+
+	query := "SELECT key, activationId, planDetails, validationMessage FROM licenses"
+
+	err := r.db.Select(&licenses, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get active licenses from db: %v", err)
+	}
+
+	var active *model.License
+	for i := range licenses {
+		// index into the slice instead of taking &l of the range variable:
+		// &l aliases a single loop variable that is overwritten on every
+		// iteration, corrupting the previously selected license
+		l := &licenses[i]
+		l.ParsePlan()
+		if (l.ValidUntil == -1 || l.ValidUntil > time.Now().Unix()) &&
+			((active == nil && l.ValidFrom != 0) ||
+				(active != nil && l.ValidFrom > active.ValidFrom)) {
+			active = l
+		}
+	}
+
+	return active, nil
+}
+
+// InsertLicense inserts a new license in db
+func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
+
+	if l.Key == "" {
+		return fmt.Errorf("insert license failed: license key is required")
+	}
+
+	// column name casing matches the schema (validationMessage)
+	query := `INSERT INTO licenses
+						(key, planDetails, activationId, validationMessage)
+						VALUES ($1, $2, $3, $4)`
+
+	_, err := r.db.ExecContext(ctx,
+		query,
+		l.Key,
+		l.PlanDetails,
+		l.ActivationId,
+		l.ValidationMessage)
+
+	if err != nil {
+		// sugared Errorf expects a format verb, not a zap field; passing
+		// zap.Error(err) printed the field struct instead of the error
+		zap.S().Errorf("error in inserting license data: %v", err)
+		return fmt.Errorf("failed to insert license in db: %v", err)
+	}
+
+	return nil
+}
+
+// UpdatePlanDetails writes new plan details to the db
+func (r *Repo) UpdatePlanDetails(ctx context.Context,
+	key,
+	planDetails string) error {
+
+	if key == "" {
+		// error strings are lower-case per Go convention
+		return fmt.Errorf("update plan details failed: license key is required")
+	}
+
+	query := `UPDATE licenses
+						SET planDetails = $1,
+								updatedAt = $2
+						WHERE key = $3`
+
+	_, err := r.db.ExecContext(ctx, query, planDetails, time.Now(), key)
+
+	if err != nil {
+		// sugared Errorf expects a format verb, not a zap field; passing
+		// zap.Error(err) printed the field struct instead of the error
+		zap.S().Errorf("error in updating license: %v", err)
+		return fmt.Errorf("failed to update license in db: %v", err)
+	}
+
+	return nil
+}
diff --git a/ee/query-service/license/manager.go b/ee/query-service/license/manager.go
new file mode 100644
index 0000000000..306fa5a8d1
--- /dev/null
+++ b/ee/query-service/license/manager.go
@@ -0,0 +1,295 @@
+package license
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+
+ "sync"
+
+ validate "go.signoz.io/signoz/ee/query-service/integrations/signozio"
+ "go.signoz.io/signoz/ee/query-service/model"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+ "go.signoz.io/signoz/pkg/query-service/telemetry"
+ "go.uber.org/zap"
+)
+
+var LM *Manager
+
+// validate and update license every 24 hours
+var validationFrequency = 24 * 60 * time.Minute
+
+type Manager struct {
+ repo *Repo
+ mutex sync.Mutex
+
+ validatorRunning bool
+
+ // end the license validation, this is important to gracefully
+ // stopping validation and protect in-consistent updates
+ done chan struct{}
+
+ // terminated waits for the validate go routine to end
+ terminated chan struct{}
+
+ // last time the license was validated
+ lastValidated int64
+
+ // keep track of validation failure attempts
+ failedAttempts uint64
+
+ // keep track of active license and features
+ activeLicense *model.License
+ activeFeatures basemodel.FeatureSet
+}
+
+// StartManager initiates the license repo and the singleton license manager.
+// Subsequent calls return the already-started manager.
+func StartManager(dbType string, db *sqlx.DB) (*Manager, error) {
+
+	if LM != nil {
+		return LM, nil
+	}
+
+	repo := NewLicenseRepo(db)
+	err := repo.InitDB(dbType)
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to initiate license repo: %v", err)
+	}
+
+	m := &Manager{
+		repo: &repo,
+		// initialize the shutdown channels: Stop() closes done and waits on
+		// terminated, and closing a nil channel panics
+		done:       make(chan struct{}),
+		terminated: make(chan struct{}),
+	}
+
+	if err := m.start(); err != nil {
+		return m, err
+	}
+	LM = m
+	return m, nil
+}
+
+// start loads active license in memory and initiates validator
+func (lm *Manager) start() error {
+ err := lm.LoadActiveLicense()
+
+ return err
+}
+
+func (lm *Manager) Stop() {
+ close(lm.done)
+ <-lm.terminated
+}
+
+func (lm *Manager) SetActive(l *model.License) {
+ lm.mutex.Lock()
+ defer lm.mutex.Unlock()
+
+ if l == nil {
+ return
+ }
+
+ lm.activeLicense = l
+ lm.activeFeatures = l.FeatureSet
+ if !lm.validatorRunning {
+ // we want to make sure only one validator runs,
+ // we already have lock() so good to go
+ lm.validatorRunning = true
+ go lm.Validator(context.Background())
+ }
+
+}
+
+// LoadActiveLicense loads the most recent active license
+func (lm *Manager) LoadActiveLicense() error {
+ var err error
+ active, err := lm.repo.GetActiveLicense(context.Background())
+ if err != nil {
+ return err
+ }
+ if active != nil {
+ lm.SetActive(active)
+ } else {
+ zap.S().Info("No active license found.")
+ }
+
+ return nil
+}
+
+// GetLicenses returns all licenses from the repo, marking the currently
+// active one and defaulting a one-year validity for subscriptions.
+func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
+
+	licenses, err := lm.repo.GetLicenses(ctx)
+	if err != nil {
+		return nil, model.InternalError(err)
+	}
+
+	for _, l := range licenses {
+		l.ParsePlan()
+
+		// activeLicense is nil when no license was ever activated; guard
+		// against a nil-pointer dereference
+		if lm.activeLicense != nil && l.Key == lm.activeLicense.Key {
+			l.IsCurrent = true
+		}
+
+		if l.ValidUntil == -1 {
+			// for subscriptions, there is no end-date as such
+			// but for showing user some validity we default one year timespan
+			l.ValidUntil = l.ValidFrom + 31556926
+		}
+
+		response = append(response, l)
+	}
+
+	return
+}
+
+// Validator validates license after an epoch of time
+func (lm *Manager) Validator(ctx context.Context) {
+ defer close(lm.terminated)
+ tick := time.NewTicker(validationFrequency)
+ defer tick.Stop()
+
+ lm.Validate(ctx)
+
+ for {
+ select {
+ case <-lm.done:
+ return
+ default:
+ select {
+ case <-lm.done:
+ return
+ case <-tick.C:
+ lm.Validate(ctx)
+ }
+ }
+
+ }
+}
+
+// Validate validates the current active license
+func (lm *Manager) Validate(ctx context.Context) (reterr error) {
+ zap.S().Info("License validation started")
+ if lm.activeLicense == nil {
+ return nil
+ }
+
+ defer func() {
+ lm.mutex.Lock()
+
+ lm.lastValidated = time.Now().Unix()
+ if reterr != nil {
+ zap.S().Errorf("License validation completed with error", reterr)
+ atomic.AddUint64(&lm.failedAttempts, 1)
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED,
+ map[string]interface{}{"err": reterr.Error()})
+ } else {
+ zap.S().Info("License validation completed with no errors")
+ }
+
+ lm.mutex.Unlock()
+ }()
+
+ response, apiError := validate.ValidateLicense(lm.activeLicense.ActivationId)
+ if apiError != nil {
+ zap.S().Errorf("failed to validate license", apiError)
+ return apiError.Err
+ }
+
+ if response.PlanDetails == lm.activeLicense.PlanDetails {
+ // license plan hasnt changed, nothing to do
+ return nil
+ }
+
+ if response.PlanDetails != "" {
+
+ // copy and replace the active license record
+ l := model.License{
+ Key: lm.activeLicense.Key,
+ CreatedAt: lm.activeLicense.CreatedAt,
+ PlanDetails: response.PlanDetails,
+ ValidationMessage: lm.activeLicense.ValidationMessage,
+ ActivationId: lm.activeLicense.ActivationId,
+ }
+
+ if err := l.ParsePlan(); err != nil {
+ zap.S().Errorf("failed to parse updated license", zap.Error(err))
+ return err
+ }
+
+ // updated plan is parsable, check if plan has changed
+ if lm.activeLicense.PlanDetails != response.PlanDetails {
+ err := lm.repo.UpdatePlanDetails(ctx, lm.activeLicense.Key, response.PlanDetails)
+ if err != nil {
+ // unexpected db write issue but we can let the user continue
+ // and wait for update to work in next cycle.
+ zap.S().Errorf("failed to validate license", zap.Error(err))
+ }
+ }
+
+ // activate the update license plan
+ lm.SetActive(&l)
+ }
+
+ return nil
+}
+
+// Activate activates a license key with signoz server
+func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) {
+ defer func() {
+ if errResponse != nil {
+ telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED,
+ map[string]interface{}{"err": errResponse.Err.Error()})
+ }
+ }()
+
+ response, apiError := validate.ActivateLicense(key, "")
+ if apiError != nil {
+ zap.S().Errorf("failed to activate license", zap.Error(apiError.Err))
+ return nil, apiError
+ }
+
+ l := &model.License{
+ Key: key,
+ ActivationId: response.ActivationId,
+ PlanDetails: response.PlanDetails,
+ }
+
+ // parse validity and features from the plan details
+ err := l.ParsePlan()
+
+ if err != nil {
+ zap.S().Errorf("failed to activate license", zap.Error(err))
+ return nil, model.InternalError(err)
+ }
+
+ // store the license before activating it
+ err = lm.repo.InsertLicense(ctx, l)
+ if err != nil {
+ zap.S().Errorf("failed to activate license", zap.Error(err))
+ return nil, model.InternalError(err)
+ }
+
+ // license is valid, activate it
+ lm.SetActive(l)
+ return l, nil
+}
+
+// CheckFeature will be internally used by backend routines
+// for feature gating
+func (lm *Manager) CheckFeature(featureKey string) error {
+ if _, ok := lm.activeFeatures[featureKey]; ok {
+ return nil
+ }
+ return basemodel.ErrFeatureUnavailable{Key: featureKey}
+}
+
+// GetFeatureFlags returns current active features
+func (lm *Manager) GetFeatureFlags() basemodel.FeatureSet {
+ return lm.activeFeatures
+}
+
+// GetRepo return the license repo
+func (lm *Manager) GetRepo() *Repo {
+ return lm.repo
+}
diff --git a/ee/query-service/license/sqlite/init.go b/ee/query-service/license/sqlite/init.go
new file mode 100644
index 0000000000..a03153659c
--- /dev/null
+++ b/ee/query-service/license/sqlite/init.go
@@ -0,0 +1,37 @@
+package sqlite
+
+import (
+ "fmt"
+ "github.com/jmoiron/sqlx"
+)
+
+func InitDB(db *sqlx.DB) error {
+ var err error
+ if db == nil {
+ return fmt.Errorf("invalid db connection")
+ }
+
+ table_schema := `CREATE TABLE IF NOT EXISTS licenses(
+ key TEXT PRIMARY KEY,
+ createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ updatedAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ planDetails TEXT,
+ activationId TEXT,
+ validationMessage TEXT,
+ lastValidated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+
+ CREATE TABLE IF NOT EXISTS sites(
+ uuid TEXT PRIMARY KEY,
+ alias VARCHAR(180) DEFAULT 'PROD',
+ url VARCHAR(300),
+ createdAt TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+ `
+
+ _, err = db.Exec(table_schema)
+ if err != nil {
+ return fmt.Errorf("Error in creating licenses table: %s", err.Error())
+ }
+ return nil
+}
diff --git a/ee/query-service/main.go b/ee/query-service/main.go
new file mode 100644
index 0000000000..e29b86797a
--- /dev/null
+++ b/ee/query-service/main.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "go.signoz.io/signoz/ee/query-service/app"
+ "go.signoz.io/signoz/pkg/query-service/auth"
+ baseconst "go.signoz.io/signoz/pkg/query-service/constants"
+ "go.signoz.io/signoz/pkg/query-service/version"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+func initZapLog() *zap.Logger {
+ config := zap.NewDevelopmentConfig()
+ config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+ config.EncoderConfig.TimeKey = "timestamp"
+ config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ logger, _ := config.Build()
+ return logger
+}
+
+func main() {
+	var promConfigPath string
+
+	// disables rule execution but allows change to the rule definition
+	var disableRules bool
+
+	// the url used to build link in the alert messages in slack and other systems
+	var ruleRepoURL string
+
+	flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
+	flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)")
+	flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)")
+	flag.Parse()
+
+	loggerMgr := initZapLog()
+	zap.ReplaceGlobals(loggerMgr)
+	defer loggerMgr.Sync() // flushes buffer, if any
+
+	logger := loggerMgr.Sugar()
+	version.PrintVersion()
+
+	serverOptions := &app.ServerOptions{
+		HTTPHostPort:    baseconst.HTTPHostPort,
+		PromConfigPath:  promConfigPath,
+		PrivateHostPort: baseconst.PrivateHostPort,
+		DisableRules:    disableRules,
+		RuleRepoURL:     ruleRepoURL,
+	}
+
+	// Read the jwt secret key
+	auth.JwtSecret = os.Getenv("SIGNOZ_JWT_SECRET")
+
+	if len(auth.JwtSecret) == 0 {
+		zap.S().Warn("No JWT secret key is specified.")
+	} else {
+		// fixed misleading message (was "No JWT secret key set successfully.")
+		zap.S().Info("JWT secret key set successfully.")
+	}
+
+	server, err := app.NewServer(serverOptions)
+	if err != nil {
+		logger.Fatal("Failed to create server", zap.Error(err))
+	}
+
+	if err := server.Start(); err != nil {
+		logger.Fatal("Could not start servers", zap.Error(err))
+	}
+
+	if err := auth.InitAuthCache(context.Background()); err != nil {
+		logger.Fatal("Failed to initialize auth cache", zap.Error(err))
+	}
+
+	signalsChannel := make(chan os.Signal, 1)
+	signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
+
+	for {
+		select {
+		case status := <-server.HealthCheckStatus():
+			logger.Info("Received HealthCheck status: ", zap.Int("status", int(status)))
+		case <-signalsChannel:
+			logger.Fatal("Received OS Interrupt Signal ... ")
+		}
+	}
+}
diff --git a/ee/query-service/model/auth.go b/ee/query-service/model/auth.go
new file mode 100644
index 0000000000..8c3447a00d
--- /dev/null
+++ b/ee/query-service/model/auth.go
@@ -0,0 +1,21 @@
+package model
+
+import (
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+// PrecheckResponse contains login precheck response
+type PrecheckResponse struct {
+ SSO bool `json:"sso"`
+ SsoUrl string `json:"ssoUrl"`
+ CanSelfRegister bool `json:"canSelfRegister"`
+ IsUser bool `json:"isUser"`
+ SsoError string `json:"ssoError"`
+}
+
+// GettableInvitation overrides base object and adds precheck into
+// response
+type GettableInvitation struct {
+ *basemodel.InvitationResponseObject
+ Precheck *PrecheckResponse `json:"precheck"`
+}
diff --git a/ee/query-service/model/domain.go b/ee/query-service/model/domain.go
new file mode 100644
index 0000000000..acde0e2194
--- /dev/null
+++ b/ee/query-service/model/domain.go
@@ -0,0 +1,142 @@
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+ saml2 "github.com/russellhaering/gosaml2"
+ "go.signoz.io/signoz/ee/query-service/saml"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+type SSOType string
+
+const (
+ SAML SSOType = "SAML"
+ GoogleAuth SSOType = "GOOGLE_AUTH"
+)
+
+type SamlConfig struct {
+ SamlEntity string `json:"samlEntity"`
+ SamlIdp string `json:"samlIdp"`
+ SamlCert string `json:"samlCert"`
+}
+
+// OrgDomain identify org owned web domains for auth and other purposes
+type OrgDomain struct {
+ Id uuid.UUID `json:"id"`
+ Name string `json:"name"`
+ OrgId string `json:"orgId"`
+ SsoEnabled bool `json:"ssoEnabled"`
+ SsoType SSOType `json:"ssoType"`
+ SamlConfig *SamlConfig `json:"samlConfig"`
+ Org *basemodel.Organization
+}
+
+// Valid is used a pipeline function to check if org domain
+// loaded from db is valid
+func (od *OrgDomain) Valid(err error) error {
+ if err != nil {
+ return err
+ }
+
+ if od.Id == uuid.Nil || od.OrgId == "" {
+ return fmt.Errorf("both id and orgId are required")
+ }
+
+ return nil
+}
+
+// ValidNew checks if the org domain is valid for insertion in db
+func (od *OrgDomain) ValidNew() error {
+
+ if od.OrgId == "" {
+ return fmt.Errorf("orgId is required")
+ }
+
+ if od.Name == "" {
+ return fmt.Errorf("name is required")
+ }
+
+ return nil
+}
+
+// LoadConfig loads config params from json text into the receiver; on
+// parse failure the receiver is left unchanged.
+func (od *OrgDomain) LoadConfig(jsondata string) error {
+	d := *od
+	err := json.Unmarshal([]byte(jsondata), &d)
+	if err != nil {
+		// this is an unmarshal failure, not a marshal failure
+		return errors.Wrap(err, "failed to unmarshal json to OrgDomain{}")
+	}
+	*od = d
+	return nil
+}
+
+func (od *OrgDomain) GetSAMLEntityID() string {
+ if od.SamlConfig != nil {
+ return od.SamlConfig.SamlEntity
+ }
+ return ""
+}
+
+func (od *OrgDomain) GetSAMLIdpURL() string {
+ if od.SamlConfig != nil {
+ return od.SamlConfig.SamlIdp
+ }
+ return ""
+}
+
+func (od *OrgDomain) GetSAMLCert() string {
+ if od.SamlConfig != nil {
+ return od.SamlConfig.SamlCert
+ }
+ return ""
+}
+
+// PrepareSamlRequest creates a request accordingly gosaml2
+func (od *OrgDomain) PrepareSamlRequest(siteUrl *url.URL) (*saml2.SAMLServiceProvider, error) {
+
+ // this is the url Idp will call after login completion
+ acs := fmt.Sprintf("%s://%s/%s",
+ siteUrl.Scheme,
+ siteUrl.Host,
+ "api/v1/complete/saml")
+
+ // this is the address of the calling url, useful to redirect user
+ sourceUrl := fmt.Sprintf("%s://%s%s",
+ siteUrl.Scheme,
+ siteUrl.Host,
+ siteUrl.Path)
+
+ // ideally this should be some unique ID for each installation
+ // but since we dont have UI to support it, we default it to
+ // host. this issuer is an identifier of service provider (signoz)
+ // on id provider (e.g. azure, okta). Azure requires this id to be configured
+ // in their system, while others seem to not care about it.
+ // currently we default it to host from window.location (received from browser)
+ issuer := siteUrl.Host
+
+ return saml.PrepareRequest(issuer, acs, sourceUrl, od.GetSAMLEntityID(), od.GetSAMLIdpURL(), od.GetSAMLCert())
+}
+
+func (od *OrgDomain) BuildSsoUrl(siteUrl *url.URL) (ssoUrl string, err error) {
+
+ sp, err := od.PrepareSamlRequest(siteUrl)
+ if err != nil {
+ return "", err
+ }
+
+ fmtDomainId := strings.Replace(od.Id.String(), "-", ":", -1)
+
+ relayState := fmt.Sprintf("%s://%s%s?domainId=%s",
+ siteUrl.Scheme,
+ siteUrl.Host,
+ siteUrl.Path,
+ fmtDomainId)
+
+ return sp.BuildAuthURL(relayState)
+}
diff --git a/ee/query-service/model/errors.go b/ee/query-service/model/errors.go
new file mode 100644
index 0000000000..4c49f515c1
--- /dev/null
+++ b/ee/query-service/model/errors.go
@@ -0,0 +1,91 @@
+package model
+
+import (
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+type ApiError struct {
+ Typ basemodel.ErrorType
+ Err error
+}
+
+func (a *ApiError) Type() basemodel.ErrorType {
+ return a.Typ
+}
+
+// ToError returns the underlying error, or nil for a nil receiver.
+func (a *ApiError) ToError() error {
+	if a != nil {
+		return a.Err
+	}
+	// previously this fell through to a.Err and panicked on a nil receiver
+	return nil
+}
+
+func (a *ApiError) Error() string {
+ return a.Err.Error()
+}
+
+func (a *ApiError) IsNil() bool {
+ return a == nil || a.Err == nil
+}
+
+// NewApiError returns a ApiError object of given type
+func NewApiError(typ basemodel.ErrorType, err error) *ApiError {
+ return &ApiError{
+ Typ: typ,
+ Err: err,
+ }
+}
+
+// BadRequest returns a ApiError object of bad request
+func BadRequest(err error) *ApiError {
+ return &ApiError{
+ Typ: basemodel.ErrorBadData,
+ Err: err,
+ }
+}
+
+// InternalError returns a ApiError object of internal type
+func InternalError(err error) *ApiError {
+ return &ApiError{
+ Typ: basemodel.ErrorInternal,
+ Err: err,
+ }
+}
+
+var (
+ ErrorNone basemodel.ErrorType = ""
+ ErrorTimeout basemodel.ErrorType = "timeout"
+ ErrorCanceled basemodel.ErrorType = "canceled"
+ ErrorExec basemodel.ErrorType = "execution"
+ ErrorBadData basemodel.ErrorType = "bad_data"
+ ErrorInternal basemodel.ErrorType = "internal"
+ ErrorUnavailable basemodel.ErrorType = "unavailable"
+ ErrorNotFound basemodel.ErrorType = "not_found"
+ ErrorNotImplemented basemodel.ErrorType = "not_implemented"
+ ErrorUnauthorized basemodel.ErrorType = "unauthorized"
+ ErrorForbidden basemodel.ErrorType = "forbidden"
+ ErrorConflict basemodel.ErrorType = "conflict"
+ ErrorStreamingNotSupported basemodel.ErrorType = "streaming is not supported"
+)
+
+func init() {
+ ErrorNone = basemodel.ErrorNone
+ ErrorTimeout = basemodel.ErrorTimeout
+ ErrorCanceled = basemodel.ErrorCanceled
+ ErrorExec = basemodel.ErrorExec
+ ErrorBadData = basemodel.ErrorBadData
+ ErrorInternal = basemodel.ErrorInternal
+ ErrorUnavailable = basemodel.ErrorUnavailable
+ ErrorNotFound = basemodel.ErrorNotFound
+ ErrorNotImplemented = basemodel.ErrorNotImplemented
+ ErrorUnauthorized = basemodel.ErrorUnauthorized
+ ErrorForbidden = basemodel.ErrorForbidden
+ ErrorConflict = basemodel.ErrorConflict
+ ErrorStreamingNotSupported = basemodel.ErrorStreamingNotSupported
+}
+
+type ErrUnsupportedAuth struct{}
+
+func (errUnsupportedAuth ErrUnsupportedAuth) Error() string {
+ return "this authentication method not supported"
+}
diff --git a/ee/query-service/model/license.go b/ee/query-service/model/license.go
new file mode 100644
index 0000000000..e1e6a997da
--- /dev/null
+++ b/ee/query-service/model/license.go
@@ -0,0 +1,91 @@
+package model
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "time"
+
+ "github.com/pkg/errors"
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+type License struct {
+ Key string `json:"key" db:"key"`
+ ActivationId string `json:"activationId" db:"activationId"`
+ CreatedAt time.Time `db:"created_at"`
+
+ // PlanDetails contains the encrypted plan info
+ PlanDetails string `json:"planDetails" db:"planDetails"`
+
+ // stores parsed license details
+ LicensePlan
+
+ FeatureSet basemodel.FeatureSet
+
+ // populated in case license has any errors
+ ValidationMessage string `db:"validationMessage"`
+
+ // used only for sending details to front-end
+ IsCurrent bool `json:"isCurrent"`
+}
+
+func (l *License) MarshalJSON() ([]byte, error) {
+
+ return json.Marshal(&struct {
+ Key string `json:"key" db:"key"`
+ ActivationId string `json:"activationId" db:"activationId"`
+ ValidationMessage string `db:"validationMessage"`
+ IsCurrent bool `json:"isCurrent"`
+ PlanKey string `json:"planKey"`
+ ValidFrom time.Time `json:"ValidFrom"`
+ ValidUntil time.Time `json:"ValidUntil"`
+ Status string `json:"status"`
+ }{
+ Key: l.Key,
+ ActivationId: l.ActivationId,
+ IsCurrent: l.IsCurrent,
+ PlanKey: l.PlanKey,
+ ValidFrom: time.Unix(l.ValidFrom, 0),
+ ValidUntil: time.Unix(l.ValidUntil, 0),
+ Status: l.Status,
+ ValidationMessage: l.ValidationMessage,
+ })
+}
+
+type LicensePlan struct {
+ PlanKey string `json:"planKey"`
+ ValidFrom int64 `json:"validFrom"`
+ ValidUntil int64 `json:"validUntil"`
+ Status string `json:"status"`
+}
+
+func (l *License) ParsePlan() error {
+ l.LicensePlan = LicensePlan{}
+
+ planData, err := base64.StdEncoding.DecodeString(l.PlanDetails)
+ if err != nil {
+ return err
+ }
+
+ plan := LicensePlan{}
+ err = json.Unmarshal([]byte(planData), &plan)
+ if err != nil {
+ l.ValidationMessage = "failed to parse plan from license"
+ return errors.Wrap(err, "failed to parse plan from license")
+ }
+
+ l.LicensePlan = plan
+ l.ParseFeatures()
+ return nil
+}
+
+func (l *License) ParseFeatures() {
+ switch l.PlanKey {
+ case Pro:
+ l.FeatureSet = ProPlan
+ case Enterprise:
+ l.FeatureSet = EnterprisePlan
+ default:
+ l.FeatureSet = BasicPlan
+ }
+}
diff --git a/ee/query-service/model/plans.go b/ee/query-service/model/plans.go
new file mode 100644
index 0000000000..e68217730a
--- /dev/null
+++ b/ee/query-service/model/plans.go
@@ -0,0 +1,27 @@
+package model
+
+import (
+ basemodel "go.signoz.io/signoz/pkg/query-service/model"
+)
+
+const SSO = "SSO"
+const Basic = "BASIC_PLAN"
+const Pro = "PRO_PLAN"
+const Enterprise = "ENTERPRISE_PLAN"
+const DisableUpsell = "DISABLE_UPSELL"
+
+var BasicPlan = basemodel.FeatureSet{
+ Basic: true,
+ SSO: false,
+ DisableUpsell: false,
+}
+
+var ProPlan = basemodel.FeatureSet{
+ Pro: true,
+ SSO: true,
+}
+
+var EnterprisePlan = basemodel.FeatureSet{
+ Enterprise: true,
+ SSO: true,
+}
diff --git a/ee/query-service/model/usage.go b/ee/query-service/model/usage.go
new file mode 100644
index 0000000000..7d6eec91cc
--- /dev/null
+++ b/ee/query-service/model/usage.go
@@ -0,0 +1,35 @@
+package model
+
+import (
+ "time"
+
+ "github.com/google/uuid"
+)
+
+type UsageSnapshot struct {
+ CurrentLogSizeBytes uint64 `json:"currentLogSizeBytes"`
+ CurrentLogSizeBytesColdStorage uint64 `json:"currentLogSizeBytesColdStorage"`
+ CurrentSpansCount uint64 `json:"currentSpansCount"`
+ CurrentSpansCountColdStorage uint64 `json:"currentSpansCountColdStorage"`
+ CurrentSamplesCount uint64 `json:"currentSamplesCount"`
+ CurrentSamplesCountColdStorage uint64 `json:"currentSamplesCountColdStorage"`
+}
+
+type UsageBase struct {
+ Id uuid.UUID `json:"id" db:"id"`
+ InstallationId uuid.UUID `json:"installationId" db:"installation_id"`
+ ActivationId uuid.UUID `json:"activationId" db:"activation_id"`
+ CreatedAt time.Time `json:"createdAt" db:"created_at"`
+ FailedSyncRequest int `json:"failedSyncRequest" db:"failed_sync_request_count"`
+}
+
+type UsagePayload struct {
+ UsageBase
+ Metrics UsageSnapshot `json:"metrics"`
+ SnapshotDate time.Time `json:"snapshotDate"`
+}
+
+type Usage struct {
+ UsageBase
+ Snapshot string `db:"snapshot"`
+}
diff --git a/ee/query-service/saml/request.go b/ee/query-service/saml/request.go
new file mode 100644
index 0000000000..01af7afe28
--- /dev/null
+++ b/ee/query-service/saml/request.go
@@ -0,0 +1,107 @@
+package saml
+
+import (
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "strings"
+
+ saml2 "github.com/russellhaering/gosaml2"
+ dsig "github.com/russellhaering/goxmldsig"
+ "go.signoz.io/signoz/pkg/query-service/constants"
+ "go.uber.org/zap"
+)
+
+func LoadCertificateStore(certString string) (dsig.X509CertificateStore, error) {
+ certStore := &dsig.MemoryX509CertificateStore{
+ Roots: []*x509.Certificate{},
+ }
+
+ certData, err := base64.StdEncoding.DecodeString(certString)
+ if err != nil {
+ return certStore, fmt.Errorf(fmt.Sprintf("failed to read certificate: %v", err))
+ }
+
+ idpCert, err := x509.ParseCertificate(certData)
+ if err != nil {
+ return certStore, fmt.Errorf(fmt.Sprintf("failed to prepare saml request, invalid cert: %s", err.Error()))
+ }
+
+ certStore.Roots = append(certStore.Roots, idpCert)
+
+ return certStore, nil
+}
+
+func LoadCertFromPem(certString string) (dsig.X509CertificateStore, error) {
+ certStore := &dsig.MemoryX509CertificateStore{
+ Roots: []*x509.Certificate{},
+ }
+
+ block, _ := pem.Decode([]byte(certString))
+ if block == nil {
+ return certStore, fmt.Errorf("no valid pem cert found")
+ }
+
+ idpCert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return certStore, fmt.Errorf(fmt.Sprintf("failed to parse pem cert: %s", err.Error()))
+ }
+
+ certStore.Roots = append(certStore.Roots, idpCert)
+
+ return certStore, nil
+}
+
+// PrepareRequest prepares authorization URL (Idp Provider URL)
+func PrepareRequest(issuer, acsUrl, audience, entity, idp, certString string) (*saml2.SAMLServiceProvider, error) {
+ var certStore dsig.X509CertificateStore
+ if certString == "" {
+ return nil, fmt.Errorf("invalid certificate data")
+ }
+
+ var err error
+ if strings.Contains(certString, "-----BEGIN CERTIFICATE-----") {
+ certStore, err = LoadCertFromPem(certString)
+ } else {
+ certStore, err = LoadCertificateStore(certString)
+ }
+ // certificate store can not be created, throw error
+ if err != nil {
+ return nil, err
+ }
+
+ randomKeyStore := dsig.RandomKeyStoreForTest()
+
+ // SIGNOZ_SAML_RETURN_URL env var would support overriding window.location
+ // as return destination after saml request is complete from IdP side.
+ // this var is also useful for development, as it is easy to override with backend endpoint
+ // e.g. http://localhost:8080/api/v1/complete/saml
+ acsUrl = constants.GetOrDefaultEnv("SIGNOZ_SAML_RETURN_URL", acsUrl)
+
+ sp := &saml2.SAMLServiceProvider{
+ IdentityProviderSSOURL: idp,
+ IdentityProviderIssuer: entity,
+ ServiceProviderIssuer: issuer,
+ AssertionConsumerServiceURL: acsUrl,
+ SignAuthnRequests: true,
+ AllowMissingAttributes: true,
+
+ // about cert stores -sender(signoz app) and receiver (idp)
+ // The random key (random key store) is sender cert. The public cert store(IDPCertificateStore) that you see on org domain is receiver cert (idp provided).
+ // At the moment, the library we use doesn't bother about sender cert and IdP too. It just adds additional layer of security, which we can explore in future versions
+ // The receiver (Idp) cert will be different for each org domain. Imagine cloud setup where each company setups their domain that integrates with their Idp.
+ // @signoz.io
+ // @next.io
+ // Each of above will have their own Idp setup and hence separate public cert to decrypt the response.
+ // The way SAML request travels is -
+ // SigNoz Backend -> IdP Login Screen -> SigNoz Backend -> SigNoz Frontend
+ // ---------------- | -------------------| -------------------------------------
+ // The dotted lines indicate request boundries. So if you notice, the response from Idp starts a new request. hence we need relay state to pass the context around.
+
+ IDPCertificateStore: certStore,
+ SPKeyStore: randomKeyStore,
+ }
+ zap.S().Debugf("SAML request:", sp)
+ return sp, nil
+}
diff --git a/ee/query-service/usage/manager.go b/ee/query-service/usage/manager.go
new file mode 100644
index 0000000000..067b65e0b4
--- /dev/null
+++ b/ee/query-service/usage/manager.go
@@ -0,0 +1,317 @@
+package usage
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/ClickHouse/clickhouse-go/v2"
+ "github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
+ "go.uber.org/zap"
+
+ licenseserver "go.signoz.io/signoz/ee/query-service/integrations/signozio"
+ "go.signoz.io/signoz/ee/query-service/license"
+ "go.signoz.io/signoz/ee/query-service/model"
+ "go.signoz.io/signoz/ee/query-service/usage/repository"
+ "go.signoz.io/signoz/pkg/query-service/utils/encryption"
+)
+
+const (
+ MaxRetries = 3
+ RetryInterval = 5 * time.Second
+ stateUnlocked uint32 = 0
+ stateLocked uint32 = 1
+)
+
+var (
+ // collect usage every hour
+ collectionFrequency = 1 * time.Hour
+
+ // send usage every 24 hour
+ uploadFrequency = 24 * time.Hour
+
+ locker = stateUnlocked
+)
+
+// Manager collects usage snapshots from ClickHouse on a schedule,
+// persists them (encrypted) via the repository, and uploads them to the
+// license server. Construct with New; drive with Start/Stop.
+type Manager struct {
+	// repository stores encrypted usage snapshots in the relational DB
+	repository *repository.Repository
+
+	// clickhouseConn is used to read table/disk sizes from system.parts
+	clickhouseConn clickhouse.Conn
+
+	// licenseRepo provides the active license whose activation id keys payloads
+	licenseRepo *license.Repo
+
+	// done ends the usage routine; this is important to gracefully
+	// stop usage reporting and protect against inconsistent updates
+	done chan struct{}
+
+	// terminated waits for the UsageExporter go routine to end
+	terminated chan struct{}
+}
+
+// New creates a usage Manager backed by db (snapshot storage), the
+// license repo and a ClickHouse connection. It initializes the usage
+// repository schema for dbType (e.g. "sqlite3") and returns an error
+// if that fails.
+func New(dbType string, db *sqlx.DB, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn) (*Manager, error) {
+	repo := repository.New(db)
+
+	err := repo.Init(dbType)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initiate usage repo: %v", err)
+	}
+
+	m := &Manager{
+		repository:     repo,
+		clickhouseConn: clickhouseConn,
+		licenseRepo:    licenseRepo,
+		// the control channels must be allocated here: Stop closes done and
+		// receives from terminated, and UsageExporter closes terminated on
+		// exit. Leaving them nil makes Stop panic (close of nil channel)
+		// and then block forever on the nil receive.
+		done:       make(chan struct{}),
+		terminated: make(chan struct{}),
+	}
+	return m, nil
+}
+
+// Start uploads any previously collected snapshots, takes a fresh
+// snapshot if none exists within the last collectionFrequency, and
+// launches the background UsageExporter goroutine. It is a no-op
+// (returning nil) when no active license is present. Only one exporter
+// may run at a time; a second Start fails while the lock is held.
+func (lm *Manager) Start() error {
+	// compares the locker and stateUnlocked if both are same lock is applied else returns error
+	if !atomic.CompareAndSwapUint32(&locker, stateUnlocked, stateLocked) {
+		return fmt.Errorf("usage exporter is locked")
+	}
+
+	// check if license is present or not
+	license, err := lm.licenseRepo.GetActiveLicense(context.Background())
+	if err != nil {
+		return fmt.Errorf("failed to get active license")
+	}
+	if license == nil {
+		// we will not start the usage reporting if license is not present.
+		zap.S().Info("no license present, skipping usage reporting")
+		return nil
+	}
+
+	// upload previous snapshots if any
+	// NOTE(review): on any error below, locker stays locked until Stop()
+	// resets it — confirm callers always Stop on a failed Start
+	err = lm.UploadUsage(context.Background())
+	if err != nil {
+		return err
+	}
+
+	// collect a snapshot in case one wasn't collected in (t - collectionFrequency)
+	err = lm.CollectCurrentUsage(context.Background())
+	if err != nil {
+		return err
+	}
+
+	go lm.UsageExporter(context.Background())
+
+	return nil
+}
+
+// CollectCurrentUsage collects a fresh usage snapshot unless one was
+// already recorded within the last collectionFrequency window.
+func (lm *Manager) CollectCurrentUsage(ctx context.Context) error {
+	// look for any snapshot newer than (now - collectionFrequency)
+	cutoff := time.Now().Add(-collectionFrequency)
+	recentExists, err := lm.repository.CheckSnapshotGtCreatedAt(ctx, cutoff)
+	if err != nil {
+		return err
+	}
+
+	if recentExists {
+		zap.S().Info("Nothing to collect")
+		return nil
+	}
+
+	zap.S().Info("Collecting current usage")
+	return lm.CollectAndStoreUsage(ctx)
+}
+
+// UsageExporter is the background loop: it collects a snapshot every
+// collectionFrequency, uploads pending snapshots (and prunes expired
+// ones) every uploadFrequency, and exits when the done channel closes.
+// It closes terminated on exit so Stop can wait for it.
+func (lm *Manager) UsageExporter(ctx context.Context) {
+	defer close(lm.terminated)
+
+	collectionTicker := time.NewTicker(collectionFrequency)
+	defer collectionTicker.Stop()
+
+	uploadTicker := time.NewTicker(uploadFrequency)
+	defer uploadTicker.Stop()
+
+	for {
+		select {
+		case <-lm.done:
+			return
+		case <-collectionTicker.C:
+			// errors are logged, not fatal: the next tick retries
+			if err := lm.CollectAndStoreUsage(ctx); err != nil {
+				zap.S().Errorf("failed to collect usage snapshot: %v", err)
+			}
+		case <-uploadTicker.C:
+			if err := lm.UploadUsage(ctx); err != nil {
+				zap.S().Errorf("failed to upload usage snapshots: %v", err)
+			}
+			// remove the old snapshots
+			if err := lm.repository.DropOldSnapshots(ctx); err != nil {
+				zap.S().Errorf("failed to drop old usage snapshots: %v", err)
+			}
+		}
+	}
+}
+
+// TableSize is one per-table, per-disk aggregate row scanned from
+// ClickHouse system.parts (see GetUsageFromClickHouse).
+type TableSize struct {
+	Table string `ch:"table"`
+	DiskName string `ch:"disk_name"`
+	Rows uint64 `ch:"rows"`
+	UncompressedBytes uint64 `ch:"uncompressed_bytes"`
+}
+
+// CollectAndStoreUsage reads current usage from ClickHouse, wraps it in
+// a payload keyed by the active license's activation id, and stores an
+// encrypted snapshot via the repository.
+func (lm *Manager) CollectAndStoreUsage(ctx context.Context) error {
+	snap, err := lm.GetUsageFromClickHouse(ctx)
+	if err != nil {
+		return err
+	}
+
+	license, err := lm.licenseRepo.GetActiveLicense(ctx)
+	if err != nil {
+		return err
+	}
+	// GetActiveLicense may return (nil, nil) when no license is configured
+	// (see the check in Start); dereferencing it below would panic.
+	if license == nil {
+		return fmt.Errorf("no active license found while collecting usage")
+	}
+
+	activationId, _ := uuid.Parse(license.ActivationId)
+	// TODO (nitya) : Add installation ID in the payload
+	payload := model.UsagePayload{
+		UsageBase: model.UsageBase{
+			ActivationId:      activationId,
+			FailedSyncRequest: 0,
+		},
+		Metrics:      *snap,
+		SnapshotDate: time.Now(),
+	}
+
+	err = lm.repository.InsertSnapshot(ctx, &payload)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetUsageFromClickHouse aggregates row counts and uncompressed bytes
+// per table and disk from system.parts for the logs ('logs'), metrics
+// ('samples_v2') and traces ('signoz_index_v2') tables. Rows on the
+// "default" disk are counted as hot storage; any other disk name is
+// treated as cold storage.
+func (lm *Manager) GetUsageFromClickHouse(ctx context.Context) (*model.UsageSnapshot, error) {
+	tableSizes := []TableSize{}
+	snap := model.UsageSnapshot{}
+
+	// get usage from clickhouse
+	query := `
+		SELECT
+			table,
+			disk_name,
+			sum(rows) as rows,
+			sum(data_uncompressed_bytes) AS uncompressed_bytes
+		FROM system.parts
+		WHERE active AND (database in ('signoz_logs', 'signoz_metrics', 'signoz_traces')) AND (table in ('logs','samples_v2', 'signoz_index_v2'))
+		GROUP BY
+			table,
+			disk_name
+		ORDER BY table
+	`
+	err := lm.clickhouseConn.Select(ctx, &tableSizes, query)
+	if err != nil {
+		return nil, err
+	}
+
+	// map each (table, disk) aggregate onto the snapshot fields
+	for _, val := range tableSizes {
+		switch val.Table {
+		case "logs":
+			if val.DiskName == "default" {
+				snap.CurrentLogSizeBytes = val.UncompressedBytes
+			} else {
+				snap.CurrentLogSizeBytesColdStorage = val.UncompressedBytes
+			}
+		case "samples_v2":
+			if val.DiskName == "default" {
+				snap.CurrentSamplesCount = val.Rows
+			} else {
+				snap.CurrentSamplesCountColdStorage = val.Rows
+			}
+		case "signoz_index_v2":
+			if val.DiskName == "default" {
+				snap.CurrentSpansCount = val.Rows
+			} else {
+				snap.CurrentSpansCountColdStorage = val.Rows
+			}
+		}
+	}
+
+	return &snap, nil
+}
+
+// UploadUsage decrypts every snapshot that is not yet synced (and has
+// fewer than MaxFailedSyncCount failures), rebuilds its payload and
+// uploads it with retry/backoff. The decryption key is the first 32
+// characters of the activation id's string form (hyphens included) —
+// the same key used by repository.InsertSnapshot when writing.
+// NOTE(review): an error on one snapshot aborts the whole batch; the
+// remaining snapshots are retried on the next upload tick.
+func (lm *Manager) UploadUsage(ctx context.Context) error {
+	snapshots, err := lm.repository.GetSnapshotsNotSynced(ctx)
+	if err != nil {
+		return err
+	}
+
+	if len(snapshots) <= 0 {
+		zap.S().Info("no snapshots to upload, skipping.")
+		return nil
+	}
+
+	zap.S().Info("uploading snapshots")
+	for _, snap := range snapshots {
+		// key: first 32 chars of the UUID string (hyphens included)
+		metricsBytes, err := encryption.Decrypt([]byte(snap.ActivationId.String()[:32]), []byte(snap.Snapshot))
+		if err != nil {
+			return err
+		}
+
+		metrics := model.UsageSnapshot{}
+		err = json.Unmarshal(metricsBytes, &metrics)
+		if err != nil {
+			return err
+		}
+
+		err = lm.UploadUsageWithExponentalBackOff(ctx, model.UsagePayload{
+			UsageBase: model.UsageBase{
+				Id:                snap.Id,
+				InstallationId:    snap.InstallationId,
+				ActivationId:      snap.ActivationId,
+				FailedSyncRequest: snap.FailedSyncRequest,
+			},
+			SnapshotDate: snap.CreatedAt,
+			Metrics:      metrics,
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UploadUsageWithExponentalBackOff sends the payload to the license
+// server, retrying up to MaxRetries with a linearly growing sleep
+// (RetryInterval * attempt). Every failed attempt increments the
+// snapshot's failed-request count; after the final failure nil is
+// returned (the failure is captured in the count) and the snapshot
+// stays unsynced. On success the snapshot is marked synced.
+func (lm *Manager) UploadUsageWithExponentalBackOff(ctx context.Context, payload model.UsagePayload) error {
+	for i := 1; i <= MaxRetries; i++ {
+		apiErr := licenseserver.SendUsage(ctx, &payload)
+		if apiErr != nil && i == MaxRetries {
+			err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
+			if err != nil {
+				// pass plain errors to the printf-style logger; a zap.Field
+				// argument would render the field struct, not the message
+				zap.S().Errorf("failed to update the failure count for snapshot in DB: %v", err)
+				return err
+			}
+			// log the upload error (the DB error above is nil on this path)
+			zap.S().Errorf("retries stopped : %v", apiErr.Err)
+			// not returning error here since it is captured in the failed count
+			return nil
+		} else if apiErr != nil {
+			// sleeping for exponential backoff
+			sleepDuration := RetryInterval * time.Duration(i)
+			zap.S().Errorf("failed to upload snapshot retrying after %v secs : %v", sleepDuration.Seconds(), apiErr.Err)
+			time.Sleep(sleepDuration)
+
+			// update the failed request count
+			err := lm.repository.IncrementFailedRequestCount(ctx, payload.Id)
+			if err != nil {
+				zap.S().Errorf("failed to update the failure count for snapshot in DB: %v", err)
+				return err
+			}
+		} else {
+			break
+		}
+	}
+
+	// update the database that it is synced
+	err := lm.repository.MoveToSynced(ctx, payload.Id)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Stop signals the exporter goroutine to exit, releases the run lock so
+// a future Start can succeed, and blocks until the goroutine finishes
+// (terminated is closed by UsageExporter's deferred close).
+// NOTE(review): if Start returned before launching UsageExporter (e.g.
+// no active license), the receive on terminated blocks forever —
+// confirm callers only Stop after a fully successful Start.
+func (lm *Manager) Stop() {
+	close(lm.done)
+	atomic.StoreUint32(&locker, stateUnlocked)
+	<-lm.terminated
+}
diff --git a/ee/query-service/usage/repository/repository.go b/ee/query-service/usage/repository/repository.go
new file mode 100644
index 0000000000..57bf5388b6
--- /dev/null
+++ b/ee/query-service/usage/repository/repository.go
@@ -0,0 +1,139 @@
+package repository
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
+ "go.uber.org/zap"
+
+ "go.signoz.io/signoz/ee/query-service/model"
+ "go.signoz.io/signoz/ee/query-service/usage/sqlite"
+ "go.signoz.io/signoz/pkg/query-service/utils/encryption"
+)
+
+const (
+ MaxFailedSyncCount = 9 // a snapshot will be ignored if the max failed count is greater than or equal to 9
+ SnapShotLife = 3 * 24 * time.Hour
+)
+
+// Repository is usage Repository which stores usage snapshot in a secured DB
+type Repository struct {
+	// db is the underlying sqlx connection (sqlite-backed; see Init)
+	db *sqlx.DB
+}
+
+// New initiates a new usage Repository wrapping the given connection.
+func New(db *sqlx.DB) *Repository {
+	return &Repository{db: db}
+}
+
+// Init creates the usage table schema for the given engine; only
+// sqlite-compatible engines are supported.
+func (r *Repository) Init(engine string) error {
+	if engine != "sqlite3" && engine != "sqlite" {
+		return fmt.Errorf("unsupported db")
+	}
+	return sqlite.InitDB(r.db)
+}
+
+// InsertSnapshot assigns a fresh id to the payload, encrypts the
+// marshalled metrics using the first 32 characters of the activation
+// id's string form (hyphens included) as the key, and inserts the row
+// into the usage table. Mutates usage.Id as a side effect.
+func (r *Repository) InsertSnapshot(ctx context.Context, usage *model.UsagePayload) error {
+
+	snapshotBytes, err := json.Marshal(usage.Metrics)
+	if err != nil {
+		return err
+	}
+
+	usage.Id = uuid.New()
+
+	encryptedSnapshot, err := encryption.Encrypt([]byte(usage.ActivationId.String()[:32]), snapshotBytes)
+	if err != nil {
+		return err
+	}
+
+	query := `INSERT INTO usage(id, activation_id, snapshot)
+				VALUES ($1, $2, $3)`
+	_, err = r.db.ExecContext(ctx,
+		query,
+		usage.Id,
+		usage.ActivationId,
+		string(encryptedSnapshot),
+	)
+	if err != nil {
+		// pass the error itself to the printf-style logger; a zap.Field
+		// argument would print the field struct instead of the message
+		zap.S().Errorf("error inserting usage data: %v", err)
+		return fmt.Errorf("failed to insert usage in db: %v", err)
+	}
+	return nil
+}
+
+// MoveToSynced marks the snapshot with the given id as synced and
+// records the sync timestamp.
+func (r *Repository) MoveToSynced(ctx context.Context, id uuid.UUID) error {
+
+	query := `UPDATE usage
+	SET synced = 'true',
+		synced_at = $1
+	WHERE id = $2`
+
+	_, err := r.db.ExecContext(ctx, query, time.Now(), id)
+
+	if err != nil {
+		// log the error value itself; a zap.Field inside a printf-style
+		// call renders the field struct, not the message
+		zap.S().Errorf("error in updating usage: %v", err)
+		return fmt.Errorf("failed to update usage in db: %v", err)
+	}
+
+	return nil
+}
+
+// IncrementFailedRequestCount bumps the failed-sync counter for the
+// snapshot with the given id; snapshots reaching MaxFailedSyncCount are
+// excluded by GetSnapshotsNotSynced.
+func (r *Repository) IncrementFailedRequestCount(ctx context.Context, id uuid.UUID) error {
+
+	query := `UPDATE usage SET failed_sync_request_count = failed_sync_request_count + 1 WHERE id = $1`
+	_, err := r.db.ExecContext(ctx, query, id)
+	if err != nil {
+		// log the error value itself, not a zap.Field, in a printf-style call
+		zap.S().Errorf("error in updating usage: %v", err)
+		return fmt.Errorf("failed to update usage in db: %v", err)
+	}
+
+	return nil
+}
+
+// GetSnapshotsNotSynced returns the unsynced snapshots (oldest first)
+// whose failed sync count is still below MaxFailedSyncCount.
+func (r *Repository) GetSnapshotsNotSynced(ctx context.Context) ([]*model.Usage, error) {
+	query := `SELECT id,created_at, activation_id, snapshot, failed_sync_request_count from usage where synced!='true' and failed_sync_request_count < $1 order by created_at asc `
+
+	snapshots := []*model.Usage{}
+	if err := r.db.SelectContext(ctx, &snapshots, query, MaxFailedSyncCount); err != nil {
+		return nil, err
+	}
+
+	return snapshots, nil
+}
+
+// DropOldSnapshots deletes snapshots older than SnapShotLife.
+func (r *Repository) DropOldSnapshots(ctx context.Context) error {
+	query := `delete from usage where created_at <= $1`
+
+	_, err := r.db.ExecContext(ctx, query, time.Now().Add(-(SnapShotLife)))
+	if err != nil {
+		// log the error value itself, not a zap.Field, in a printf-style call
+		zap.S().Errorf("failed to remove old snapshots from db: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+// CheckSnapshotGtCreatedAt reports whether any snapshot exists with
+// created_at strictly greater than the provided timestamp.
+func (r *Repository) CheckSnapshotGtCreatedAt(ctx context.Context, ts time.Time) (bool, error) {
+
+	var snapshots uint64
+	// $1 must be a bare placeholder: quoting it as '$1' made the driver
+	// compare created_at against the literal string "$1" and ignore ts.
+	// Also use count(*) — count() with no argument is a syntax error in
+	// SQLite, which is the engine this repository runs on (see Init).
+	query := `SELECT count(*) from usage where created_at > $1`
+
+	err := r.db.QueryRowContext(ctx, query, ts).Scan(&snapshots)
+	if err != nil {
+		return false, err
+	}
+
+	return snapshots > 0, nil
+}
diff --git a/ee/query-service/usage/sqlite/init.go b/ee/query-service/usage/sqlite/init.go
new file mode 100644
index 0000000000..4fefa644ae
--- /dev/null
+++ b/ee/query-service/usage/sqlite/init.go
@@ -0,0 +1,32 @@
+package sqlite
+
+import (
+ "fmt"
+
+ "github.com/jmoiron/sqlx"
+)
+
+// InitDB creates the usage table on the given connection if it does
+// not already exist. Returns an error when db is nil or the DDL fails.
+// Note: synced defaults to the string 'false'; readers compare
+// synced != 'true' rather than using SQLite's integer booleans.
+func InitDB(db *sqlx.DB) error {
+	var err error
+	if db == nil {
+		return fmt.Errorf("invalid db connection")
+	}
+
+	table_schema := `CREATE TABLE IF NOT EXISTS usage(
+		id UUID PRIMARY KEY,
+		created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+		updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+		activation_id UUID,
+		snapshot TEXT,
+		synced BOOLEAN DEFAULT 'false',
+		synced_at TIMESTAMP,
+		failed_sync_request_count INTEGER DEFAULT 0
+	);
+	`
+
+	_, err = db.Exec(table_schema)
+	if err != nil {
+		return fmt.Errorf("error in creating usage table: %v", err.Error())
+	}
+	return nil
+}
diff --git a/frontend/conf/default.conf b/frontend/conf/default.conf
index 37b77463d4..8c1eafe956 100644
--- a/frontend/conf/default.conf
+++ b/frontend/conf/default.conf
@@ -13,7 +13,7 @@ server {
# to handle uri issue 414 from nginx
client_max_body_size 24M;
- large_client_header_buffers 8 16k;
+ large_client_header_buffers 8 128k;
location / {
root /usr/share/nginx/html;
diff --git a/frontend/public/locales/en-GB/licenses.json b/frontend/public/locales/en-GB/licenses.json
new file mode 100644
index 0000000000..5d46685f9d
--- /dev/null
+++ b/frontend/public/locales/en-GB/licenses.json
@@ -0,0 +1,13 @@
+{
+ "column_license_key": "License Key",
+ "column_valid_from": "Valid From",
+ "column_valid_until": "Valid Until",
+ "column_license_status": "Status",
+ "button_apply": "Apply",
+ "placeholder_license_key": "Enter a License Key",
+ "tab_current_license": "Current License",
+ "tab_license_history": "History",
+ "loading_licenses": "Loading licenses...",
+ "enter_license_key": "Please enter a license key",
+ "license_applied": "License applied successfully, please refresh the page to see changes."
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en-GB/login.json b/frontend/public/locales/en-GB/login.json
new file mode 100644
index 0000000000..84c9e1d3a9
--- /dev/null
+++ b/frontend/public/locales/en-GB/login.json
@@ -0,0 +1,22 @@
+{
+ "label_email": "Email",
+ "placeholder_email": "name@yourcompany.com",
+ "label_password": "Password",
+ "button_initiate_login": "Next",
+ "button_login": "Login",
+ "login_page_title": "Login with SigNoz",
+ "login_with_sso": "Login with SSO",
+ "login_with_pwd": "Login with password",
+ "forgot_password": "Forgot password?",
+ "create_an_account": "Create an account",
+ "prompt_if_admin": "If you are admin,",
+ "prompt_create_account": "If you are setting up SigNoz for the first time,",
+ "prompt_no_account": "Don't have an account? Contact your admin to send you an invite link.",
+ "prompt_forgot_password": "Ask your admin to reset your password and send you a new invite link",
+ "prompt_on_sso_error": "Are you trying to resolve SSO configuration issue?",
+ "unexpected_error": "Sorry, something went wrong",
+  "failed_to_login": "Sorry, failed to login",
+ "invalid_email": "Please enter a valid email address",
+ "invalid_account": "This account does not exist. To create a new account, contact your admin to get an invite link",
+ "invalid_config": "Invalid configuration detected, please contact your administrator"
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en-GB/organizationsettings.json b/frontend/public/locales/en-GB/organizationsettings.json
index 74797b447b..7daaf5c781 100644
--- a/frontend/public/locales/en-GB/organizationsettings.json
+++ b/frontend/public/locales/en-GB/organizationsettings.json
@@ -9,5 +9,10 @@
"add_another_team_member": "Add another team member",
"invite_team_members": "Invite team members",
"invite_members": "Invite Members",
- "pending_invites": "Pending Invites"
+ "pending_invites": "Pending Invites",
+ "authenticated_domains": "Authenticated Domains",
+ "delete_domain_message": "Are you sure you want to delete this domain?",
+ "delete_domain": "Delete Domain",
+ "add_domain": "Add Domains",
+ "saml_settings":"Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly"
}
diff --git a/frontend/public/locales/en-GB/signup.json b/frontend/public/locales/en-GB/signup.json
new file mode 100644
index 0000000000..f5657a07d2
--- /dev/null
+++ b/frontend/public/locales/en-GB/signup.json
@@ -0,0 +1,18 @@
+{
+ "label_email": "Email",
+ "placeholder_email": "name@yourcompany.com",
+ "label_password": "Password",
+ "label_confirm_password": "Confirm Password",
+ "label_firstname": "First Name",
+ "placeholder_firstname": "Your Name",
+ "label_orgname": "Organization Name",
+ "placeholder_orgname": "Your Company",
+ "prompt_keepme_posted": "Keep me updated on new SigNoz features",
+  "prompt_anonymise": "Anonymise my usage data. We collect data to measure product usage",
+ "failed_confirm_password": "Passwords don’t match. Please try again",
+ "unexpected_error": "Something went wrong",
+ "failed_to_initiate_login": "Signup completed but failed to initiate login",
+ "token_required": "Invite token is required for signup, please request one from your admin",
+ "button_get_started": "Get Started",
+ "prompt_admin_warning": "This will create an admin account. If you are not an admin, please ask your admin for an invite link"
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en-GB/translation.json b/frontend/public/locales/en-GB/translation.json
index 682f034e69..c37480259b 100644
--- a/frontend/public/locales/en-GB/translation.json
+++ b/frontend/public/locales/en-GB/translation.json
@@ -6,7 +6,7 @@
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
- "stale_version": "You are on an older version and may be loosing on the latest features we have shipped. We recommend to upgrade to the latest version",
+ "stale_version": "You are on an older version and may be losing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {
diff --git a/frontend/public/locales/en/licenses.json b/frontend/public/locales/en/licenses.json
new file mode 100644
index 0000000000..5d46685f9d
--- /dev/null
+++ b/frontend/public/locales/en/licenses.json
@@ -0,0 +1,13 @@
+{
+ "column_license_key": "License Key",
+ "column_valid_from": "Valid From",
+ "column_valid_until": "Valid Until",
+ "column_license_status": "Status",
+ "button_apply": "Apply",
+ "placeholder_license_key": "Enter a License Key",
+ "tab_current_license": "Current License",
+ "tab_license_history": "History",
+ "loading_licenses": "Loading licenses...",
+ "enter_license_key": "Please enter a license key",
+ "license_applied": "License applied successfully, please refresh the page to see changes."
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en/login.json b/frontend/public/locales/en/login.json
new file mode 100644
index 0000000000..84c9e1d3a9
--- /dev/null
+++ b/frontend/public/locales/en/login.json
@@ -0,0 +1,22 @@
+{
+ "label_email": "Email",
+ "placeholder_email": "name@yourcompany.com",
+ "label_password": "Password",
+ "button_initiate_login": "Next",
+ "button_login": "Login",
+ "login_page_title": "Login with SigNoz",
+ "login_with_sso": "Login with SSO",
+ "login_with_pwd": "Login with password",
+ "forgot_password": "Forgot password?",
+ "create_an_account": "Create an account",
+ "prompt_if_admin": "If you are admin,",
+ "prompt_create_account": "If you are setting up SigNoz for the first time,",
+ "prompt_no_account": "Don't have an account? Contact your admin to send you an invite link.",
+ "prompt_forgot_password": "Ask your admin to reset your password and send you a new invite link",
+ "prompt_on_sso_error": "Are you trying to resolve SSO configuration issue?",
+ "unexpected_error": "Sorry, something went wrong",
+  "failed_to_login": "Sorry, failed to login",
+ "invalid_email": "Please enter a valid email address",
+ "invalid_account": "This account does not exist. To create a new account, contact your admin to get an invite link",
+ "invalid_config": "Invalid configuration detected, please contact your administrator"
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en/organizationsettings.json b/frontend/public/locales/en/organizationsettings.json
index 74797b447b..7daaf5c781 100644
--- a/frontend/public/locales/en/organizationsettings.json
+++ b/frontend/public/locales/en/organizationsettings.json
@@ -9,5 +9,10 @@
"add_another_team_member": "Add another team member",
"invite_team_members": "Invite team members",
"invite_members": "Invite Members",
- "pending_invites": "Pending Invites"
+ "pending_invites": "Pending Invites",
+ "authenticated_domains": "Authenticated Domains",
+ "delete_domain_message": "Are you sure you want to delete this domain?",
+ "delete_domain": "Delete Domain",
+ "add_domain": "Add Domains",
+ "saml_settings":"Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly"
}
diff --git a/frontend/public/locales/en/signup.json b/frontend/public/locales/en/signup.json
new file mode 100644
index 0000000000..f5657a07d2
--- /dev/null
+++ b/frontend/public/locales/en/signup.json
@@ -0,0 +1,18 @@
+{
+ "label_email": "Email",
+ "placeholder_email": "name@yourcompany.com",
+ "label_password": "Password",
+ "label_confirm_password": "Confirm Password",
+ "label_firstname": "First Name",
+ "placeholder_firstname": "Your Name",
+ "label_orgname": "Organization Name",
+ "placeholder_orgname": "Your Company",
+ "prompt_keepme_posted": "Keep me updated on new SigNoz features",
+  "prompt_anonymise": "Anonymise my usage data. We collect data to measure product usage",
+ "failed_confirm_password": "Passwords don’t match. Please try again",
+ "unexpected_error": "Something went wrong",
+ "failed_to_initiate_login": "Signup completed but failed to initiate login",
+ "token_required": "Invite token is required for signup, please request one from your admin",
+ "button_get_started": "Get Started",
+ "prompt_admin_warning": "This will create an admin account. If you are not an admin, please ask your admin for an invite link"
+}
\ No newline at end of file
diff --git a/frontend/public/locales/en/translation.json b/frontend/public/locales/en/translation.json
index 682f034e69..c37480259b 100644
--- a/frontend/public/locales/en/translation.json
+++ b/frontend/public/locales/en/translation.json
@@ -6,7 +6,7 @@
"release_notes": "Release Notes",
"read_how_to_upgrade": "Read instructions on how to upgrade",
"latest_version_signoz": "You are running the latest version of SigNoz.",
- "stale_version": "You are on an older version and may be loosing on the latest features we have shipped. We recommend to upgrade to the latest version",
+ "stale_version": "You are on an older version and may be losing out on the latest features we have shipped. We recommend to upgrade to the latest version",
"oops_something_went_wrong_version": "Oops.. facing issues with fetching updated version information",
"n_a": "N/A",
"routes": {
diff --git a/frontend/src/AppRoutes/pageComponents.ts b/frontend/src/AppRoutes/pageComponents.ts
index c83dfcd991..0b241fa121 100644
--- a/frontend/src/AppRoutes/pageComponents.ts
+++ b/frontend/src/AppRoutes/pageComponents.ts
@@ -119,3 +119,7 @@ export const SomethingWentWrong = Loadable(
/* webpackChunkName: "SomethingWentWrong" */ 'pages/SomethingWentWrong'
),
);
+
+export const LicensePage = Loadable(
+ () => import(/* webpackChunkName: "All Channels" */ 'pages/License'),
+);
diff --git a/frontend/src/AppRoutes/routes.ts b/frontend/src/AppRoutes/routes.ts
index 9bf52b39de..7210fd5928 100644
--- a/frontend/src/AppRoutes/routes.ts
+++ b/frontend/src/AppRoutes/routes.ts
@@ -12,6 +12,7 @@ import {
EditRulesPage,
ErrorDetails,
GettingStarted,
+ LicensePage,
ListAllALertsPage,
Login,
Logs,
@@ -166,6 +167,13 @@ const routes: AppRoutes[] = [
component: AllErrors,
key: 'ALL_ERROR',
},
+ {
+ path: ROUTES.LIST_LICENSES,
+ exact: true,
+ component: LicensePage,
+ isPrivate: true,
+ key: 'LIST_LICENSES',
+ },
{
path: ROUTES.ERROR_DETAIL,
exact: true,
diff --git a/frontend/src/api/SAML/deleteDomain.ts b/frontend/src/api/SAML/deleteDomain.ts
new file mode 100644
index 0000000000..50c2b51a80
--- /dev/null
+++ b/frontend/src/api/SAML/deleteDomain.ts
@@ -0,0 +1,24 @@
+import axios from 'api';
+import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
+import { AxiosError } from 'axios';
+import { ErrorResponse, SuccessResponse } from 'types/api';
+import { PayloadProps, Props } from 'types/api/SAML/deleteDomain';
+
+const deleteDomain = async (
+ props: Props,
+): Promise
;
+}
+
+interface ListLicensesProps {
+ licenses: PayloadProps;
+}
+
+export default ListLicenses;
diff --git a/frontend/src/container/Licenses/applyFormStyles.ts b/frontend/src/container/Licenses/applyFormStyles.ts
new file mode 100644
index 0000000000..634aa582e4
--- /dev/null
+++ b/frontend/src/container/Licenses/applyFormStyles.ts
@@ -0,0 +1,26 @@
+import { Form } from 'antd';
+import FormItem from 'antd/lib/form/FormItem';
+import styled from 'styled-components';
+
+export const ApplyFormContainer = styled.div`
+ &&& {
+ padding-top: 1em;
+ padding-bottom: 1em;
+ }
+`;
+
+export const ApplyForm = styled(Form)`
+ &&& {
+ width: 100%;
+ }
+`;
+
+export const LicenseInput = styled(FormItem)`
+ width: 200px;
+ &:focus {
+ width: 350px;
+ input {
+ width: 350px;
+ }
+ }
+`;
diff --git a/frontend/src/container/Licenses/index.tsx b/frontend/src/container/Licenses/index.tsx
new file mode 100644
index 0000000000..04b20b9927
--- /dev/null
+++ b/frontend/src/container/Licenses/index.tsx
@@ -0,0 +1,43 @@
+import { Tabs, Typography } from 'antd';
+import getAll from 'api/licenses/getAll';
+import Spinner from 'components/Spinner';
+import useFetch from 'hooks/useFetch';
+import React from 'react';
+import { useTranslation } from 'react-i18next';
+
+import ApplyLicenseForm from './ApplyLicenseForm';
+import ListLicenses from './ListLicenses';
+
+const { TabPane } = Tabs;
+
+function Licenses(): JSX.Element {
+ const { t } = useTranslation(['licenses']);
+ const { loading, payload, error, errorMessage } = useFetch(getAll);
+
+ if (error) {
+ return
record.name + v4()}
+ dataSource={!SSOFlag ? notEntripriseData : []}
+ columns={columns}
+ tableLayout="fixed"
+ />
+
+ );
+ }
+
+ const tableData = SSOFlag ? data?.payload || [] : notEntripriseData;
+
+ return (
+ <>
+
record.name + v4()}
+ />
+
+ >
+ );
+}
+
+export default AuthDomains;
diff --git a/frontend/src/container/OrganizationSettings/AuthDomains/styles.ts b/frontend/src/container/OrganizationSettings/AuthDomains/styles.ts
new file mode 100644
index 0000000000..26ebeec106
--- /dev/null
+++ b/frontend/src/container/OrganizationSettings/AuthDomains/styles.ts
@@ -0,0 +1,7 @@
+import styled from 'styled-components';
+
+export const Container = styled.div`
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+`;
diff --git a/frontend/src/container/OrganizationSettings/AuthDomains/utils.test.ts b/frontend/src/container/OrganizationSettings/AuthDomains/utils.test.ts
new file mode 100644
index 0000000000..e6903d4c5c
--- /dev/null
+++ b/frontend/src/container/OrganizationSettings/AuthDomains/utils.test.ts
@@ -0,0 +1,60 @@
+import { SAMLDomain } from 'types/api/SAML/listDomain';
+
+import { getIsValidCertificate } from './utils';
+
+const inValidCase: SAMLDomain['samlConfig'][] = [
+ {
+ samlCert: '',
+ samlEntity: '',
+ samlIdp: '',
+ },
+ {
+ samlCert: '',
+ samlEntity: '',
+ samlIdp: 'asd',
+ },
+ {
+ samlCert: 'sample certificate',
+ samlEntity: '',
+ samlIdp: '',
+ },
+ {
+ samlCert: 'sample cert',
+ samlEntity: 'sample entity',
+ samlIdp: '',
+ },
+];
+
+const validCase: SAMLDomain['samlConfig'][] = [
+ {
+ samlCert: 'sample cert',
+ samlEntity: 'sample entity',
+ samlIdp: 'sample idp',
+ },
+];
+
+describe('Utils', () => {
+	inValidCase.forEach((config) => {
+		it('should return invalid saml config', () => {
+			expect(
+				getIsValidCertificate({
+					samlCert: config.samlCert,
+					samlEntity: config.samlEntity,
+					samlIdp: config.samlIdp,
+				}),
+			).toBe(false);
+		});
+	});
+
+	validCase.forEach((config) => {
+		// this branch asserts the *valid* case — description fixed to match
+		it('should return valid saml config', () => {
+			expect(
+				getIsValidCertificate({
+					samlCert: config.samlCert,
+					samlEntity: config.samlEntity,
+					samlIdp: config.samlIdp,
+				}),
+			).toBe(true);
+		});
+	});
+});
diff --git a/frontend/src/container/OrganizationSettings/AuthDomains/utils.ts b/frontend/src/container/OrganizationSettings/AuthDomains/utils.ts
new file mode 100644
index 0000000000..17154cc917
--- /dev/null
+++ b/frontend/src/container/OrganizationSettings/AuthDomains/utils.ts
@@ -0,0 +1,6 @@
+export const getIsValidCertificate = (
+ config: Record