Merge pull request #4616 from SigNoz/release/v0.40.0

Release/v0.40.0
Prashant Shahi 2024-02-28 19:46:06 +05:30 committed by GitHub
commit 71c4fcc382
129 changed files with 5389 additions and 292 deletions

View File

@@ -152,6 +152,12 @@ jobs:
           echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
           echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
           echo 'CLARITY_PROJECT_ID="${{ secrets.CLARITY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
+          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
+          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
+          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
+          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
+          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
       - name: Install dependencies
         working-directory: frontend
         run: yarn install

View File

@@ -146,7 +146,7 @@ services:
       condition: on-failure
   query-service:
-    image: signoz/query-service:0.39.1
+    image: signoz/query-service:0.40.0
     command:
       [
         "-config=/root/config/prometheus.yml",
@@ -160,7 +160,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
+      - ClickHouseUrl=tcp://clickhouse:9000
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
@@ -186,7 +186,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:0.39.1
+    image: signoz/frontend:0.40.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -199,7 +199,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector:
-    image: signoz/signoz-otel-collector:0.88.13
+    image: signoz/signoz-otel-collector:0.88.14
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -237,7 +237,7 @@ services:
       - query-service
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.88.13
+    image: signoz/signoz-schema-migrator:0.88.14
     deploy:
       restart_policy:
         condition: on-failure

View File

@@ -66,7 +66,7 @@ services:
       - --storage.path=/data
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.13}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.14}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -81,7 +81,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.88.13
+    image: signoz/signoz-otel-collector:0.88.14
     command:
       [
         "--config=/etc/otel-collector-config.yaml",

View File

@@ -164,7 +164,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.39.1}
+    image: signoz/query-service:${DOCKER_TAG:-0.40.0}
     container_name: signoz-query-service
     command:
       [
@@ -179,7 +179,7 @@ services:
       - ../dashboards:/root/config/dashboards
       - ./data/signoz/:/var/lib/signoz/
     environment:
-      - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
+      - ClickHouseUrl=tcp://clickhouse:9000
       - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
       - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
       - DASHBOARDS_PATH=/root/config/dashboards
@@ -203,7 +203,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.39.1}
+    image: signoz/frontend:${DOCKER_TAG:-0.40.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -215,7 +215,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.13}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.14}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -229,7 +229,7 @@ services:
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.13}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.14}
     container_name: signoz-otel-collector
     command:
       [

View File

@@ -136,7 +136,7 @@ func (ah *APIHandler) getPATs(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	zap.S().Infof("Get PATs for user: %+v", user.Id)
-	pats, apierr := ah.AppDao().ListPATs(ctx, user.Id)
+	pats, apierr := ah.AppDao().ListPATs(ctx)
 	if apierr != nil {
 		RespondError(w, apierr, nil)
 		return
@@ -155,18 +155,7 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
 		}, nil)
 		return
 	}
-	pat, apierr := ah.AppDao().GetPATByID(ctx, id)
-	if apierr != nil {
-		RespondError(w, apierr, nil)
-		return
-	}
-	if pat.UserID != user.Id {
-		RespondError(w, &model.ApiError{
-			Typ: model.ErrorUnauthorized,
-			Err: fmt.Errorf("unauthorized PAT revoke request"),
-		}, nil)
-		return
-	}
 	zap.S().Debugf("Revoke PAT with id: %+v", id)
 	if apierr := ah.AppDao().RevokePAT(ctx, id, user.Id); apierr != nil {
 		RespondError(w, apierr, nil)

View File

@@ -39,6 +39,6 @@ type ModelDao interface {
 	UpdatePATLastUsed(ctx context.Context, pat string, lastUsed int64) basemodel.BaseApiError
 	GetPATByID(ctx context.Context, id string) (*model.PAT, basemodel.BaseApiError)
 	GetUserByPAT(ctx context.Context, token string) (*basemodel.UserPayload, basemodel.BaseApiError)
-	ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError)
+	ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError)
 	RevokePAT(ctx context.Context, id string, userID string) basemodel.BaseApiError
 }

View File

@@ -13,7 +13,7 @@ import (
 func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basemodel.BaseApiError) {
 	result, err := m.DB().ExecContext(ctx,
-		"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)",
+		"INSERT INTO personal_access_tokens (user_id, token, role, name, created_at, expires_at, updated_at, updated_by_user_id, last_used, revoked) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
 		p.UserID,
 		p.Token,
 		p.Role,
@@ -22,6 +22,8 @@ func (m *modelDao) CreatePAT(ctx context.Context, p model.PAT) (model.PAT, basem
 		p.ExpiresAt,
 		p.UpdatedAt,
 		p.UpdatedByUserID,
+		p.LastUsed,
+		p.Revoked,
 	)
 	if err != nil {
 		zap.S().Errorf("Failed to insert PAT in db, err: %v", zap.Error(err))
@@ -78,11 +80,11 @@ func (m *modelDao) UpdatePATLastUsed(ctx context.Context, token string, lastUsed
 	return nil
 }

-func (m *modelDao) ListPATs(ctx context.Context, userID string) ([]model.PAT, basemodel.BaseApiError) {
+func (m *modelDao) ListPATs(ctx context.Context) ([]model.PAT, basemodel.BaseApiError) {
 	pats := []model.PAT{}
-	if err := m.DB().Select(&pats, `SELECT * FROM personal_access_tokens WHERE user_id=? and revoked=false ORDER by updated_at DESC;`, userID); err != nil {
-		zap.S().Errorf("Failed to fetch PATs for user: %s, err: %v", userID, zap.Error(err))
+	if err := m.DB().Select(&pats, "SELECT * FROM personal_access_tokens WHERE revoked=false ORDER by updated_at DESC;"); err != nil {
+		zap.S().Errorf("Failed to fetch PATs err: %v", zap.Error(err))
 		return nil, model.InternalError(fmt.Errorf("failed to fetch PATs"))
 	}
 	for i := range pats {
View File

@@ -24,7 +24,7 @@ const config: Config.InitialOptions = {
 		'^(?!.*\\.(js|jsx|mjs|cjs|ts|tsx|css|json)$)': 'jest-preview/transforms/file',
 	},
 	transformIgnorePatterns: [
-		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens)/)',
+		'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@signozhq/design-tokens|d3-interpolate|d3-color)/)',
 	],
 	setupFilesAfterEnv: ['<rootDir>jest.setup.ts'],
 	testPathIgnorePatterns: ['/node_modules/', '/public/'],

View File

@@ -40,6 +40,8 @@
 		"@monaco-editor/react": "^4.3.1",
 		"@radix-ui/react-tabs": "1.0.4",
 		"@radix-ui/react-tooltip": "1.0.7",
+		"@sentry/react": "7.102.1",
+		"@sentry/webpack-plugin": "2.14.2",
 		"@signozhq/design-tokens": "0.0.8",
 		"@uiw/react-md-editor": "3.23.5",
 		"@xstate/react": "^3.0.0",
@@ -71,6 +73,7 @@
 		"fontfaceobserver": "2.3.0",
 		"history": "4.10.1",
 		"html-webpack-plugin": "5.5.0",
+		"http-proxy-middleware": "2.0.6",
 		"i18next": "^21.6.12",
 		"i18next-browser-languagedetector": "^6.1.3",
 		"i18next-http-backend": "^1.3.2",

View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="80px" height="80px" viewBox="0 0 80 80" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 64 (93537) - https://sketch.com -->
<title>Icon-Architecture/64/Arch_Amazon-EC2_64</title>
<desc>Created with Sketch.</desc>
<defs>
<linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="linearGradient-1">
<stop stop-color="#C8511B" offset="0%"></stop>
<stop stop-color="#FF9900" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Icon-Architecture/64/Arch_Amazon-EC2_64" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Icon-Architecture-BG/64/Compute" fill="url(#linearGradient-1)">
<rect id="Rectangle" x="0" y="0" width="80" height="80"></rect>
</g>
<path d="M27,53 L52,53 L52,28 L27,28 L27,53 Z M54,28 L58,28 L58,30 L54,30 L54,34 L58,34 L58,36 L54,36 L54,39 L58,39 L58,41 L54,41 L54,45 L58,45 L58,47 L54,47 L54,51 L58,51 L58,53 L54,53 L54,53.136 C54,54.164 53.164,55 52.136,55 L52,55 L52,59 L50,59 L50,55 L46,55 L46,59 L44,59 L44,55 L41,55 L41,59 L39,59 L39,55 L35,55 L35,59 L33,59 L33,55 L29,55 L29,59 L27,59 L27,55 L26.864,55 C25.836,55 25,54.164 25,53.136 L25,53 L22,53 L22,51 L25,51 L25,47 L22,47 L22,45 L25,45 L25,41 L22,41 L22,39 L25,39 L25,36 L22,36 L22,34 L25,34 L25,30 L22,30 L22,28 L25,28 L25,27.864 C25,26.836 25.836,26 26.864,26 L27,26 L27,22 L29,22 L29,26 L33,26 L33,22 L35,22 L35,26 L39,26 L39,22 L41,22 L41,26 L44,26 L44,22 L46,22 L46,26 L50,26 L50,22 L52,22 L52,26 L52.136,26 C53.164,26 54,26.836 54,27.864 L54,28 Z M41,65.876 C41,65.944 40.944,66 40.876,66 L14.124,66 C14.056,66 14,65.944 14,65.876 L14,39.124 C14,39.056 14.056,39 14.124,39 L20,39 L20,37 L14.124,37 C12.953,37 12,37.953 12,39.124 L12,65.876 C12,67.047 12.953,68 14.124,68 L40.876,68 C42.047,68 43,67.047 43,65.876 L43,61 L41,61 L41,65.876 Z M68,14.124 L68,40.876 C68,42.047 67.047,43 65.876,43 L60,43 L60,41 L65.876,41 C65.944,41 66,40.944 66,40.876 L66,14.124 C66,14.056 65.944,14 65.876,14 L39.124,14 C39.056,14 39,14.056 39,14.124 L39,20 L37,20 L37,14.124 C37,12.953 37.953,12 39.124,12 L65.876,12 C67.047,12 68,12.953 68,14.124 L68,14.124 Z" id="Amazon-EC2_Icon_64_Squid" fill="#FFFFFF"></path>
</g>
</svg>


View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="80px" height="80px" viewBox="0 0 80 80" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 64 (93537) - https://sketch.com -->
<title>Icon-Architecture/64/Arch_Amazon-Elastic-Container-Service_64</title>
<desc>Created with Sketch.</desc>
<defs>
<linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="linearGradient-1">
<stop stop-color="#C8511B" offset="0%"></stop>
<stop stop-color="#FF9900" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Icon-Architecture/64/Arch_Amazon-Elastic-Container-Service_64" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Icon-Architecture-BG/64/Containers" fill="url(#linearGradient-1)">
<rect id="Rectangle" x="0" y="0" width="80" height="80"></rect>
</g>
<path d="M64,48.2340095 L56,43.4330117 L56,32.0000169 C56,31.6440171 55.812,31.3150172 55.504,31.1360173 L44,24.4260204 L44,14.7520248 L64,26.5710194 L64,48.2340095 Z M65.509,25.13902 L43.509,12.139026 C43.199,11.9560261 42.818,11.9540261 42.504,12.131026 C42.193,12.3090259 42,12.6410257 42,13.0000256 L42,25.0000201 C42,25.3550199 42.189,25.6840198 42.496,25.8640197 L54,32.5740166 L54,44.0000114 C54,44.3510113 54.185,44.6770111 54.486,44.857011 L64.486,50.8570083 C64.644,50.9520082 64.822,51 65,51 C65.17,51 65.34,50.9570082 65.493,50.8700083 C65.807,50.6930084 66,50.3600085 66,50 L66,26.0000196 C66,25.6460198 65.814,25.31902 65.509,25.13902 L65.509,25.13902 Z M40.445,66.863001 L17,54.3990067 L17,26.5710194 L37,14.7520248 L37,24.4510204 L26.463,31.1560173 C26.175,31.3400172 26,31.6580171 26,32.0000169 L26,49.0000091 C26,49.373009 26.208,49.7150088 26.538,49.8870087 L39.991,56.8870055 C40.28,57.0370055 40.624,57.0380055 40.912,56.8880055 L53.964,50.1440086 L61.996,54.9640064 L40.445,66.863001 Z M64.515,54.1420068 L54.515,48.1420095 C54.217,47.9640096 53.849,47.9520096 53.541,48.1120095 L40.455,54.8730065 L28,48.3930094 L28,32.5490167 L38.537,25.8440197 C38.825,25.6600198 39,25.3420199 39,25.0000201 L39,13.0000256 C39,12.6410257 38.808,12.3090259 38.496,12.131026 C38.184,11.9540261 37.802,11.9560261 37.491,12.139026 L15.491,25.13902 C15.187,25.31902 15,25.6460198 15,26.0000196 L15,55 C15,55.3690062 15.204,55.7090061 15.53,55.883006 L39.984,68.8830001 C40.131,68.961 40.292,69 40.453,69 C40.62,69 40.786,68.958 40.937,68.8750001 L64.484,55.875006 C64.797,55.7020061 64.993,55.3750062 65.0001416,55.0180064 C65.006,54.6600066 64.821,54.3260067 64.515,54.1420068 L64.515,54.1420068 Z" id="Amazon-Elastic-Container-Service_Icon_64_Squid" fill="#FFFFFF"></path>
</g>
</svg>


View File

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none"><path fill="url(#amazon-eks-color-16__paint0_linear_879_141)" fill-rule="evenodd" d="M6.381 10.148h.897V8.121l1.837 2.027h1.164L7.997 7.642l2.169-2.195H8.963L7.278 7.146V5.447h-.897v4.701z" clip-rule="evenodd"/><path fill="url(#amazon-eks-color-16__paint1_linear_879_141)" d="M8.532 3.803l3.186 1.81a.173.173 0 01.088.149v3.62c0 .06.033.118.088.149l2.842 1.615a.176.176 0 00.264-.15V3.947a.173.173 0 00-.088-.15L8.708.274a.176.176 0 00-.264.15v3.23c0 .062.034.119.088.15z"/><path fill="url(#amazon-eks-color-16__paint2_linear_879_141)" d="M11.273 10.288l-3.185 1.81a.178.178 0 01-.176 0l-3.63-2.062a.173.173 0 01-.088-.15V5.762c0-.062.034-.119.088-.15l3.186-1.81a.172.172 0 00.088-.15V.424a.176.176 0 00-.264-.15L1.088 3.798a.173.173 0 00-.088.15V11.7c0 .061.033.118.088.15l6.824 3.876c.054.03.122.03.176 0l6.204-3.524a.172.172 0 000-.3l-2.843-1.615a.178.178 0 00-.176 0z"/><defs><linearGradient id="amazon-eks-color-16__paint0_linear_879_141" x1="10.691" x2="8.521" y1="9.879" y2="4.634" gradientUnits="userSpaceOnUse"><stop stop-color="#426DDB"/><stop offset="1" stop-color="#3B4BDB"/></linearGradient><linearGradient id="amazon-eks-color-16__paint1_linear_879_141" x1="15.693" x2="9.546" y1="10.544" y2="-.213" gradientUnits="userSpaceOnUse"><stop stop-color="#426DDB"/><stop offset="1" stop-color="#3B4BDB"/></linearGradient><linearGradient id="amazon-eks-color-16__paint2_linear_879_141" x1="9.433" x2="2.732" y1="14.904" y2="2.88" gradientUnits="userSpaceOnUse"><stop stop-color="#2775FF"/><stop offset="1" stop-color="#188DFF"/></linearGradient></defs></svg>


View File

@@ -3,7 +3,7 @@
   "alert_channels": "Alert Channels",
   "organization_settings": "Organization Settings",
   "ingestion_settings": "Ingestion Settings",
-  "api_keys": "API Keys",
+  "api_keys": "Access Tokens",
   "my_settings": "My Settings",
   "overview_metrics": "Overview Metrics",
   "dbcall_metrics": "Database Calls",

View File

@@ -26,7 +26,7 @@
   "MY_SETTINGS": "SigNoz | My Settings",
   "ORG_SETTINGS": "SigNoz | Organization Settings",
   "INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
-  "API_KEYS": "SigNoz | API Keys",
+  "API_KEYS": "SigNoz | Access Tokens",
   "SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
   "UN_AUTHORIZED": "SigNoz | Unauthorized",
   "NOT_FOUND": "SigNoz | Page Not Found",

View File

@@ -1,3 +1,3 @@
 {
-  "delete_confirm_message": "Are you sure you want to delete {{keyName}} key? Deleting a key is irreversible and cannot be undone."
+  "delete_confirm_message": "Are you sure you want to delete {{keyName}} token? Deleting a token is irreversible and cannot be undone."
 }

View File

@@ -3,7 +3,7 @@
   "alert_channels": "Alert Channels",
   "organization_settings": "Organization Settings",
   "ingestion_settings": "Ingestion Settings",
-  "api_keys": "API Keys",
+  "api_keys": "Access Tokens",
   "my_settings": "My Settings",
   "overview_metrics": "Overview Metrics",
   "dbcall_metrics": "Database Calls",

View File

@@ -26,7 +26,7 @@
   "MY_SETTINGS": "SigNoz | My Settings",
   "ORG_SETTINGS": "SigNoz | Organization Settings",
   "INGESTION_SETTINGS": "SigNoz | Ingestion Settings",
-  "API_KEYS": "SigNoz | API Keys",
+  "API_KEYS": "SigNoz | Access Tokens",
   "SOMETHING_WENT_WRONG": "SigNoz | Something Went Wrong",
   "UN_AUTHORIZED": "SigNoz | Unauthorized",
   "NOT_FOUND": "SigNoz | Page Not Found",

View File

@@ -9,7 +9,7 @@ done
 # create temporary tsconfig which includes only passed files
 str="{
 	\"extends\": \"./tsconfig.json\",
-	\"include\": [\"src/types/global.d.ts\",\"src/typings/window.ts\", \"src/typings/chartjs-adapter-date-fns.d.ts\", \"src/typings/environment.ts\" ,$files]
+	\"include\": [\"src/types/global.d.ts\",\"src/typings/window.ts\", \"src/typings/chartjs-adapter-date-fns.d.ts\", \"src/typings/environment.ts\" ,\"src/container/OnboardingContainer/typings.d.ts\",$files]
 }"
 echo $str > tsconfig.tmp

View File

@@ -20,11 +20,16 @@ import { UPDATE_USER_IS_FETCH } from 'types/actions/app';
 import AppReducer from 'types/reducer/app';
 import { routePermission } from 'utils/permission';

-import routes, { LIST_LICENSES } from './routes';
+import routes, {
+	LIST_LICENSES,
+	oldNewRoutesMapping,
+	oldRoutes,
+} from './routes';
 import afterLogin from './utils';

 function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
-	const { pathname } = useLocation();
+	const location = useLocation();
+	const { pathname } = location;

 	const mapRoutes = useMemo(
 		() =>
@@ -59,6 +64,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
 	const currentRoute = mapRoutes.get('current');

+	const isOldRoute = oldRoutes.indexOf(pathname) > -1;
+
 	const isLocalStorageLoggedIn =
 		getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true';
@@ -158,6 +165,16 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
 	useEffect(() => {
 		(async (): Promise<void> => {
 			try {
+				if (isOldRoute) {
+					const redirectUrl = oldNewRoutesMapping[pathname];
+
+					const newLocation = {
+						...location,
+						pathname: redirectUrl,
+					};
+					history.replace(newLocation);
+				}
+
 				if (currentRoute) {
 					const { isPrivate, key } = currentRoute;

View File

@@ -279,6 +279,13 @@ const routes: AppRoutes[] = [
 		key: 'LIVE_LOGS',
 		isPrivate: true,
 	},
+	{
+		path: ROUTES.LOGS_PIPELINES,
+		exact: true,
+		component: PipelinePage,
+		key: 'LOGS_PIPELINES',
+		isPrivate: true,
+	},
 	{
 		path: ROUTES.LOGIN,
 		exact: true,
@@ -307,13 +314,6 @@ const routes: AppRoutes[] = [
 		key: 'SOMETHING_WENT_WRONG',
 		isPrivate: false,
 	},
-	{
-		path: ROUTES.LOGS_PIPELINES,
-		exact: true,
-		component: PipelinePage,
-		key: 'LOGS_PIPELINES',
-		isPrivate: true,
-	},
 	{
 		path: ROUTES.BILLING,
 		exact: true,
@@ -353,6 +353,22 @@ export const LIST_LICENSES: AppRoutes = {
 	key: 'LIST_LICENSES',
 };

+export const oldRoutes = [
+	'/pipelines',
+	'/logs/old-logs-explorer',
+	'/logs-explorer',
+	'/logs-explorer/live',
+	'/settings/api-keys',
+];
+
+export const oldNewRoutesMapping: Record<string, string> = {
+	'/pipelines': '/logs/pipelines',
+	'/logs/old-logs-explorer': '/logs/old-logs-explorer',
+	'/logs-explorer': '/logs/logs-explorer',
+	'/logs-explorer/live': '/logs/logs-explorer/live',
+	'/settings/api-keys': '/settings/access-tokens',
+};
+
 export interface AppRoutes {
 	component: RouteProps['component'];
 	path: RouteProps['path'];
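
Together with the PrivateRoute hunk above, these two exports give every legacy URL a single-hop redirect. An illustrative lookup (not code from the commit):

import { oldNewRoutesMapping, oldRoutes } from './routes';

// Resolve a possibly-legacy pathname to its current equivalent.
function resolvePath(pathname: string): string {
	return oldRoutes.includes(pathname) ? oldNewRoutesMapping[pathname] : pathname;
}

resolvePath('/settings/api-keys'); // => '/settings/access-tokens'
resolvePath('/logs-explorer'); // => '/logs/logs-explorer'

Note that '/logs/old-logs-explorer' maps to itself, so visiting it resolves back to the same path.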

View File

@@ -0,0 +1,41 @@
import { CSSProperties } from 'react';
function BarIcon({
fillColor,
}: {
fillColor: CSSProperties['color'];
}): JSX.Element {
return (
<svg
width="30"
height="30"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M8 2H6C4.89543 2 4 2.89543 4 4V16C4 17.1046 4.89543 18 6 18H8C9.10457 18 10 17.1046 10 16V4C10 2.89543 9.10457 2 8 2Z"
stroke={fillColor}
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
/>
<path
d="M18 9H16C14.8954 9 14 9.89543 14 11V16C14 17.1046 14.8954 18 16 18H18C19.1046 18 20 17.1046 20 16V11C20 9.89543 19.1046 9 18 9Z"
stroke={fillColor}
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
/>
<path
d="M22 22H2"
stroke={fillColor}
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
/>
</svg>
);
}
export default BarIcon;

View File

@@ -5,6 +5,7 @@ import cx from 'classnames';
 import ROUTES from 'constants/routes';
 import { DateTimeRangeType } from 'container/TopNav/CustomDateTimeModal';
 import {
+	LexicalContext,
 	Option,
 	RelativeDurationSuggestionOptions,
 } from 'container/TopNav/DateTimeSelectionV2/config';
@@ -20,7 +21,10 @@ interface CustomTimePickerPopoverContentProps {
 	setIsOpen: Dispatch<SetStateAction<boolean>>;
 	customDateTimeVisible: boolean;
 	setCustomDTPickerVisible: Dispatch<SetStateAction<boolean>>;
-	onCustomDateHandler: (dateTimeRange: DateTimeRangeType) => void;
+	onCustomDateHandler: (
+		dateTimeRange: DateTimeRangeType,
+		lexicalContext?: LexicalContext,
+	) => void;
 	onSelectHandler: (label: string, value: string) => void;
 	handleGoLive: () => void;
 	selectedTime: string;
@@ -63,7 +67,7 @@ function CustomTimePickerPopoverContent({
 		if (date_time?.[1]) {
 			onPopoverClose(false);
 		}
-		onCustomDateHandler(date_time);
+		onCustomDateHandler(date_time, LexicalContext.CUSTOM_DATE_PICKER);
 	};

 	function getTimeChips(options: Option[]): JSX.Element {
 		return (

View File

@@ -14,6 +14,7 @@ export const PANEL_TYPES_COMPONENT_MAP = {
 	[PANEL_TYPES.TRACE]: null,
 	[PANEL_TYPES.LIST]: LogsPanelComponent,
 	[PANEL_TYPES.EMPTY_WIDGET]: null,
+	[PANEL_TYPES.BAR]: Uplot,
 } as const;

 export const getComponentForPanelType = (
@@ -27,6 +28,7 @@ export const getComponentForPanelType = (
 	[PANEL_TYPES.TRACE]: null,
 	[PANEL_TYPES.LIST]:
 		dataSource === DataSource.LOGS ? LogsPanelComponent : TracesTableComponent,
+	[PANEL_TYPES.BAR]: Uplot,
 	[PANEL_TYPES.EMPTY_WIDGET]: null,
 };

View File

@@ -264,6 +264,7 @@ export enum PANEL_TYPES {
 	TABLE = 'table',
 	LIST = 'list',
 	TRACE = 'trace',
+	BAR = 'bar',
 	EMPTY_WIDGET = 'EMPTY_WIDGET',
 }
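
Adding BAR here ripples through the rest of the PR because several lookup tables later in the diff are typed as exhaustive maps over this enum, so omitting the new member is a compile-time error. A minimal sketch of that pattern, with enum members trimmed to those visible in this hunk:

enum PANEL_TYPES {
	TABLE = 'table',
	LIST = 'list',
	TRACE = 'trace',
	BAR = 'bar',
	EMPTY_WIDGET = 'EMPTY_WIDGET',
}

// `key in PANEL_TYPES` forces a value for every member: deleting the
// BAR entry below would fail to compile.
const panelTypeVsExample: { [key in PANEL_TYPES]: boolean } = {
	[PANEL_TYPES.TABLE]: true,
	[PANEL_TYPES.LIST]: false,
	[PANEL_TYPES.TRACE]: false,
	[PANEL_TYPES.BAR]: true,
	[PANEL_TYPES.EMPTY_WIDGET]: false,
};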

View File

@@ -24,7 +24,7 @@ const ROUTES = {
 	MY_SETTINGS: '/my-settings',
 	SETTINGS: '/settings',
 	ORG_SETTINGS: '/settings/org-settings',
-	API_KEYS: '/settings/api-keys',
+	API_KEYS: '/settings/access-tokens',
 	INGESTION_SETTINGS: '/settings/ingestion-settings',
 	SOMETHING_WENT_WRONG: '/something-went-wrong',
 	UN_AUTHORIZED: '/un-authorized',

View File

@@ -26,13 +26,13 @@ describe('APIKeys component', () => {
 	});

 	it('renders APIKeys component without crashing', () => {
-		expect(screen.getByText('API Keys')).toBeInTheDocument();
+		expect(screen.getByText('Access Tokens')).toBeInTheDocument();
 		expect(
-			screen.getByText('Create and manage access keys for the SigNoz API'),
+			screen.getByText('Create and manage access tokens for the SigNoz API'),
 		).toBeInTheDocument();
 	});

-	it('render list of API Keys', async () => {
+	it('render list of Access Tokens', async () => {
 		server.use(
 			rest.get(apiKeysURL, (req, res, ctx) =>
 				res(ctx.status(200), ctx.json(getAPIKeysResponse)),
@@ -41,15 +41,15 @@ describe('APIKeys component', () => {
 		await waitFor(() => {
 			expect(screen.getByText('No Expiry Token')).toBeInTheDocument();
-			expect(screen.getByText('1-5 of 18 API Keys')).toBeInTheDocument();
+			expect(screen.getByText('1-5 of 18 tokens')).toBeInTheDocument();
 		});
 	});

 	it('opens add new key modal on button click', async () => {
-		fireEvent.click(screen.getByText('New Key'));
+		fireEvent.click(screen.getByText('New Token'));

 		await waitFor(() => {
 			const createNewKeyBtn = screen.getByRole('button', {
-				name: /Create new key/i,
+				name: /Create new token/i,
 			});

 			expect(createNewKeyBtn).toBeInTheDocument();
@@ -57,10 +57,10 @@ describe('APIKeys component', () => {
 	});

 	it('closes add new key modal on cancel button click', async () => {
-		fireEvent.click(screen.getByText('New Key'));
+		fireEvent.click(screen.getByText('New Token'));

 		const createNewKeyBtn = screen.getByRole('button', {
-			name: /Create new key/i,
+			name: /Create new token/i,
 		});

 		await waitFor(() => {
@@ -79,10 +79,10 @@ describe('APIKeys component', () => {
 			),
 		);

-		fireEvent.click(screen.getByText('New Key'));
+		fireEvent.click(screen.getByText('New Token'));

 		const createNewKeyBtn = screen.getByRole('button', {
-			name: /Create new key/i,
+			name: /Create new token/i,
 		});

 		await waitFor(() => {
@@ -90,7 +90,7 @@ describe('APIKeys component', () => {
 		});

 		act(() => {
-			const inputElement = screen.getByPlaceholderText('Enter Key Name');
+			const inputElement = screen.getByPlaceholderText('Enter Token Name');
 			fireEvent.change(inputElement, { target: { value: 'Top Secret' } });
 			fireEvent.click(screen.getByTestId('create-form-admin-role-btn'));
 			fireEvent.click(createNewKeyBtn);

View File

@@ -514,15 +514,15 @@ function APIKeys(): JSX.Element {
 		<div className="api-key-container">
 			<div className="api-key-content">
 				<header>
-					<Typography.Title className="title">API Keys</Typography.Title>
+					<Typography.Title className="title">Access Tokens </Typography.Title>
 					<Typography.Text className="subtitle">
-						Create and manage access keys for the SigNoz API
+						Create and manage access tokens for the SigNoz API
 					</Typography.Text>
 				</header>

 				<div className="api-keys-search-add-new">
 					<Input
-						placeholder="Search for keys..."
+						placeholder="Search for token..."
 						prefix={<Search size={12} color={Color.BG_VANILLA_400} />}
 						value={searchValue}
 						onChange={handleSearch}
@@ -533,7 +533,7 @@ function APIKeys(): JSX.Element {
 						type="primary"
 						onClick={showAddModal}
 					>
-						<Plus size={14} /> New Key
+						<Plus size={14} /> New Token
 					</Button>
 				</div>
@@ -546,7 +546,7 @@ function APIKeys(): JSX.Element {
 						pageSize: 5,
 						hideOnSinglePage: true,
 						showTotal: (total: number, range: number[]): string =>
-							`${range[0]}-${range[1]} of ${total} API Keys`,
+							`${range[0]}-${range[1]} of ${total} tokens`,
 					}}
 				/>
 			</div>
@@ -554,7 +554,7 @@ function APIKeys(): JSX.Element {
 			{/* Delete Key Modal */}
 			<Modal
 				className="delete-api-key-modal"
-				title={<span className="title">Delete key</span>}
+				title={<span className="title">Delete Token</span>}
 				open={isDeleteModalOpen}
 				closable
 				afterClose={handleModalClose}
@@ -576,7 +576,7 @@ function APIKeys(): JSX.Element {
 						onClick={onDeleteHandler}
 						className="delete-btn"
 					>
-						Delete key
+						Delete Token
 					</Button>,
 				]}
 			>
@@ -590,7 +590,7 @@ function APIKeys(): JSX.Element {
 			{/* Edit Key Modal */}
 			<Modal
 				className="api-key-modal"
-				title="Edit key"
+				title="Edit token"
 				open={isEditModalOpen}
 				key="edit-api-key-modal"
 				afterClose={handleModalClose}
@@ -614,7 +614,7 @@ function APIKeys(): JSX.Element {
 						icon={<Check size={14} />}
 						onClick={onUpdateApiKey}
 					>
-						Update key
+						Update Token
 					</Button>,
 				]}
 			>
@@ -634,7 +634,7 @@ function APIKeys(): JSX.Element {
 					label="Name"
 					rules={[{ required: true }, { type: 'string', min: 6 }]}
 				>
-					<Input placeholder="Enter Key Name" autoFocus />
+					<Input placeholder="Enter Token Name" autoFocus />
 				</Form.Item>

 				<Form.Item name="role" label="Role">
@@ -668,7 +668,7 @@ function APIKeys(): JSX.Element {
 			{/* Create New Key Modal */}
 			<Modal
 				className="api-key-modal"
-				title="Create new key"
+				title="Create new token"
 				open={isAddModalOpen}
 				key="create-api-key-modal"
 				closable
@@ -685,7 +685,7 @@ function APIKeys(): JSX.Element {
 								onClick={handleCopyClose}
 								icon={<Check size={12} />}
 							>
-								Copy key and close
+								Copy token and close
 							</Button>,
 					  ]
 					: [
@@ -706,7 +706,7 @@ function APIKeys(): JSX.Element {
 								loading={isLoadingCreateAPIKey}
 								onClick={onCreateAPIKey}
 							>
-								Create new key
+								Create new token
 							</Button>,
 					  ]
 			}
@@ -730,7 +730,7 @@ function APIKeys(): JSX.Element {
 					rules={[{ required: true }, { type: 'string', min: 6 }]}
 					validateTrigger="onFinish"
 				>
-					<Input placeholder="Enter Key Name" autoFocus />
+					<Input placeholder="Enter Token Name" autoFocus />
 				</Form.Item>

 				<Form.Item name="role" label="Role">
@@ -771,7 +771,7 @@ function APIKeys(): JSX.Element {
 			{showNewAPIKeyDetails && (
 				<div className="api-key-info-container">
 					<Row>
-						<Col span={8}>Key</Col>
+						<Col span={8}>Token</Col>
 						<Col span={16}>
 							<span className="copyable-text">
 								<Typography.Text>

View File

@@ -13,6 +13,7 @@ import useAnalytics from 'hooks/analytics/useAnalytics';
 import useAxiosError from 'hooks/useAxiosError';
 import useLicense from 'hooks/useLicense';
 import { useNotifications } from 'hooks/useNotifications';
+import { pick } from 'lodash-es';
 import { useCallback, useEffect, useState } from 'react';
 import { useMutation, useQuery } from 'react-query';
 import { useSelector } from 'react-redux';
@@ -305,7 +306,7 @@ export default function BillingContainer(): JSX.Element {
 	const handleBilling = useCallback(async () => {
 		if (isFreeTrial && !licensesData?.payload?.trialConvertedToSubscription) {
 			trackEvent('Billing : Upgrade Plan', {
-				user,
+				user: pick(user, ['email', 'userId', 'name']),
 				org,
 			});
@@ -316,7 +317,7 @@
 			});
 		} else {
 			trackEvent('Billing : Manage Billing', {
-				user,
+				user: pick(user, ['email', 'userId', 'name']),
 				org,
 			});

View File

@@ -19,6 +19,8 @@ import { AlertDef } from 'types/api/alerts/def';
 import { Query } from 'types/api/queryBuilder/queryBuilderData';
 import { EQueryType } from 'types/common/dashboard';
 import { GlobalReducer } from 'types/reducer/globalTime';
+import { getGraphType } from 'utils/getGraphType';
+import { getSortedSeriesData } from 'utils/getSortedSeriesData';
 import { getTimeRange } from 'utils/getTimeRange';

 import { ChartContainer, FailedMessageContainer } from './styles';
@@ -86,7 +88,7 @@ function ChartPreview({
 		{
 			query: query || initialQueriesMap.metrics,
 			globalSelectedInterval: selectedInterval,
-			graphType,
+			graphType: getGraphType(graphType),
 			selectedTime,
 			params: {
 				allowSelectedIntervalForStepGen,
@@ -114,6 +116,13 @@ function ChartPreview({
 		setMaxTimeScale(endTime);
 	}, [maxTime, minTime, globalSelectedInterval, queryResponse]);

+	if (queryResponse.data && graphType === PANEL_TYPES.BAR) {
+		const sortedSeriesData = getSortedSeriesData(
+			queryResponse.data?.payload.data.result,
+		);
+		queryResponse.data.payload.data.result = sortedSeriesData;
+	}
+
 	const chartData = getUPlotChartData(queryResponse?.data?.payload);

 	const containerDimensions = useResizeObserver(graphRef);
@@ -153,6 +162,7 @@ function ChartPreview({
 			],
 			softMax: null,
 			softMin: null,
+			panelType: graphType,
 		}),
 		[
 			yAxisUnit,
@@ -165,6 +175,7 @@ function ChartPreview({
 			t,
 			optionName,
 			alertDef?.condition.targetUnit,
+			graphType,
 		],
 	);
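
Both helpers are new in this release, but their bodies are not part of the hunks shown anywhere in this diff. Plausible sketches inferred from the call sites; the real implementations may differ:

// Assumed implementations, reconstructed from usage only.
import { PANEL_TYPES } from 'constants/queryBuilder';

type SeriesPoint = [number, string]; // [timestamp, value] (assumed shape)
interface Series {
	values: SeriesPoint[];
}

// The query-range API presumably has no 'bar' graph type, so bar panels
// would be fetched as time series and only rendered as bars.
export function getGraphType(panelType: PANEL_TYPES): PANEL_TYPES {
	return panelType === PANEL_TYPES.BAR ? PANEL_TYPES.TIME_SERIES : panelType;
}

// Bar charts want a deterministic series order; sorting by each series'
// latest value is one reasonable key.
export function getSortedSeriesData<T extends Series>(result: T[] = []): T[] {
	const last = (s: Series): number =>
		Number(s.values[s.values.length - 1]?.[1] ?? 0);
	return [...result].sort((a, b) => last(b) - last(a));
}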

View File

@@ -22,6 +22,7 @@ function QuerySection({
 	setQueryCategory,
 	alertType,
 	runQuery,
+	panelType,
 }: QuerySectionProps): JSX.Element {
 	// init namespace for translations
 	const { t } = useTranslation('alerts');
@@ -44,7 +45,7 @@ function QuerySection({
 	const renderMetricUI = (): JSX.Element => (
 		<QueryBuilder
-			panelType={PANEL_TYPES.TIME_SERIES}
+			panelType={panelType}
 			config={{
 				queryVariant: 'static',
 				initialDataSource: ALERTS_DATA_SOURCE_MAP[alertType],
@@ -196,6 +197,7 @@ interface QuerySectionProps {
 	setQueryCategory: (n: EQueryType) => void;
 	alertType: AlertTypes;
 	runQuery: VoidFunction;
+	panelType: PANEL_TYPES;
 }

 export default QuerySection;

View File

@@ -10,6 +10,7 @@ import {
 import saveAlertApi from 'api/alerts/save';
 import testAlertApi from 'api/alerts/testAlert';
 import { FeatureKeys } from 'constants/features';
+import { QueryParams } from 'constants/query';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import ROUTES from 'constants/routes';
 import QueryTypeTag from 'container/NewWidget/LeftContainer/QueryTypeTag';
@@ -20,6 +21,7 @@ import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
 import { updateStepInterval } from 'hooks/queryBuilder/useStepInterval';
 import { MESSAGE, useIsFeatureDisabled } from 'hooks/useFeatureFlag';
 import { useNotifications } from 'hooks/useNotifications';
+import useUrlQuery from 'hooks/useUrlQuery';
 import history from 'lib/history';
 import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi';
 import { mapQueryDataToApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataToApi';
@@ -68,14 +70,23 @@ function FormAlertRules({
 		GlobalReducer
 	>((state) => state.globalTime);

+	const urlQuery = useUrlQuery();
+
+	const panelType = urlQuery.get(QueryParams.panelTypes) as PANEL_TYPES | null;
+
 	const {
 		currentQuery,
-		panelType,
 		stagedQuery,
 		handleRunQuery,
+		handleSetConfig,
+		initialDataSource,
 		redirectWithQueryBuilderData,
 	} = useQueryBuilder();

+	useEffect(() => {
+		handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, initialDataSource);
+	}, [handleSetConfig, initialDataSource, panelType]);
+
 	// use query client
 	const ruleCache = useQueryClient();
@@ -277,7 +288,7 @@ function FormAlertRules({
 			promQueries: mapQueryDataToApi(currentQuery.promql, 'name').data,
 			chQueries: mapQueryDataToApi(currentQuery.clickhouse_sql, 'name').data,
 			queryType: currentQuery.queryType,
-			panelType: initQuery.panelType,
+			panelType: panelType || initQuery.panelType,
 			unit: currentQuery.unit,
 		},
 	},
@@ -290,6 +301,7 @@ function FormAlertRules({
 		alertDef,
 		alertType,
 		initQuery,
+		panelType,
 	]);

 	const isAlertAvialable = useIsFeatureDisabled(
@@ -423,6 +435,7 @@ function FormAlertRules({
 			selectedInterval={globalSelectedInterval}
 			alertDef={alertDef}
 			yAxisUnit={yAxisUnit || ''}
+			graphType={panelType || PANEL_TYPES.TIME_SERIES}
 		/>
 	);
@@ -439,6 +452,7 @@ function FormAlertRules({
 			alertDef={alertDef}
 			selectedInterval={globalSelectedInterval}
 			yAxisUnit={yAxisUnit || ''}
+			graphType={panelType || PANEL_TYPES.TIME_SERIES}
 		/>
 	);
@@ -495,6 +509,7 @@ function FormAlertRules({
 			setQueryCategory={onQueryCategoryChange}
 			alertType={alertType || AlertTypes.METRICS_BASED_ALERT}
 			runQuery={handleRunQuery}
+			panelType={panelType || PANEL_TYPES.TIME_SERIES}
 		/>
 		<RuleOptions

View File

@@ -25,5 +25,6 @@ export const PANEL_TYPES_VS_FULL_VIEW_TABLE: PanelTypeAndGraphManagerVisibilityP
 	TABLE: false,
 	LIST: false,
 	TRACE: false,
+	BAR: true,
 	EMPTY_WIDGET: false,
 };

View File

@@ -25,12 +25,12 @@ import { useSelector } from 'react-redux';
 import { AppState } from 'store/reducers';
 import { GlobalReducer } from 'types/reducer/globalTime';
 import uPlot from 'uplot';
+import { getSortedSeriesData } from 'utils/getSortedSeriesData';
 import { getTimeRange } from 'utils/getTimeRange';

 import { getGraphVisibilityStateOnDataChange } from '../utils';
 import { PANEL_TYPES_VS_FULL_VIEW_TABLE } from './contants';
 import GraphManager from './GraphManager';
-// import GraphManager from './GraphManager';
 import { GraphContainer, TimeContainer } from './styles';
 import { FullViewProps } from './types';
@@ -107,6 +107,13 @@ function FullView({
 		panelTypeAndGraphManagerVisibility: PANEL_TYPES_VS_FULL_VIEW_TABLE,
 	});

+	if (response.data && widget.panelTypes === PANEL_TYPES.BAR) {
+		const sortedSeriesData = getSortedSeriesData(
+			response.data?.payload.data.result,
+		);
+		response.data.payload.data.result = sortedSeriesData;
+	}
+
 	const chartData = getUPlotChartData(response?.data?.payload, widget.fillSpans);

 	const isDarkMode = useIsDarkMode();
@@ -152,6 +159,7 @@ function FullView({
 			maxTimeScale,
 			softMax: widget.softMax === undefined ? null : widget.softMax,
 			softMin: widget.softMin === undefined ? null : widget.softMin,
+			panelType: widget.panelTypes,
 		});

 		setChartOptions(newChartOptions);

View File

@@ -21,6 +21,8 @@ import { useLocation } from 'react-router-dom';
 import { UpdateTimeInterval } from 'store/actions';
 import { AppState } from 'store/reducers';
 import { GlobalReducer } from 'types/reducer/globalTime';
+import { getGraphType } from 'utils/getGraphType';
+import { getSortedSeriesData } from 'utils/getSortedSeriesData';
 import { getTimeRange } from 'utils/getTimeRange';

 import EmptyWidget from '../EmptyWidget';
@@ -125,7 +127,7 @@ function GridCardGraph({
 	const queryResponse = useGetQueryRange(
 		{
 			selectedTime: widget?.timePreferance,
-			graphType: widget?.panelTypes,
+			graphType: getGraphType(widget.panelTypes),
 			query: updatedQuery,
 			globalSelectedInterval,
 			variables: getDashboardVariables(variables),
@@ -160,6 +162,13 @@ function GridCardGraph({
 		setMaxTimeScale(endTime);
 	}, [maxTime, minTime, globalSelectedInterval, queryResponse]);

+	if (queryResponse.data && widget.panelTypes === PANEL_TYPES.BAR) {
+		const sortedSeriesData = getSortedSeriesData(
+			queryResponse.data?.payload.data.result,
+		);
+		queryResponse.data.payload.data.result = sortedSeriesData;
+	}
+
 	const chartData = getUPlotChartData(queryResponse?.data?.payload, fillSpans);

 	const isDarkMode = useIsDarkMode();
@@ -191,6 +200,7 @@ function GridCardGraph({
 			softMin: widget.softMin === undefined ? null : widget.softMin,
 			graphsVisibilityStates: graphVisibility,
 			setGraphsVisibilityStates: setGraphVisibility,
+			panelType: widget.panelTypes,
 		}),
 		[
 			widget?.id,
@@ -207,6 +217,7 @@ function GridCardGraph({
 			maxTimeScale,
 			graphVisibility,
 			setGraphVisibility,
+			widget.panelTypes,
 		],
 	);

View File

@@ -58,6 +58,11 @@ const GridPanelSwitch = forwardRef<
 			selectedTime,
 		},
 		[PANEL_TYPES.TRACE]: null,
+		[PANEL_TYPES.BAR]: {
+			data,
+			options,
+			ref,
+		},
 		[PANEL_TYPES.EMPTY_WIDGET]: null,
 	};

View File

@@ -41,5 +41,8 @@ export type PropsTypePropsMap = {
 	[PANEL_TYPES.TABLE]: GridTableComponentProps;
 	[PANEL_TYPES.TRACE]: null;
 	[PANEL_TYPES.LIST]: LogsPanelComponentProps | TracesTableComponentProps;
+	[PANEL_TYPES.BAR]: UplotProps & {
+		ref: ForwardedRef<ToggleGraphProps | undefined>;
+	};
 	[PANEL_TYPES.EMPTY_WIDGET]: null;
 };

View File

@@ -120,7 +120,9 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
 					history.push(
 						`${ROUTES.EDIT_ALERTS}?ruleId=${record.id.toString()}&${
 							QueryParams.compositeQuery
-						}=${encodeURIComponent(JSON.stringify(compositeQuery))}`,
+						}=${encodeURIComponent(JSON.stringify(compositeQuery))}&panelTypes=${
+							record.condition.compositeQuery.panelType
+						}`,
 					);
 				})
 				.catch(handleError);
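
This is the writer half of the FormAlertRules change earlier in the diff, which reads the parameter back with urlQuery.get(QueryParams.panelTypes). An illustrative result; the rule id, route, and query contents are invented, only the parameter layout follows the hunk above:

// Invented values; real shape: ...?ruleId=...&compositeQuery=...&panelTypes=...
const ruleId = '42';
const compositeQuery = encodeURIComponent(JSON.stringify({ queryType: 'builder' }));
const editUrl = `/alerts/edit?ruleId=${ruleId}&compositeQuery=${compositeQuery}&panelTypes=bar`;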

View File

@@ -105,7 +105,7 @@ function TableView({
 	const onTraceHandler = (
 		record: DataType,
 		event: React.MouseEvent<HTMLDivElement, MouseEvent>,
-	) => (): void => {
+	): void => {
 		if (flattenLogData === null) return;

 		const traceId = flattenLogData[record.field];

View File

@@ -161,7 +161,7 @@ function LogsExplorerList({
 			!isFetching &&
 			!isError &&
 			!isFilterApplied &&
-			logs.length === 0 && <NoLogs />}
+			logs.length === 0 && <NoLogs dataSource={DataSource.LOGS} />}

 		{!isLoading &&
 			!isFetching &&

View File

@@ -611,6 +611,7 @@ function LogsExplorerViews({
 					data={data}
 					isError={isError}
 					isFilterApplied={!isEmpty(listQuery?.filters.items)}
+					dataSource={DataSource.LOGS}
 				/>
 			)}
)} )}

View File

@@ -9,6 +9,7 @@ export const PANEL_TYPES_INITIAL_QUERY = {
 	[PANEL_TYPES.TABLE]: initialQueriesMap.metrics,
 	[PANEL_TYPES.LIST]: initialQueriesMap.logs,
 	[PANEL_TYPES.TRACE]: initialQueriesMap.traces,
+	[PANEL_TYPES.BAR]: initialQueriesMap.metrics,
 	[PANEL_TYPES.EMPTY_WIDGET]: initialQueriesMap.metrics,
 };

View File

@@ -1,3 +1,4 @@
+import BarIcon from 'assets/Dashboard/BarIcon';
 import List from 'assets/Dashboard/List';
 import TableIcon from 'assets/Dashboard/Table';
 import TimeSeriesIcon from 'assets/Dashboard/TimeSeries';
@@ -18,6 +19,7 @@ const Items: ItemsProps[] = [
 	},
 	{ name: PANEL_TYPES.TABLE, Icon: TableIcon, display: 'Table' },
 	{ name: PANEL_TYPES.LIST, Icon: List, display: 'List' },
+	{ name: PANEL_TYPES.BAR, Icon: BarIcon, display: 'Bar' },
 ];

 interface ItemsProps {

View File

@@ -5,6 +5,8 @@ import { WidgetGraphProps } from 'container/NewWidget/types';
 import { useGetWidgetQueryRange } from 'hooks/queryBuilder/useGetWidgetQueryRange';
 import useUrlQuery from 'hooks/useUrlQuery';
 import { useDashboard } from 'providers/Dashboard/Dashboard';
+import { getGraphType } from 'utils/getGraphType';
+import { getSortedSeriesData } from 'utils/getSortedSeriesData';

 import { NotFoundContainer } from './styles';
 import WidgetGraph from './WidgetGraphs';
@@ -31,10 +33,17 @@ function WidgetGraphContainer({
 	const selectedWidget = widgets.find((e) => e.id === widgetId);

 	const getWidgetQueryRange = useGetWidgetQueryRange({
-		graphType: selectedGraph,
+		graphType: getGraphType(selectedGraph),
 		selectedTime: selectedTime.enum,
 	});

+	if (getWidgetQueryRange.data && selectedGraph === PANEL_TYPES.BAR) {
+		const sortedSeriesData = getSortedSeriesData(
+			getWidgetQueryRange.data?.payload.data.result,
+		);
+		getWidgetQueryRange.data.payload.data.result = sortedSeriesData;
+	}
+
 	if (selectedWidget === undefined) {
 		return <Card>Invalid widget</Card>;
 	}
@@ -83,6 +92,7 @@ function WidgetGraphContainer({
 			selectedLogFields={selectedLogFields}
 			selectedTracesFields={selectedTracesFields}
 			selectedTime={selectedTime}
+			selectedGraph={selectedGraph}
 		/>
 	);
 }

View File

@@ -1,4 +1,5 @@
 import { QueryParams } from 'constants/query';
+import { PANEL_TYPES } from 'constants/queryBuilder';
 import GridPanelSwitch from 'container/GridPanelSwitch';
 import { ThresholdProps } from 'container/NewWidget/RightContainer/Threshold/types';
 import { timePreferance } from 'container/NewWidget/RightContainer/timeItems';
@@ -34,6 +35,7 @@ function WidgetGraph({
 	selectedLogFields,
 	selectedTracesFields,
 	selectedTime,
+	selectedGraph,
 }: WidgetGraphProps): JSX.Element {
 	const { stagedQuery, currentQuery } = useQueryBuilder();
@@ -130,6 +132,7 @@ function WidgetGraph({
 			maxTimeScale,
 			softMax,
 			softMin,
+			panelType: selectedGraph,
 		}),
 		[
 			widgetId,
@@ -144,6 +147,7 @@ function WidgetGraph({
 			maxTimeScale,
 			softMax,
 			softMin,
+			selectedGraph,
 		],
 	);
@@ -183,6 +187,7 @@ interface WidgetGraphProps {
 	selectedLogFields: Widgets['selectedLogFields'];
 	selectedTracesFields: Widgets['selectedTracesFields'];
 	selectedTime: timePreferance;
+	selectedGraph: PANEL_TYPES;
 }

 export default WidgetGraph;

View File

@ -5,6 +5,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import useUrlQuery from 'hooks/useUrlQuery'; import useUrlQuery from 'hooks/useUrlQuery';
import { useDashboard } from 'providers/Dashboard/Dashboard'; import { useDashboard } from 'providers/Dashboard/Dashboard';
import { memo } from 'react'; import { memo } from 'react';
import { getGraphType } from 'utils/getGraphType';
import { WidgetGraphProps } from '../../types'; import { WidgetGraphProps } from '../../types';
import PlotTag from './PlotTag'; import PlotTag from './PlotTag';
@ -34,7 +35,7 @@ function WidgetGraph({
const selectedWidget = widgets.find((e) => e.id === widgetId); const selectedWidget = widgets.find((e) => e.id === widgetId);
const getWidgetQueryRange = useGetWidgetQueryRange({ const getWidgetQueryRange = useGetWidgetQueryRange({
graphType: selectedGraph, graphType: getGraphType(selectedGraph),
selectedTime: selectedTime.enum, selectedTime: selectedTime.enum,
}); });

View File

@ -26,6 +26,7 @@ export const panelTypeVsThreshold: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true, [PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: true, [PANEL_TYPES.TABLE]: true,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -35,6 +36,7 @@ export const panelTypeVsSoftMinMax: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: false, [PANEL_TYPES.VALUE]: false,
[PANEL_TYPES.TABLE]: false, [PANEL_TYPES.TABLE]: false,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -44,6 +46,7 @@ export const panelTypeVsDragAndDrop: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true, [PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: true, [PANEL_TYPES.TABLE]: true,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: false,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -53,6 +56,7 @@ export const panelTypeVsFillSpan: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: false, [PANEL_TYPES.VALUE]: false,
[PANEL_TYPES.TABLE]: false, [PANEL_TYPES.TABLE]: false,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: false,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -62,6 +66,7 @@ export const panelTypeVsYAxisUnit: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true, [PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: true, [PANEL_TYPES.TABLE]: true,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -71,6 +76,7 @@ export const panelTypeVsCreateAlert: { [key in PANEL_TYPES]: boolean } = {
[PANEL_TYPES.VALUE]: true, [PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: false, [PANEL_TYPES.TABLE]: false,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;
@ -82,6 +88,7 @@ export const panelTypeVsPanelTimePreferences: {
[PANEL_TYPES.VALUE]: true, [PANEL_TYPES.VALUE]: true,
[PANEL_TYPES.TABLE]: true, [PANEL_TYPES.TABLE]: true,
[PANEL_TYPES.LIST]: false, [PANEL_TYPES.LIST]: false,
[PANEL_TYPES.BAR]: true,
[PANEL_TYPES.TRACE]: false, [PANEL_TYPES.TRACE]: false,
[PANEL_TYPES.EMPTY_WIDGET]: false, [PANEL_TYPES.EMPTY_WIDGET]: false,
} as const; } as const;

View File

@ -2,25 +2,31 @@ import './NoLogs.styles.scss';
import { Typography } from 'antd'; import { Typography } from 'antd';
import { ArrowUpRight } from 'lucide-react'; import { ArrowUpRight } from 'lucide-react';
import { DataSource } from 'types/common/queryBuilder';
export default function NoLogs(): JSX.Element { export default function NoLogs({
dataSource,
}: {
dataSource: DataSource;
}): JSX.Element {
return ( return (
<div className="no-logs-container"> <div className="no-logs-container">
<div className="no-logs-container-content"> <div className="no-logs-container-content">
<img className="eyes-emoji" src="/Images/eyesEmoji.svg" alt="eyes emoji" /> <img className="eyes-emoji" src="/Images/eyesEmoji.svg" alt="eyes emoji" />
<Typography className="no-logs-text"> <Typography className="no-logs-text">
No logs yet. No {dataSource} yet.
<span className="sub-text"> <span className="sub-text">
When we receive logs, they would show up here {' '}
When we receive {dataSource}, they would show up here
</span> </span>
</Typography> </Typography>
<Typography.Link <Typography.Link
className="send-logs-link" className="send-logs-link"
href="https://signoz.io/docs/userguide/logs/" href={`https://signoz.io/docs/userguide/${dataSource}/`}
target="_blank" target="_blank"
> >
Sending Logs to SigNoz <ArrowUpRight size={16} /> Sending {dataSource} to SigNoz <ArrowUpRight size={16} />
</Typography.Link> </Typography.Link>
</div> </div>
</div> </div>

View File

@ -0,0 +1,31 @@
### Step 1: Add filelog receiver to `config.yaml` file of otel collector
Add the filelog receiver to the `receivers` section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the previous step
```yaml
receivers:
...
filelog/app:
include: [ /tmp/app.log ]
start_at: end
...
```
Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. This may be useful if you want to send old logs to SigNoz. Log records older than the standard log retention period (default 15 days) will be discarded.
For more configuration options available for the filelog receiver, please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
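To sanity-check the receiver, you can append a few test lines to the tailed file (assuming the `/tmp/app.log` path from the snippet above); once the collector picks them up, they should appear in SigNoz:
```bash
# write three timestamped test entries to the tailed log file
for i in 1 2 3; do
  echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") INFO test log line $i" >> /tmp/app.log
done
```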
&nbsp;
&nbsp;
### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above.
```yaml
service:
....
logs:
receivers: [otlp, filelog/app]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,113 @@
### Prerequisite
- A Linux based EC2 Instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
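You can also probe the collector's health endpoint to confirm it is up; this is a minimal check, assuming the default `health_check` extension port (13133) from the config above:
```bash
# returns a small status payload when the collector is healthy
curl -s http://localhost:13133/
```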
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Restart the OTel Collector
Restart the OTel collector so that the new changes take effect, after which you should see the logs in the dashboard.
If the collector is already running, kill the process using the command below:
```bash
kill "$(< otel-pid)"
```
Restart the OTel collector from within the `otelcol-contrib` folder:
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
Now you should be able to see the logs in your SigNoz Cloud UI.

View File

@ -0,0 +1,31 @@
### Step 1: Add filelog receiver to `config.yaml` file of otel collector
Add the filelog receiver to the `receivers` section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the previous step
```yaml
receivers:
...
filelog/app:
include: [ /tmp/app.log ]
start_at: end
...
```
Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. This may be useful if you want to send old logs to SigNoz. Log records older than the standard log retention period (default 15 days) will be discarded.
For more configuration options available for the filelog receiver, please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
&nbsp;
&nbsp;
### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above.
```yaml
service:
....
logs:
receivers: [otlp, filelog/app]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,114 @@
### Prerequisite
- A Linux based EC2 Instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Restart the OTel Collector
Restart the OTel collector so that the new changes take effect, after which you should see the logs in the dashboard.
If the collector is already running, kill the process using the command below:
```bash
kill "$(< otel-pid)"
```
Restart the OTel collector from within the `otelcol-contrib` folder:
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
Now you should be able to see the logs in your SigNoz Cloud UI.

View File

@ -0,0 +1,31 @@
### Step 1: Add filelog receiver to `config.yaml` file of otel collector
Add the filelog receiver to the `receivers` section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the previous step
```yaml
receivers:
...
filelog/app:
include: [ /tmp/app.log ]
start_at: end
...
```
Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. This may be useful if you want to send old logs to SigNoz. Log records older than the standard log retention period (default 15 days) will be discarded.
For more configuration options available for the filelog receiver, please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
&nbsp;
&nbsp;
### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above.
```yaml
service:
....
logs:
receivers: [otlp, filelog/app]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,113 @@
### Prerequisite
- A Linux based EC2 Instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Restart the OTel Collector
Restart the OTel collector so that the new changes take effect, after which you should see the logs in the dashboard.
If the collector is already running, kill the process using the command below:
```bash
kill "$(< otel-pid)"
```
Restart the OTel collector from within the `otelcol-contrib` folder:
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
Now you should be able to see the logs in your SigNoz Cloud UI.

View File

@ -0,0 +1,31 @@
### Step 1: Add filelog receiver to `config.yaml` file of otel collector
Add the filelog receiver to the `receivers` section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the previous step
```yaml
receivers:
...
filelog/app:
include: [ /tmp/app.log ]
start_at: end
...
```
Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. This may be useful if you want to send old logs to SigNoz. Log records older than the standard log retention period (default 15 days) will be discarded.
For more configuration options available for the filelog receiver, please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
&nbsp;
&nbsp;
### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above.
```yaml
service:
....
logs:
receivers: [otlp, filelog/app]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,113 @@
### Prerequisite
- A Linux based EC2 Instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Restart the OTel Collector
Restart the OTel collector so that the new changes take effect, after which you should see the logs in the dashboard.
If the collector is already running, kill the process using the command below:
```bash
kill "$(< otel-pid)"
```
Restart the OTel collector from within the `otelcol-contrib` folder:
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
Now you should be able to see the logs in your SigNoz Cloud UI.

View File

@ -0,0 +1,15 @@
### Step 1: Download/Copy this hostmetrics JSON file
Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
&nbsp;
### Step 2: Import hostmetrics JSON file to SigNoz Cloud
Once you click the **`Done`** button at the bottom-right corner, you'll be taken to the SigNoz UI.
In the SigNoz UI, go to the Dashboards section, click the **New Dashboard** button, select the **Import JSON** option, and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in the last step.
For more detailed instructions, check out the link [here](https://signoz.io/docs/userguide/hostmetrics/)

View File

@ -0,0 +1,113 @@
### Prerequisite
- A running EC2 instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Step 1: Download/Copy this hostmetrics JSON file
Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
&nbsp;
### Step 2: Import hostmetrics JSON file to SigNoz Cloud
Once you click the **`Done`** button at the bottom-right corner, you'll be taken to the SigNoz UI.
In the SigNoz UI, go to the Dashboards section, click the **New Dashboard** button, select the **Import JSON** option, and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in the last step.
For more detailed instructions, check out the link [here](https://signoz.io/docs/userguide/hostmetrics/)

View File

@ -0,0 +1,114 @@
### Prerequisite
- A running EC2 instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Step 1: Download/Copy this hostmetrics JSON file
Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
&nbsp;
### Step 2: Import hostmetrics JSON file to SigNoz Cloud
Once you click the **`Done`** button at the bottom-right corner, you'll be taken to the SigNoz UI.
In the SigNoz UI, go to the Dashboards section, click the **New Dashboard** button, select the **Import JSON** option, and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in the last step.
For more detailed instructions, check out the link [here](https://signoz.io/docs/userguide/hostmetrics/)

View File

@ -0,0 +1,113 @@
### Prerequisite
- A running EC2 instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,15 @@
### Step 1: Download/Copy this hostmetrics JSON file
Download/Copy the `hostmetrics-with-variable.json` from [here](https://github.com/SigNoz/dashboards/blob/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
&nbsp;
### Step 2: Import hostmetrics JSON file to SigNoz Cloud
Once you click the **`Done`** button at the bottom-right corner, you'll be taken to the SigNoz UI.
In the SigNoz UI, go to the Dashboards section, click the **New Dashboard** button, select the **Import JSON** option, and upload/paste the `hostmetrics-with-variable.json` file data that you downloaded/copied in the last step.
For more detailed instructions, check out the link [here](https://signoz.io/docs/userguide/hostmetrics/)

View File

@ -0,0 +1,112 @@
### Prerequisite
- A running EC2 instance
&nbsp;
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create config.yaml in folder otelcol-contrib with the below content in it
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
### Step 4: Run OTel Collector
Run this command inside the `otelcol-contrib` directory
```bash
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
```
### (Optional Step): View last 50 lines of `otelcol` logs
```bash
tail -f -n 50 otelcol-output.log
```
### (Optional Step): Stop `otelcol`
```bash
kill "$(< otel-pid)"
```

View File

@ -0,0 +1,68 @@
## Create Daemon Service
Using the template we downloaded and the SigNoz OpenTelemetry Collector configuration we created, we will create the Daemon Service. This can be done by following these steps:
### Step 1: Set the environment variable
Set the environment variables by running the below commands using your AWS CLI:
```bash
export CLUSTER_NAME=<YOUR-ECS-CLUSTER-NAME>
export REGION=<YOUR-ECS-REGION>
export COMMAND=--config=env:SIGNOZ_CONFIG_CONTENT
export SIGNOZ_CONFIG_PATH=/ecs/signoz/otelcol-daemon.yaml
```
`<YOUR-ECS-CLUSTER-NAME>` - Name of your ECS cluster. For example, **my-test-cluster**
`<YOUR-ECS-REGION>` - Region in which your ECS cluster is running. For example, **us-east-1**
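For example, with the sample values above filled in (the cluster name and region here are hypothetical):
```bash
export CLUSTER_NAME=my-test-cluster
export REGION=us-east-1
export COMMAND=--config=env:SIGNOZ_CONFIG_CONTENT
export SIGNOZ_CONFIG_PATH=/ecs/signoz/otelcol-daemon.yaml
```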
&nbsp;
### Step 2: Create stack for Daemon Service
With the environment variables set, you can proceed to create the Daemon service using `cloudformation create-stack` by running the below command using your AWS CLI:
```bash
aws cloudformation create-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} \
--template-body file://daemon-template.yaml \
--parameters ParameterKey=ClusterName,ParameterValue=${CLUSTER_NAME} \
ParameterKey=CreateIAMRoles,ParameterValue=True \
ParameterKey=command,ParameterValue=${COMMAND} \
ParameterKey=SigNozConfigPath,ParameterValue=${SIGNOZ_CONFIG_PATH} \
--capabilities CAPABILITY_NAMED_IAM \
--region ${REGION}
```
&nbsp;
### Step 3: Verify Daemon Service
To verify that the daemon service is running, you can run the following command, which should output the task ARN of the Daemon service.
```bash
aws ecs list-tasks --cluster ${CLUSTER_NAME} --region ${REGION}
```
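If you want to dig further into a specific task (for example, whether its container passed the health check), you can describe it using one of the ARNs returned above; the `--tasks` value below is a placeholder:
```bash
# show the last known status and health of the daemon task
aws ecs describe-tasks --cluster ${CLUSTER_NAME} --region ${REGION} \
  --tasks <TASK-ARN> \
  --query 'tasks[0].{status:lastStatus,health:healthStatus}'
```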
&nbsp;
### Step 4: Verify Data in SigNoz
To verify that the data is being sent to SigNoz Cloud, you can go to the dashboard section of SigNoz and import one of the following dashboards:
- [instancemetrics.json](https://raw.githubusercontent.com/SigNoz/dashboards/chore/ecs-dashboards/ecs-infra-metrics/instance-metrics.json)
- [hostmetrics-with-variable.json](https://raw.githubusercontent.com/SigNoz/dashboards/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
### Optional Step: Clean Up
In a cloud environment where resources are billed based on usage, cleaning up resources is crucial. This step involves removing the daemon service and any associated resources that were created during the setup process to collect and forward metrics and logs from your ECS infrastructure to SigNoz. To clean up the daemon service, you can run the following command:
```bash
aws cloudformation delete-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} --region ${REGION}
```
&nbsp;
Once you follow these steps, you should be able to see your logs and metrics data coming into SigNoz Cloud. To see data for your traces, click on Continue to next step below.

View File

@ -0,0 +1,28 @@
## Create SigNoz OTel Collector Configuration file
To set up the SigNoz OTel Collector config, follow these two steps:
### Step 1:
Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-daemon.yaml**
### Step 2:
Download the otelcol-daemon YAML configuration file:
```bash
wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-daemon.yaml
```
&nbsp;
Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below:
{region} : `{{REGION}}`
SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}`
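If you prefer to apply these substitutions from the shell, here is a minimal sketch using GNU `sed`, assuming the file was downloaded to the current directory:
```bash
# replace the placeholders in-place with your SigNoz Cloud values
sed -i -e 's/{region}/{{REGION}}/g' \
  -e 's/SIGNOZ_INGESTION_KEY/{{SIGNOZ_INGESTION_KEY}}/g' otelcol-daemon.yaml
```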
&nbsp;
Once you update these values, copy the updated content of the `otelcol-daemon.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-daemon.yaml** parameter that you created in Step 1.
&nbsp;
**NOTE:**
- After successful setup, feel free to remove the `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, and `logs` in the `otelcol-daemon.yaml` file.

View File

@ -0,0 +1,90 @@
**NOTE** : If you don't want to send traces data of your application, you can skip this step.
&nbsp;
## Send Traces Data
To send traces data from applications deployed in ECS to SigNoz Cloud using Daemon Service we created in the previous section, follow these steps:
### Step 1: Instrument your application
To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section in onboarding, or follow the docs [here](https://signoz.io/docs/instrumentation/).
&nbsp;
### Step 2: Add Entrypoint to your Application Container
Add an entrypoint to the application container to set the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable to the endpoint of the daemon service.
Obtain the endpoint or IP address of the instance on which the task is running. This can be done by querying the metadata service of the instance. For **EC2**, the metadata service is available at **169.254.169.254**.
The `entryPoint` will look like:
```yaml
{
...,
"entryPoint": [
"sh",
"-c",
"export OTEL_EXPORTER_OTLP_ENDPOINT=\"http://$(curl http://169.254.169.254/latest/meta-data/local-ipv4):4317\"; <Application Startup Commands>"
],
"command": [],
...
}
```
Replace `<Application Startup Commands>` with the commands to start your application.
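For instance, for a hypothetical Node.js service normally started with `node server.js`, the fragment would become:
```json
{
  "entryPoint": [
    "sh",
    "-c",
    "export OTEL_EXPORTER_OTLP_ENDPOINT=\"http://$(curl http://169.254.169.254/latest/meta-data/local-ipv4):4317\"; node server.js"
  ],
  "command": []
}
```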
&nbsp;
### Step 3: Add Service Name of your Application
To add the service name of your application, you need to set the `OTEL_RESOURCE_ATTRIBUTES` environment variable of the application container to `service.name=<your-service-name>`.
In your task definition, add the following lines:
```yaml
...
ContainerDefinitions:
- Name: <your-container-name>
...
Environment:
- Name: OTEL_RESOURCE_ATTRIBUTES
Value: service.name=<your-service-name>
...
...
```
If you are using JSON for task definition, then add the following lines:
```json
...
"containerDefinitions": [
{
"name": "<your-container-name>",
...
"environment": [
{
"name": "OTEL_RESOURCE_ATTRIBUTES",
"value": "service.name=<your-service-name>"
}
],
...
}
],
...
```
&nbsp;
### Step 4: Rebuild and Deploy Application Container
Once you follow the above steps, you need to rebuild the application container and deploy it to ECS cluster.
&nbsp;
### Step 5: Verify Data in SigNoz
Generate some traffic to your application and go to your SigNoz cloud **Services** page to see your application name in the service list.

View File

@ -0,0 +1,21 @@
These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure.
## Setup Daemon Service
&nbsp;
### Daemon Service Template
This step guides you in downloading a template which will be used to create a new service within your Amazon ECS (Elastic Container Service) cluster. The purpose of this service is to deploy a container that functions as a daemon; this container will collect data such as ECS infrastructure metrics and logs from Docker containers and send it to SigNoz.
We will use a CloudFormation template which includes parameters and configurations that define how the daemon service should be set up. For example, it specifies the container image to use for the daemon, the necessary environment variables, and network settings.
&nbsp;
Download the `daemon-template.yaml` using the command below:
```bash
wget https://github.com/SigNoz/benchmark/raw/main/ecs/ec2/daemon-template.yaml
```

View File

@ -0,0 +1,68 @@
## Create Daemon Service
Using the template we downloaded and the SigNoz OpenTelemetry Collector configuration we created, we will create the Daemon Service. This can be done by following these steps:
### Step 1: Set the environment variable
Set the environment variables by running the below commands using your AWS CLI:
```bash
export CLUSTER_NAME=<YOUR-ECS-CLUSTER-NAME>
export REGION=<YOUR-ECS-REGION>
export COMMAND=--config=env:SIGNOZ_CONFIG_CONTENT
export SIGNOZ_CONFIG_PATH=/ecs/signoz/otelcol-daemon.yaml
```
`<YOUR-ECS-CLUSTER-NAME>` - Name of your ECS cluster. For example, **my-test-cluster**
`<YOUR-ECS-REGION>` - Region in which your ECS cluster is running. For example, **us-east-1**
&nbsp;
### Step 2: Create stack for Daemon Service
With the environment variables set, you can proceed to create the Daemon service using `cloudformation create-stack` by running the below command using your AWS CLI:
```bash
aws cloudformation create-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} \
--template-body file://daemon-template.yaml \
--parameters ParameterKey=ClusterName,ParameterValue=${CLUSTER_NAME} \
ParameterKey=CreateIAMRoles,ParameterValue=True \
ParameterKey=command,ParameterValue=${COMMAND} \
ParameterKey=SigNozConfigPath,ParameterValue=${SIGNOZ_CONFIG_PATH} \
--capabilities CAPABILITY_NAMED_IAM \
--region ${REGION}
```
&nbsp;
### Step 3: Verify Daemon Service
To verify that the daemon service is running, you can run the following command, which should output the task ARN of the Daemon service.
```bash
aws ecs list-tasks --cluster ${CLUSTER_NAME} --region ${REGION}
```
&nbsp;
### Step 4: Verify Data in SigNoz
To verify that the data is being sent to SigNoz Cloud, you can go to the dashboard section of SigNoz and import one of the following dashboards:
- [instancemetrics.json](https://raw.githubusercontent.com/SigNoz/dashboards/chore/ecs-dashboards/ecs-infra-metrics/instance-metrics.json)
- [hostmetrics-with-variable.json](https://raw.githubusercontent.com/SigNoz/dashboards/main/hostmetrics/hostmetrics-with-variable.json)
&nbsp;
### Optional Step: Clean Up
In a cloud environment where resources are billed based on usage, cleaning up resources is crucial. This step involves removing the daemon service and any associated resources that were created during the setup process to collect and forward metrics and logs from your ECS infrastructure to SigNoz. To clean up the daemon service, you can run the following command:
```bash
aws cloudformation delete-stack --stack-name AOCECS-daemon-${CLUSTER_NAME}-${REGION} --region ${REGION}
```
&nbsp;
Once you follow these steps, you should be able to see your logs and metrics data coming into SigNoz Cloud. To see data for your traces, click on Continue to next step below.

View File

@ -0,0 +1,28 @@
## Create SigNoz OTel Collector Configuration file
To set up the SigNoz OTel Collector config, follow these two steps:
### Step 1:
Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-daemon.yaml**
### Step 2:
Download the otelcol-daemon YAML configuration file:
```bash
wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-daemon.yaml
```
&nbsp;
Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below:
{region} : `{{REGION}}`
SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}`
&nbsp;
Once you update these values, copy the updated content of the `otelcol-daemon.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-daemon.yaml** parameter that you created in Step 1.
&nbsp;
**NOTE:**
- After successful setup, feel free to remove the `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, and `logs` in the `otelcol-daemon.yaml` file.

View File

@ -0,0 +1,106 @@
**NOTE** : If you don't want to send traces data of your application, you can skip this step.
&nbsp;
## Send Traces Data
To send traces data from applications deployed in ECS to SigNoz Cloud using Daemon Service we created in the previous section, follow these steps:
### Step 1: Instrument your application
To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section in onboarding, or follow the docs [here](https://signoz.io/docs/instrumentation/).
&nbsp;
### Step 2: Add Entrypoint to your Application Container
Add an entrypoint to the application container to set the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable to the endpoint of the daemon service.
Obtain the endpoint or IP address of the instance on which the task is running. This can be done using the default network mode, i.e., **Bridge**
The `entryPoint` will look like:
```yaml
{
...,
"entryPoint": [
"sh",
"-c",
"export OTEL_EXPORTER_OTLP_ENDPOINT=\"http://$(curl http://169.254.169.254/latest/meta-data/local-ipv4):4317\"; <Application Startup Commands>"
],
"command": [],
...
}
```
&nbsp;
In case you are using custom docker networking, you would have to use `ExtraHosts` in your task definition:
```yaml
{
...
"extraHosts": [
{
"hostname": "signoz-collector",
"ipAddress": "host-gateway"
}
...
]
}
```
Replace `<Application Startup Commands>` with the commands to start your application.
&nbsp;
### Step 3: Add Service Name of your Application
To add the service name of your application, you need to set the `OTEL_RESOURCE_ATTRIBUTES` environment variable of the application container to `service.name=<your-service-name>`.
In your task definition, add the following lines:
```yaml
...
ContainerDefinitions:
- Name: <your-container-name>
...
Environment:
- Name: OTEL_RESOURCE_ATTRIBUTES
Value: service.name=<your-service-name>
...
...
```
If you are using JSON for task definition, then add the following lines:
```json
...
"containerDefinitions": [
{
"name": "<your-container-name>",
...
"environment": [
{
"name": "OTEL_RESOURCE_ATTRIBUTES",
"value": "service.name=<your-service-name>"
}
],
...
}
],
...
```
&nbsp;
### Step 4: Rebuild and Deploy Application Container
Once you follow the above steps, you need to rebuild the application container and deploy it to ECS cluster.
&nbsp;
### Step 5: Verify Data in SigNoz
Generate some traffic to your application and go to your SigNoz Cloud **Services** page to see your application name in the service list.

View File

@ -0,0 +1,21 @@
These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure.
## Setup Daemon Service
&nbsp;
### Daemon Service Template
This step guides you in downloading a template which will be used to create a new service within your Amazon ECS (Elastic Container Service) cluster. The purpose of this service is to deploy a container that functions as a daemon; this container will collect data such as ECS infrastructure metrics and logs from Docker containers and send it to SigNoz.
We will use a CloudFormation template which includes parameters and configurations that define how the daemon service should be set up. For example, it specifies the container image to use for the daemon, the necessary environment variables, and network settings.
&nbsp;
Download the `daemon-template.yaml` using the command below:
```bash
wget https://github.com/SigNoz/benchmark/raw/main/ecs/external/daemon-template.yaml
```

View File

@ -0,0 +1,30 @@
These steps will help you to collect **metrics, logs and traces** from your ECS infrastructure.
## Create SigNoz OTel Collector Configuration file
To set up the SigNoz OTel Collector config, follow these two steps:
### Step 1:
Navigate to the AWS Parameter store and create a new parameter named **/ecs/signoz/otelcol-sidecar.yaml**
### Step 2:
Download the otelcol-sidecar YAML configuration file:
```bash
wget https://github.com/SigNoz/benchmark/raw/main/ecs/otelcol-sidecar.yaml
```
&nbsp;
Update `{region}` and `SIGNOZ_INGESTION_KEY` values in your YAML configuration file with your SigNoz cloud values mentioned below:
{region} : `{{REGION}}`
SIGNOZ_INGESTION_KEY : `{{SIGNOZ_INGESTION_KEY}}`
&nbsp;
Once you update these values, copy the updated content of the `otelcol-sidecar.yaml` file and paste it in the value field of the **/ecs/signoz/otelcol-sidecar.yaml** parameter that you created in Step 1.
&nbsp;
**NOTE:**
- After successful setup, feel free to remove the `logging` exporter if it gets too noisy. To do so, simply remove the logging exporter from the **exporters** list in the following pipelines: `traces`, `metrics`, `metrics/aws` and `logs` in the `otelcol-sidecar.yaml` file.

View File

@ -0,0 +1,143 @@
## Create Sidecar Collector Container
This step involves integrating the SigNoz collector into your ECS task definitions as a sidecar container. The sidecar collector container will run alongside your application container(s) within the same ECS task and will collect ECS container metrics and send them to SigNoz Cloud. Follow these steps to create the Sidecar collector container:
### Step 1: Update task definition of your application
In your ECS task definition, include a new container definition specifically for the sidecar container. This container will operate alongside your main application container(s) within the same task definition. The JSON configuration for that will look like this:
```json
{
...
"containerDefinitions": [
...,
{
"name": "signoz-collector",
"image": "signoz/signoz-otel-collector:0.88.13",
"user": "root",
"command": [
"--config=env:SIGNOZ_CONFIG_CONTENT"
],
"secrets": [
{
"name": "SIGNOZ_CONFIG_CONTENT",
"valueFrom": "/ecs/signoz/otelcol-sidecar.yaml"
}
],
"memory": 1024,
"cpu": 512,
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 4317
},
{
"protocol": "tcp",
"containerPort": 4318
},
{
"containerPort": 8006,
"protocol": "tcp"
}
],
"healthCheck": {
"command": [
"CMD-SHELL",
"wget -qO- http://localhost:13133/ || exit 1"
],
"interval": 5,
"timeout": 6,
"retries": 5,
"startPeriod": 1
},
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/signoz-otel-EC2-sidcar",
"awslogs-region": "<aws-region>",
"awslogs-stream-prefix": "ecs",
"awslogs-create-group": "True"
}
}
}
]
...
}
```
Replace `<aws-region>` with the Region for your ECS cluster. For example, **us-east-1**
&nbsp;
### Step 2: Update ECS Task Execution Role
To update the Task Execution role, follow these steps:
1. **Identify the Role:** Identify the IAM role used by your ECS tasks for execution. It's often named something like **ecsTaskExecutionRole**.
2. **Edit the Role:** Navigate to the IAM console in the AWS Management Console, find the role by name, and open its details page.
3. **Attach Policy or add inline Policy:**
There are two ways to grant access to the Parameter store:
- **Attach AWS Managed Policies:** If the role doesn't already have the following policy, attach it:
- `AmazonSSMReadOnlyAccess`
- **Add Inline Policy:** Alternatively, for more granular control, you can create an inline policy that specifically grants access to only the necessary resources in the Parameter Store. The JSON for the inline policy will be:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ssm:GetParameter"
],
"Resource": [
"arn:aws:ssm:<aws-region>:<aws-account-id>:parameter/ecs/signoz/otelcol-sidecar.yaml"
],
"Effect": "Allow"
}
]
}
```
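If you prefer the AWS CLI over the console, the inline policy above can be attached with `put-role-policy`; the role name and policy name below are placeholders:
```bash
# assumes the policy JSON above was saved locally as signoz-ssm-policy.json
aws iam put-role-policy \
  --role-name ecsTaskExecutionRole \
  --policy-name SigNozSidecarSSMRead \
  --policy-document file://signoz-ssm-policy.json
```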
&nbsp;
### Step 3: Update ECS Task Role
To update the ECS Task Role, follow these steps:
1. **Identify the Role:** Determine the IAM role your ECS tasks are currently using to interact with AWS services. This role is specified in the ECS task definition under the "taskRoleArn" field.
2. **Edit the Role:** Go to the IAM section of the AWS Management Console, locate the role by its name, and open its configuration.
3. **Attach Policies or Add Inline Policy:**
There are two ways to grant access to the Parameter store:
- **Attach AWS Managed Policies:** If the role doesn't already have the following policy, attach it:
- `AmazonSSMReadOnlyAccess`
- **Add Inline Policy for Granular Access:** For tighter security, you might opt to create an inline policy that specifies exactly which resources the tasks can access and what actions they can perform on those resources. This is particularly important for accessing specific resources like the Parameter Store parameters used by the SigNoz sidecar. The JSON for the inline policy will be:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ssm:GetParameter"
],
"Resource": [
"arn:aws:ssm:<aws-region>:<aws-account-id>:parameter/ecs/signoz/otelcol-sidecar.yaml"
],
"Effect": "Allow"
}
]
}
```

View File

@ -0,0 +1,10 @@
## Deploy the task definition
If your application runs as an ECS service, update the service to use the new revision of your task definition. This tells ECS to start new tasks based on this updated definition and gracefully replace the old tasks with the new ones, ensuring minimal disruption to your application.
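For example, a minimal sketch of rolling out the new revision with the AWS CLI (the service and task-definition names are placeholders):
```bash
# point the service at the new task definition revision
aws ecs update-service \
  --cluster <YOUR-ECS-CLUSTER-NAME> \
  --service <YOUR-SERVICE-NAME> \
  --task-definition <YOUR-TASK-DEFINITION-FAMILY> \
  --region <YOUR-ECS-REGION>
```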
**NOTE:** Once the task is running, you should be able to see SigNoz sidecar container logs in CloudWatch Logs because we have set the logDriver parameter to be `awslogs` in our task definition.
## Verify data in SigNoz
To verify that your sidecar container is running, go to the Dashboard section of SigNoz Cloud and import the **ECS - Container Metrics** dashboard from [here](https://raw.githubusercontent.com/SigNoz/dashboards/main/ecs-infra-metrics/container-metrics.json).

View File

@ -0,0 +1,84 @@
**NOTE:** If you don't want to send trace data from your application, you can skip this step.
&nbsp;
## Send Traces Data
To send trace data from applications deployed in ECS to SigNoz Cloud using the sidecar container we created in the previous steps, follow these steps:
### Step 1: Instrument your application
To add OpenTelemetry instrumentation to your application, check out the Application Monitoring section of onboarding, or follow the docs [here](https://signoz.io/docs/instrumentation/).
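For instance, a Java service can be auto-instrumented by attaching the OpenTelemetry Java agent at startup. A minimal sketch; `app.jar` is a placeholder for your application, and the agent jar is the standard upstream release artifact:
```bash
# Run the application with OpenTelemetry auto-instrumentation attached
java -javaagent:./opentelemetry-javaagent.jar -jar app.jar
```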
&nbsp;
### Step 2: Configure OTLP Endpoint
In your application task definition, you need to set the OTLP endpoint to the endpoint of the sidecar container. This can be done by setting the environment variable `OTEL_EXPORTER_OTLP_ENDPOINT` to the endpoint of the sidecar container.
Depending on the network mode, the ECS task definition will change:
For **Bridge** network mode, the ECS task definition will be:
```json
{
...
"containerDefinitions": [
{
"name": "<your-container-name>",
"environment": [
{
"name": "OTEL_EXPORTER_OTLP_ENDPOINT",
"value": "http://signoz-collector:4317"
},
{
"name": "OTEL_RESOURCE_ATTRIBUTES",
"value": "service.name=<your-service-name>"
}
],
"links": [
"signoz-collector"
],
...
}
]
}
```
&nbsp;
For **AWS VPC** network mode, the ECS task definition will be:
```json
{
...
"containerDefinitions": [
{
"name": "<your-container-name>",
"environment": [
{
"name": "OTEL_EXPORTER_OTLP_ENDPOINT",
"value": "http://localhost:4317"
},
{
"name": "OTEL_RESOURCE_ATTRIBUTES",
"value": "service.name=<your-service-name>"
}
],
...
}
]
}
```
Replace `<your-container-name>` with the name of your container and `<your-service-name>` with the name of your service.
&nbsp;
### Step 3: Rebuild and Deploy Application Container
After instrumenting your application and configuring the OTLP endpoint, rebuild your application container with these changes and deploy it to the ECS cluster using the same task definition that we used in the previous section.
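A condensed sketch of that loop with the Docker and AWS CLIs, assuming an illustrative ECR repository `<account>.dkr.ecr.<aws-region>.amazonaws.com/my-app`, a task definition family `my-app-task` saved as `task-definition.json`, and the cluster and service names from earlier:
```bash
# Rebuild and push the instrumented application image
docker build -t <account>.dkr.ecr.<aws-region>.amazonaws.com/my-app:latest .
docker push <account>.dkr.ecr.<aws-region>.amazonaws.com/my-app:latest

# Register the updated task definition and roll the service onto it
aws ecs register-task-definition --cli-input-json file://task-definition.json
aws ecs update-service --cluster my-cluster --service my-service \
  --task-definition my-app-task
```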
&nbsp;
### Step 4: Verify Data in SigNoz
Generate some traffic to your application, then go to the **Services** page in SigNoz Cloud to see your application in the services list.
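To generate a quick burst of test traffic, a loop like the following works; `<your-app-endpoint>` is a placeholder for wherever your application is reachable:
```bash
# Send 100 requests to the application to produce traces
for i in $(seq 1 100); do
  curl -s "http://<your-app-endpoint>/" > /dev/null
done
```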

View File

@ -0,0 +1,133 @@
**NOTE:** If you don't want to send log data from your applications deployed on ECS, you can skip this step.
## Send Logs Data
To send log data from applications deployed in ECS to SigNoz Cloud using the sidecar container we created in the previous steps, follow these steps:
### Step 1: Configure Log Router
Add a new container definition in your ECS task definition for the Fluent Bit log router:
```json
{
    ...
    "containerDefinitions": [
        {
            "name": "signoz-log-router",
            "image": "906394416424.dkr.ecr.us-west-2.amazonaws.com/aws-for-fluent-bit:stable",
            "cpu": 250,
            "memory": 512,
            "essential": true,
            "dependsOn": [
                {
                    "containerName": "signoz-collector",
                    "condition": "HEALTHY"
                }
            ],
            "logConfiguration": {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-create-group": "True",
                    "awslogs-group": "/ecs/ecs-signoz-log-router",
                    "awslogs-region": "us-east-1",
                    "awslogs-stream-prefix": "ecs"
                }
            },
            "firelensConfiguration": {
                "type": "fluentbit",
                "options": {
                    "enable-ecs-log-metadata": "true"
                }
            }
        }
    ]
}
```
**NOTE:** When collecting logs from multiple applications, it is recommended to use the `<application-name>-log-router` pattern instead of `signoz-log-router` for the container name and `awslogs-group`. This helps separate the log routers of different applications.
&nbsp;
### Step 2: Send Logs to Sidecar Container
In your application task definition, you need to use the `awsfirelens` log driver to send logs to the sidecar otel-collector container via the Fluent Bit log router.
Depending on the network mode, update the ECS task definition:
For **Bridge** network mode:
```json
{
...
"containerDefinitions": [
{
"name": "<your-container-name>",
"dependsOn": [
{
"containerName": "signoz-log-router",
"condition": "START"
}
],
"logConfiguration": {
"logDriver": "awsfirelens",
"options": {
"Name": "forward",
"Match": "*",
"Host": "signoz-collector",
"Port": "8006",
"tls": "off",
"tls.verify": "off"
}
},
"links": [
"signoz-collector"
],
...
}
]
}
```
&nbsp;
For **AWS VPC** network mode:
```json
{
...
"containerDefinitions": [
{
"name": "<your-container-name>",
"dependsOn": [
{
"containerName": "signoz-log-router",
"condition": "START"
}
],
"logConfiguration": {
"logDriver": "awsfirelens",
"options": {
"Name": "forward",
"Match": "*",
"Host": "localhost",
"Port": "8006",
"tls": "off",
"tls.verify": "off"
}
},
...
}
]
}
```
### Step 3: Rebuild and Deploy Application Container
Rebuild your application container and deploy it to the ECS cluster using the same task definition that we updated in the previous section.
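The same CLI sketch from the traces section applies here as well; register the revised task definition and update the service (illustrative names):
```bash
aws ecs register-task-definition --cli-input-json file://task-definition.json
aws ecs update-service --cluster my-cluster --service my-service \
  --task-definition my-app-task
```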
&nbsp;
### Step 4: Verify Data in SigNoz
Generate some logs from your application, then go to the **Logs** page in SigNoz Cloud to see your application logs.

View File

@ -0,0 +1,24 @@
## Install otel-collector in your Kubernetes infra
&nbsp;
Add the SigNoz Helm Chart repository:
```bash
helm repo add signoz https://charts.signoz.io
```
&nbsp;
If the repository has already been added, update it to the latest index using:
```bash
helm repo update
```
&nbsp;
Install the Kubernetes Infrastructure chart provided by SigNoz:
```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```
- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
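To confirm the release deployed and the collector pods are running, a quick check; this assumes the release name `my-release` from the command above and that the pods carry the release name in their names:
```bash
# Inspect the Helm release and the pods it created
helm status my-release
kubectl get pods --all-namespaces | grep my-release
```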

View File

@ -0,0 +1,16 @@
## Monitor using Dashboards
To visualize Kubernetes metrics, you can use one of the following pre-built dashboards:
- [K8s Node-Level Metrics](https://github.com/SigNoz/dashboards/blob/main/k8s-node-%26-pod-metrics/k8s-node-level-metrics.json)
- [K8s Pod-Level Metrics](https://github.com/SigNoz/dashboards/blob/main/k8s-node-%26-pod-metrics/k8s-pod-level-metrics.json)
Copy the JSON data from these files and create a new dashboard in the Dashboards tab of SigNoz.
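For example, to pull one of the files locally first (this assumes the raw.githubusercontent.com mirror of the linked path):
```bash
# Download the node-level dashboard JSON for import
curl -sL -o k8s-node-level-metrics.json \
  "https://raw.githubusercontent.com/SigNoz/dashboards/main/k8s-node-%26-pod-metrics/k8s-node-level-metrics.json"
```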
&nbsp;
By following the previous step, you should also be able to see Kubernetes pod logs in the Logs section of SigNoz.
&nbsp;
To send traces for your application deployed on your Kubernetes cluster, check out the Application Monitoring section of onboarding.

View File

@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded.
For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
### Step 2: Include filelog receiver in the Pipeline ### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above. We will modify our pipeline inside `config.yaml` to include the receiver we have created above.

View File

@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded.
For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
### Step 2: Include filelog receiver in the Pipeline ### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above. We will modify our pipeline inside `config.yaml` to include the receiver we have created above.

View File

@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded.
For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
### Step 2: Include filelog receiver in the Pipeline ### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above. We will modify our pipeline inside `config.yaml` to include the receiver we have created above.

View File

@ -14,7 +14,7 @@ Replace `/tmp/app.log` with the path to your log file.
Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded. Note: change the `start_at` value to `beginning` if you want to read the log file from the beginning. It may be useful if you want to send old logs to SigNoz. The log records older than the standard log retention period (default 15 days) will be discarded.
For more configurations that are available for syslog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver). For more configurations that are available for filelog receiver please check [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).
### Step 2: Include filelog receiver in the Pipeline ### Step 2: Include filelog receiver in the Pipeline
We will modify our pipeline inside `config.yaml` to include the receiver we have created above. We will modify our pipeline inside `config.yaml` to include the receiver we have created above.

View File

@ -21,11 +21,13 @@ import {
} from './context/OnboardingContext'; } from './context/OnboardingContext';
import { DataSourceType } from './Steps/DataSource/DataSource'; import { DataSourceType } from './Steps/DataSource/DataSource';
import { import {
defaultAwsServices,
defaultInfraMetricsType, defaultInfraMetricsType,
defaultLogsType, defaultLogsType,
} from './utils/dataSourceUtils'; } from './utils/dataSourceUtils';
import { import {
APM_STEPS, APM_STEPS,
AWS_MONITORING_STEPS,
getSteps, getSteps,
INFRASTRUCTURE_MONITORING_STEPS, INFRASTRUCTURE_MONITORING_STEPS,
LOGS_MANAGEMENT_STEPS, LOGS_MANAGEMENT_STEPS,
@ -35,6 +37,7 @@ export enum ModulesMap {
APM = 'APM', APM = 'APM',
LogsManagement = 'LogsManagement', LogsManagement = 'LogsManagement',
InfrastructureMonitoring = 'InfrastructureMonitoring', InfrastructureMonitoring = 'InfrastructureMonitoring',
AwsMonitoring = 'AwsMonitoring',
} }
export interface ModuleProps { export interface ModuleProps {
@ -68,6 +71,12 @@ export const useCases = {
desc: desc:
'Monitor Kubernetes infrastructure metrics, hostmetrics, or metrics of any third-party integration', 'Monitor Kubernetes infrastructure metrics, hostmetrics, or metrics of any third-party integration',
}, },
AwsMonitoring: {
id: ModulesMap.AwsMonitoring,
title: 'AWS Monitoring',
desc:
'Monitor your traces, logs and metrics for AWS services like EC2, ECS, EKS etc.',
},
}; };
export default function Onboarding(): JSX.Element { export default function Onboarding(): JSX.Element {
@ -173,6 +182,13 @@ export default function Onboarding(): JSX.Element {
setSelectedModuleSteps(LOGS_MANAGEMENT_STEPS); setSelectedModuleSteps(LOGS_MANAGEMENT_STEPS);
updateSelectedDataSource(defaultLogsType); updateSelectedDataSource(defaultLogsType);
} }
} else if (selectedModule?.id === ModulesMap.AwsMonitoring) {
if (selectedDataSource) {
setModuleStepsBasedOnSelectedDataSource(selectedDataSource);
} else {
setSelectedModuleSteps(AWS_MONITORING_STEPS);
updateSelectedDataSource(defaultAwsServices);
}
} else if (selectedModule?.id === ModulesMap.APM) { } else if (selectedModule?.id === ModulesMap.APM) {
handleAPMSteps(); handleAPMSteps();
} }

View File

@ -1,6 +1,7 @@
/* eslint-disable @typescript-eslint/ban-ts-comment */ /* eslint-disable @typescript-eslint/ban-ts-comment */
import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer'; import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer';
import { ApmDocFilePaths } from 'container/OnboardingContainer/constants/apmDocFilePaths'; import { ApmDocFilePaths } from 'container/OnboardingContainer/constants/apmDocFilePaths';
import { AwsMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/awsMonitoringDocFilePaths';
import { InfraMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/infraMonitoringDocFilePaths'; import { InfraMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/infraMonitoringDocFilePaths';
import { LogsManagementDocFilePaths } from 'container/OnboardingContainer/constants/logsManagementDocFilePaths'; import { LogsManagementDocFilePaths } from 'container/OnboardingContainer/constants/logsManagementDocFilePaths';
import { import {
@ -66,6 +67,8 @@ export default function MarkdownStep(): JSX.Element {
docFilePaths = LogsManagementDocFilePaths; docFilePaths = LogsManagementDocFilePaths;
} else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) { } else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) {
docFilePaths = InfraMonitoringDocFilePaths; docFilePaths = InfraMonitoringDocFilePaths;
} else if (selectedModule?.id === ModulesMap.AwsMonitoring) {
docFilePaths = AwsMonitoringDocFilePaths;
} }
// @ts-ignore // @ts-ignore
if (docFilePaths && docFilePaths[path]) { if (docFilePaths && docFilePaths[path]) {

View File

@ -151,6 +151,10 @@ export default function ModuleStepsContainer({
history.push(ROUTES.LOGS_EXPLORER); history.push(ROUTES.LOGS_EXPLORER);
} else if (selectedModule.id === ModulesMap.InfrastructureMonitoring) { } else if (selectedModule.id === ModulesMap.InfrastructureMonitoring) {
history.push(ROUTES.APPLICATION); history.push(ROUTES.APPLICATION);
} else if (selectedModule.id === ModulesMap.AwsMonitoring) {
history.push(ROUTES.APPLICATION);
} else {
history.push(ROUTES.APPLICATION);
} }
}; };
@ -264,6 +268,69 @@ export default function ModuleStepsContainer({
module: activeStep?.module?.id, module: activeStep?.module?.id,
}); });
break; break;
case stepsMap.sendLogsCloudwatch:
trackEvent('Onboarding V2: Send Logs Cloudwatch', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.setupDaemonService:
trackEvent('Onboarding V2: Setup ECS Daemon Service', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.createOtelConfig:
trackEvent('Onboarding V2: Create ECS OTel Config', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.createDaemonService:
trackEvent('Onboarding V2: Create ECS Daemon Service', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.ecsSendData:
trackEvent('Onboarding V2: ECS send traces data', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.createSidecarCollectorContainer:
trackEvent('Onboarding V2: ECS create Sidecar Container', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.deployTaskDefinition:
trackEvent('Onboarding V2: ECS deploy task definition', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.ecsSendLogsData:
trackEvent('Onboarding V2: ECS Fargate send logs data', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
case stepsMap.monitorDashboard:
trackEvent('Onboarding V2: EKS monitor dashboard', {
dataSource: selectedDataSource?.id,
environment: selectedEnvironment,
module: activeStep?.module?.id,
});
break;
default: default:
break; break;
} }

View File

@ -0,0 +1,127 @@
/* eslint-disable simple-import-sort/imports */
// Application Logs Start
// LINUX AMD 64
import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-installOtelCollector.md';
import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-configureReceiver.md';
import AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxAMD64/appplicationLogs-linuxamd64-runOtelCollector.md';
// LINUX ARM 64
import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-installOtelCollector.md';
import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-configureReceiver.md';
import AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/LinuxARM64/appplicationLogs-linuxarm64-runOtelCollector.md';
// MacOS AMD 64
import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-installOtelCollector.md';
import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-configureReceiver.md';
import AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsAMD64/appplicationLogs-macosamd64-runOtelCollector.md';
// MacOS ARM 64
import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-installOtelCollector.md';
import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_configureReceiver from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-configureReceiver.md';
import AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_restartOtelCollector from '../Modules/AwsMonitoring/EC2ApplicationLogs/md-docs/MacOsARM64/appplicationLogs-macosarm64-runOtelCollector.md';
// Application Logs End
// Hostmetrics Start
// LINUX AMD 64
import AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-setupOtelCollector.md';
import AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxAMD64/hostmetrics-configureHostmetricsJson.md';
// LINUX ARM 64
import AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-setupOtelCollector.md';
import AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/LinuxARM64/hostmetrics-configureHostmetricsJson.md';
// MacOS AMD 64
import AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-setupOtelCollector.md';
import AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsAMD64/hostmetrics-configureHostmetricsJson.md';
// MacOS ARM 64
import AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_setupOtelCollector from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-setupOtelCollector.md';
import AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_configureHostmetricsJson from '../Modules/AwsMonitoring/EC2InfrastructureMetrics/md-docs/MacOsARM64/hostmetrics-configureHostmetricsJson.md';
// Hostmetrics End
// AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector,
import AwsMonitoring_awsEcsEc2_setupDaemonService from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-setupDeamonService.md';
import AwsMonitoring_awsEcsEc2_createOtelConfig from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createOtelConfig.md';
import AwsMonitoring_awsEcsEc2_createDaemonService from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-createDaemonService.md';
import AwsMonitoring_awsEcsEc2_ecsSendData from '../Modules/AwsMonitoring/ECSEc2/md-docs/ecsEc2-sendData.md';
import AwsMonitoring_awsEcsExternal_setupDaemonService from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-setupDeamonService.md';
import AwsMonitoring_awsEcsExternal_createOtelConfig from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createOtelConfig.md';
import AwsMonitoring_awsEcsExternal_createDaemonService from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-createDaemonService.md';
import AwsMonitoring_awsEcsExternal_ecsSendData from '../Modules/AwsMonitoring/ECSExternal/md-docs/ecsExternal-sendData.md';
import AwsMonitoring_awsEcsFargate_createOtelConfig from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createOtelConfig.md';
import AwsMonitoring_awsEcsFargate_createSidecarCollectorContainer from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-createSidecarCollectorContainer.md';
import AwsMonitoring_awsEcsFargate_deployTaskDefinition from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-deployTaskDefinition.md';
import AwsMonitoring_awsEcsFargate_ecsSendData from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendData.md';
import AwsMonitoring_awsEcsFargate_ecsSendLogsData from '../Modules/AwsMonitoring/ECSFargate/md-docs/ecsFargate-sendLogs.md';
// AWS EKS
import AwsMonitoring_awsEks_setupOtelCollector from '../Modules/AwsMonitoring/EKS/eks-installOtelCollector.md';
import AwsMonitoring_awsEks_monitorDashboard from '../Modules/AwsMonitoring/EKS/eks-monitorUsingDashboard.md';
export const AwsMonitoringDocFilePaths = {
/// /// AWS EC2 Application Logs
// Linux AMD64
AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_setupOtelCollector,
AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_configureReceiver,
AwsMonitoring_awsEc2ApplicationLogs_linuxAMD64_restartOtelCollector,
// LINUX ARM 64
AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_setupOtelCollector,
AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_configureReceiver,
AwsMonitoring_awsEc2ApplicationLogs_linuxARM64_restartOtelCollector,
// MacOS AMD 64
AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_setupOtelCollector,
AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_configureReceiver,
AwsMonitoring_awsEc2ApplicationLogs_macOsAMD64_restartOtelCollector,
// MacOS ARM 64
AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_setupOtelCollector,
AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_configureReceiver,
AwsMonitoring_awsEc2ApplicationLogs_macOsARM64_restartOtelCollector,
/// /// AWS EC2 Infrastructure Metrics
// Linux AMD64
AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_setupOtelCollector,
AwsMonitoring_awsEc2InfrastructureMetrics_linuxAMD64_configureHostmetricsJson,
// Linux ARM64
AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_setupOtelCollector,
AwsMonitoring_awsEc2InfrastructureMetrics_linuxARM64_configureHostmetricsJson,
// MacOS AMD64
AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_setupOtelCollector,
AwsMonitoring_awsEc2InfrastructureMetrics_macOsAMD64_configureHostmetricsJson,
// MacOS ARM64
AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_setupOtelCollector,
AwsMonitoring_awsEc2InfrastructureMetrics_macOsARM64_configureHostmetricsJson,
/// //// AWS ECS EC2
AwsMonitoring_awsEcsEc2_setupDaemonService,
AwsMonitoring_awsEcsEc2_createOtelConfig,
AwsMonitoring_awsEcsEc2_createDaemonService,
AwsMonitoring_awsEcsEc2_ecsSendData,
/// //// AWS ECS External
AwsMonitoring_awsEcsExternal_setupDaemonService,
AwsMonitoring_awsEcsExternal_createOtelConfig,
AwsMonitoring_awsEcsExternal_createDaemonService,
AwsMonitoring_awsEcsExternal_ecsSendData,
/// //// AWS ECS Fargate
AwsMonitoring_awsEcsFargate_createOtelConfig,
AwsMonitoring_awsEcsFargate_createSidecarCollectorContainer,
AwsMonitoring_awsEcsFargate_deployTaskDefinition,
AwsMonitoring_awsEcsFargate_ecsSendData,
AwsMonitoring_awsEcsFargate_ecsSendLogsData,
/// /// AWS EKS
AwsMonitoring_awsEks_setupOtelCollector,
AwsMonitoring_awsEks_monitorDashboard,
};

View File

@ -27,6 +27,14 @@ export const stepsMap = {
createHttpPayload: `createHttpPayload`, createHttpPayload: `createHttpPayload`,
configureAws: `configureAws`, configureAws: `configureAws`,
sendLogsCloudwatch: `sendLogsCloudwatch`, sendLogsCloudwatch: `sendLogsCloudwatch`,
setupDaemonService: `setupDaemonService`,
createOtelConfig: `createOtelConfig`,
createDaemonService: `createDaemonService`,
ecsSendData: `ecsSendData`,
createSidecarCollectorContainer: `createSidecarCollectorContainer`,
deployTaskDefinition: `deployTaskDefinition`,
ecsSendLogsData: `ecsSendLogsData`,
monitorDashboard: `monitorDashboard`,
}; };
export const DataSourceStep: SelectedModuleStepProps = { export const DataSourceStep: SelectedModuleStepProps = {
@ -153,3 +161,43 @@ export const SendLogsCloudwatch: SelectedModuleStepProps = {
title: 'Send Logs', title: 'Send Logs',
component: <MarkdownStep />, component: <MarkdownStep />,
}; };
export const SetupDaemonService: SelectedModuleStepProps = {
id: stepsMap.setupDaemonService,
title: 'Setup Daemon Service',
component: <MarkdownStep />,
};
export const CreateOtelConfig: SelectedModuleStepProps = {
id: stepsMap.createOtelConfig,
title: 'Create OTel Config',
component: <MarkdownStep />,
};
export const CreateDaemonService: SelectedModuleStepProps = {
id: stepsMap.createDaemonService,
title: 'Create Daemon Service',
component: <MarkdownStep />,
};
export const EcsSendData: SelectedModuleStepProps = {
id: stepsMap.ecsSendData,
title: 'Send Traces Data',
component: <MarkdownStep />,
};
export const CreateSidecarCollectorContainer: SelectedModuleStepProps = {
id: stepsMap.createSidecarCollectorContainer,
title: 'Create Sidecar Collector',
component: <MarkdownStep />,
};
export const DeployTaskDefinition: SelectedModuleStepProps = {
id: stepsMap.deployTaskDefinition,
title: 'Deploy Task Definition',
component: <MarkdownStep />,
};
export const EcsSendLogsData: SelectedModuleStepProps = {
id: stepsMap.ecsSendLogsData,
title: 'Send Logs Data',
component: <MarkdownStep />,
};
export const MonitorDashboard: SelectedModuleStepProps = {
id: stepsMap.monitorDashboard,
title: 'Monitor using Dashboard ',
component: <MarkdownStep />,
};

View File

@ -68,6 +68,7 @@ export const frameworksMap = {
}, },
LogsManagement: {}, LogsManagement: {},
InfrastructureMonitoring: {}, InfrastructureMonitoring: {},
AwsMonitoring: {},
}; };
export const defaultApplicationDataSource = { export const defaultApplicationDataSource = {
@ -212,6 +213,45 @@ const supportedInfraMetrics = [
}, },
]; ];
export const defaultAwsServices = {
name: 'EC2 - Application Logs',
id: 'awsEc2ApplicationLogs',
imgURL: `Logos/ec2.svg`,
};
const supportedAwsServices = [
{
name: 'EC2 - App/Server Logs',
id: 'awsEc2ApplicationLogs',
imgURL: `Logos/ec2.svg`,
},
{
name: 'EC2 - Infra Metrics',
id: 'awsEc2InfrastructureMetrics',
imgURL: `Logos/ec2.svg`,
},
{
name: 'ECS - EC2',
id: 'awsEcsEc2',
imgURL: `Logos/ecs.svg`,
},
{
name: 'ECS - Fargate',
id: 'awsEcsFargate',
imgURL: `Logos/ecs.svg`,
},
{
name: 'ECS - External',
id: 'awsEcsExternal',
imgURL: `Logos/ecs.svg`,
},
{
name: 'EKS',
id: 'awsEks',
imgURL: `Logos/eks.svg`,
},
];
export const getDataSources = (module: ModuleProps): DataSourceType[] => { export const getDataSources = (module: ModuleProps): DataSourceType[] => {
if (module.id === ModulesMap.APM) { if (module.id === ModulesMap.APM) {
return supportedLanguages; return supportedLanguages;
@ -221,7 +261,11 @@ export const getDataSources = (module: ModuleProps): DataSourceType[] => {
return supportedInfraMetrics; return supportedInfraMetrics;
} }
if (module.id === ModulesMap.LogsManagement) {
return supportedLogsTypes; return supportedLogsTypes;
}
return supportedAwsServices;
}; };
export const getSupportedFrameworks = ({ export const getSupportedFrameworks = ({

View File

@ -6,16 +6,24 @@ import {
ConfigureHostmetricsJSON, ConfigureHostmetricsJSON,
ConfigureMetricsReceiver, ConfigureMetricsReceiver,
ConfigureReceiver, ConfigureReceiver,
CreateDaemonService,
CreateHttpPayload, CreateHttpPayload,
CreateOtelConfig,
CreateSidecarCollectorContainer,
DataSourceStep, DataSourceStep,
DeployTaskDefinition,
EcsSendData,
EcsSendLogsData,
EnvDetailsStep, EnvDetailsStep,
InstallOpenTelemetryStep, InstallOpenTelemetryStep,
LogsTestConnectionStep, LogsTestConnectionStep,
MonitorDashboard,
PlotMetrics, PlotMetrics,
RestartOtelCollector, RestartOtelCollector,
RunApplicationStep, RunApplicationStep,
SelectMethodStep, SelectMethodStep,
SendLogsCloudwatch, SendLogsCloudwatch,
SetupDaemonService,
SetupLogDrains, SetupLogDrains,
SetupOtelCollectorStep, SetupOtelCollectorStep,
StartContainer, StartContainer,
@ -47,6 +55,8 @@ export const INFRASTRUCTURE_MONITORING_STEPS: SelectedModuleStepProps[] = [
DataSourceStep, DataSourceStep,
]; ];
export const AWS_MONITORING_STEPS: SelectedModuleStepProps[] = [DataSourceStep];
export const getSteps = ({ export const getSteps = ({
selectedDataSource, selectedDataSource,
}: GetStepsProps): SelectedModuleStepProps[] => { }: GetStepsProps): SelectedModuleStepProps[] => {
@ -72,6 +82,7 @@ export const getSteps = ({
case 'fluentD': case 'fluentD':
case 'fluentBit': case 'fluentBit':
case 'logStash': case 'logStash':
case 'awsEc2ApplicationLogs':
return [ return [
DataSourceStep, DataSourceStep,
EnvDetailsStep, EnvDetailsStep,
@ -98,6 +109,7 @@ export const getSteps = ({
case 'kubernetesInfraMetrics': case 'kubernetesInfraMetrics':
return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics]; return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics];
case 'hostMetrics': case 'hostMetrics':
case 'awsEc2InfrastructureMetrics':
return [ return [
DataSourceStep, DataSourceStep,
EnvDetailsStep, EnvDetailsStep,
@ -111,6 +123,28 @@ export const getSteps = ({
SetupOtelCollectorStep, SetupOtelCollectorStep,
ConfigureMetricsReceiver, ConfigureMetricsReceiver,
]; ];
case 'awsEcsExternal':
case 'awsEcsEc2':
return [
DataSourceStep,
SetupDaemonService,
CreateOtelConfig,
CreateDaemonService,
EcsSendData,
];
case 'awsEcsFargate':
return [
DataSourceStep,
CreateOtelConfig,
CreateSidecarCollectorContainer,
DeployTaskDefinition,
EcsSendData,
EcsSendLogsData,
];
case 'awsEks':
return [DataSourceStep, SetupOtelCollectorStep, MonitorDashboard];
default: default:
return [DataSourceStep]; return [DataSourceStep];
} }

View File

@ -46,6 +46,10 @@ export const QueryBuilder = memo(function QueryBuilder({
useEffect(() => { useEffect(() => {
if (currentDataSource !== initialDataSource || newPanelType !== panelType) { if (currentDataSource !== initialDataSource || newPanelType !== panelType) {
if (panelType === PANEL_TYPES.BAR) {
handleSetConfig(PANEL_TYPES.BAR, DataSource.METRICS);
return;
}
handleSetConfig(newPanelType, currentDataSource); handleSetConfig(newPanelType, currentDataSource);
} }
}, [ }, [

View File

@ -14,6 +14,7 @@ import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers'; import { AppState } from 'store/reducers';
import { SuccessResponse } from 'types/api'; import { SuccessResponse } from 'types/api';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { DataSource } from 'types/common/queryBuilder';
import { GlobalReducer } from 'types/reducer/globalTime'; import { GlobalReducer } from 'types/reducer/globalTime';
import { getTimeRange } from 'utils/getTimeRange'; import { getTimeRange } from 'utils/getTimeRange';
@ -25,6 +26,7 @@ function TimeSeriesView({
isError, isError,
yAxisUnit, yAxisUnit,
isFilterApplied, isFilterApplied,
dataSource,
}: TimeSeriesViewProps): JSX.Element { }: TimeSeriesViewProps): JSX.Element {
const graphRef = useRef<HTMLDivElement>(null); const graphRef = useRef<HTMLDivElement>(null);
@ -93,7 +95,7 @@ function TimeSeriesView({
chartData[0]?.length === 0 && chartData[0]?.length === 0 &&
!isLoading && !isLoading &&
!isError && !isError &&
!isFilterApplied && <NoLogs />} !isFilterApplied && <NoLogs dataSource={dataSource} />}
{!isLoading && {!isLoading &&
!isError && !isError &&
@ -111,6 +113,7 @@ interface TimeSeriesViewProps {
isLoading: boolean; isLoading: boolean;
isError: boolean; isError: boolean;
isFilterApplied: boolean; isFilterApplied: boolean;
dataSource: DataSource;
} }
TimeSeriesView.defaultProps = { TimeSeriesView.defaultProps = {

View File

@ -74,6 +74,7 @@ function TimeSeriesViewContainer({
isLoading={isLoading} isLoading={isLoading}
data={responseData} data={responseData}
yAxisUnit={isValidToConvertToMs ? 'ms' : 'short'} yAxisUnit={isValidToConvertToMs ? 'ms' : 'short'}
dataSource={dataSource}
/> />
); );
} }

View File

@ -148,3 +148,8 @@ export interface TimeRange {
startTime: string; startTime: string;
endTime: string; endTime: string;
} }
export enum LexicalContext {
CUSTOM_DATE_PICKER = 'customDatePicker',
CUSTOM_DATE_TIME_INPUT = 'customDateTimeInput',
}

View File

@ -44,6 +44,7 @@ import { DateTimeRangeType } from '../CustomDateTimeModal';
import { import {
getDefaultOption, getDefaultOption,
getOptions, getOptions,
LexicalContext,
LocalStorageTimeRange, LocalStorageTimeRange,
Time, Time,
TimeRange, TimeRange,
@ -318,31 +319,37 @@ function DateTimeSelection({
onLastRefreshHandler(); onLastRefreshHandler();
}; };
const onCustomDateHandler = (dateTimeRange: DateTimeRangeType): void => { const onCustomDateHandler = (
dateTimeRange: DateTimeRangeType,
lexicalContext?: LexicalContext,
): void => {
if (dateTimeRange !== null) { if (dateTimeRange !== null) {
const [startTimeMoment, endTimeMoment] = dateTimeRange; const [startTimeMoment, endTimeMoment] = dateTimeRange;
if (startTimeMoment && endTimeMoment) { if (startTimeMoment && endTimeMoment) {
let startTime = startTimeMoment;
let endTime = endTimeMoment;
if (
lexicalContext &&
lexicalContext === LexicalContext.CUSTOM_DATE_PICKER
) {
startTime = startTime.startOf('day');
endTime = endTime.endOf('day');
}
setCustomDTPickerVisible(false); setCustomDTPickerVisible(false);
startTimeMoment.startOf('day').toString();
updateTimeInterval('custom', [ updateTimeInterval('custom', [
startTimeMoment.startOf('day').toDate().getTime(), startTime.toDate().getTime(),
endTimeMoment.endOf('day').toDate().getTime(), endTime.toDate().getTime(),
]); ]);
setLocalStorageKey('startTime', startTimeMoment.toString()); setLocalStorageKey('startTime', startTime.toString());
setLocalStorageKey('endTime', endTimeMoment.toString()); setLocalStorageKey('endTime', endTime.toString());
updateLocalStorageForRoutes( updateLocalStorageForRoutes(JSON.stringify({ startTime, endTime }));
JSON.stringify({ startTime: startTimeMoment, endTime: endTimeMoment }),
);
if (!isLogsExplorerPage) { if (!isLogsExplorerPage) {
urlQuery.set( urlQuery.set(
QueryParams.startTime, QueryParams.startTime,
startTimeMoment?.toDate().getTime().toString(), startTime?.toDate().getTime().toString(),
);
urlQuery.set(
QueryParams.endTime,
endTimeMoment?.toDate().getTime().toString(),
); );
urlQuery.set(QueryParams.endTime, endTime?.toDate().getTime().toString());
const generatedUrl = `${location.pathname}?${urlQuery.toString()}`; const generatedUrl = `${location.pathname}?${urlQuery.toString()}`;
history.replace(generatedUrl); history.replace(generatedUrl);
} }

View File

@ -14,6 +14,7 @@ import { useSelector } from 'react-redux';
import { AppState } from 'store/reducers'; import { AppState } from 'store/reducers';
import { Widgets } from 'types/api/dashboard/getAll'; import { Widgets } from 'types/api/dashboard/getAll';
import { GlobalReducer } from 'types/reducer/globalTime'; import { GlobalReducer } from 'types/reducer/globalTime';
import { getGraphType } from 'utils/getGraphType';
const useCreateAlerts = (widget?: Widgets): VoidFunction => { const useCreateAlerts = (widget?: Widgets): VoidFunction => {
const queryRangeMutation = useMutation(getQueryRangeFormat); const queryRangeMutation = useMutation(getQueryRangeFormat);
@ -33,7 +34,7 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => {
const { queryPayload } = prepareQueryRangePayload({ const { queryPayload } = prepareQueryRangePayload({
query: widget.query, query: widget.query,
globalSelectedInterval, globalSelectedInterval,
graphType: widget.panelTypes, graphType: getGraphType(widget.panelTypes),
selectedTime: widget.timePreferance, selectedTime: widget.timePreferance,
variables: getDashboardVariables(selectedDashboard?.data.variables), variables: getDashboardVariables(selectedDashboard?.data.variables),
}); });
@ -44,7 +45,7 @@ const useCreateAlerts = (widget?: Widgets): VoidFunction => {
history.push( history.push(
`${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent( `${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent(
JSON.stringify(updatedQuery), JSON.stringify(updatedQuery),
)}`, )}&${QueryParams.panelTypes}=${widget.panelTypes}`,
); );
}, },
onError: () => { onError: () => {

View File

@ -1,3 +1,4 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys'; import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import { import {
GetMetricQueryRange, GetMetricQueryRange,
@ -14,6 +15,17 @@ type UseGetQueryRange = (
) => UseQueryResult<SuccessResponse<MetricRangePayloadProps>, Error>; ) => UseQueryResult<SuccessResponse<MetricRangePayloadProps>, Error>;
export const useGetQueryRange: UseGetQueryRange = (requestData, options) => { export const useGetQueryRange: UseGetQueryRange = (requestData, options) => {
const newRequestData: GetQueryResultsProps = useMemo(
() => ({
...requestData,
graphType:
requestData.graphType === PANEL_TYPES.BAR
? PANEL_TYPES.TIME_SERIES
: requestData.graphType,
}),
[requestData],
);
const queryKey = useMemo(() => { const queryKey = useMemo(() => {
if (options?.queryKey && Array.isArray(options.queryKey)) { if (options?.queryKey && Array.isArray(options.queryKey)) {
return [...options.queryKey]; return [...options.queryKey];
@ -23,11 +35,11 @@ export const useGetQueryRange: UseGetQueryRange = (requestData, options) => {
return options.queryKey; return options.queryKey;
} }
return [REACT_QUERY_KEY.GET_QUERY_RANGE, requestData]; return [REACT_QUERY_KEY.GET_QUERY_RANGE, newRequestData];
}, [options?.queryKey, requestData]); }, [options?.queryKey, newRequestData]);
return useQuery<SuccessResponse<MetricRangePayloadProps>, Error>({ return useQuery<SuccessResponse<MetricRangePayloadProps>, Error>({
queryFn: async ({ signal }) => GetMetricQueryRange(requestData, signal), queryFn: async ({ signal }) => GetMetricQueryRange(newRequestData, signal),
...options, ...options,
queryKey, queryKey,
}); });

Some files were not shown because too many files have changed in this diff.