feat: added frontend for newly added flows (#4151)

* feat: added frontend for newly added flows

* chore: added content for flows

* chore: updated content for dotnet docs

* chore: updated go instrumentation and http logs content

* fix: removed console log and return true

* fix: quickstart by default and cloudwatch logs

* fix: removed console log
CheetoDa 2023-12-20 20:16:21 +05:30 committed by GitHub
parent 90b8959045
commit 1be4731710
64 changed files with 2458 additions and 11 deletions

5 binary image files added (not shown); sizes: 51 KiB, 3.7 KiB, 957 B, 4.3 KiB, 45 KiB.

View File

@ -0,0 +1,24 @@
## Install otel-collector in your Kubernetes infra
 
Add the SigNoz Helm Chart repository
```bash
helm repo add signoz https://charts.signoz.io
```
 
If the chart is already present, update the chart to the latest using:
```bash
helm repo update
```
 
Install the Kubernetes Infrastructure chart provided by SigNoz
```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```
- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.
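To verify that the chart is installed and its pods are running, you can check the release from the same machine (a minimal sketch; it assumes the release name `my-release` and that the chart applies the standard `app.kubernetes.io/instance` label):
```bash
# Confirm the Helm release was deployed
helm status my-release
# List the pods created by the release (expected to include the otel-agent DaemonSet pods)
kubectl get pods -l app.kubernetes.io/instance=my-release
```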

View File

@ -0,0 +1,65 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,10 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once you run your .NET application, interact with it to generate some load, and then look for your application in the SigNoz UI.
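For example, you can generate a burst of requests against the sample `/` route (a quick sketch; it assumes the app is listening on `http://localhost:5000` — adjust the port to whatever `dotnet run` prints for your project):
```bash
# Hit the sample "/" route a few times so traces start flowing to SigNoz
for i in $(seq 1 20); do
  curl -s http://localhost:5000/ > /dev/null
done
```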

View File

@ -0,0 +1,71 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint to which traces are exported (here, the SigNoz Cloud ingestion endpoint).
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces directly to SigNoz Cloud. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.
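&nbsp;
If you prefer not to hard-code the endpoint and ingestion key, the OTLP exporter also reads the standard OpenTelemetry environment variables. A sketch of that alternative (assuming you remove the explicit `Endpoint` and `Headers` settings from `Program.cs` and let the SDK pick up the environment):
```bash
export OTEL_EXPORTER_OTLP_ENDPOINT="https://ingest.{{REGION}}.signoz.cloud:443"
export OTEL_EXPORTER_OTLP_HEADERS="signoz-access-token={{SIGNOZ_INGESTION_KEY}}"
export OTEL_RESOURCE_ATTRIBUTES="service.name={{MYAPP}}"
dotnet run
```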

View File

@ -0,0 +1,10 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once you run your .NET application, interact with it to generate some load, and then look for your application in the SigNoz UI.

View File

@ -0,0 +1,98 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
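&nbsp;
Before using this file, you can sanity-check it for YAML and configuration errors (a sketch; recent `otelcol-contrib` releases ship a `validate` subcommand):
```bash
cd otelcol-contrib
# Validate the configuration without starting the collector
./otelcol-contrib validate --config ./config.yaml
```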

View File

@ -0,0 +1,67 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,18 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
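&nbsp;
If you want the collector to keep running after you close the terminal, you can start it in the background instead (a sketch; the log and PID file names are arbitrary):
```bash
# Run the collector in the background and remember its PID so it can be stopped later
nohup ./otelcol-contrib --config ./config.yaml > otelcol-output.log 2>&1 &
echo "$!" > otel-pid
```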
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@ -0,0 +1,70 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint to which traces are exported (here, the SigNoz Cloud ingestion endpoint).
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces directly to SigNoz Cloud. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,10 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once you run your .NET application, interact with it to generate some load, and then look for your application in the SigNoz UI.

View File

@ -0,0 +1,99 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,68 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,18 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@ -0,0 +1,70 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint to which traces are exported (here, the SigNoz Cloud ingestion endpoint).
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces directly to SigNoz Cloud. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,10 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once you run your .NET application, interact with it to generate some load, and then look for your application in the SigNoz UI.

View File

@ -0,0 +1,97 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,67 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,18 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@ -0,0 +1,70 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint to which traces are exported (here, the SigNoz Cloud ingestion endpoint).
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces directly to SigNoz Cloud. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,10 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once you run your .NET application, interact with it to generate some load, and then look for your application in the SigNoz UI.

View File

@ -0,0 +1,98 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,68 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```csharp
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The `OtlpExporterOptions` set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@ -0,0 +1,18 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@ -37,7 +37,8 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -38,6 +38,7 @@ To configure your application to send data we will need a function to initialize
import (
    .....
    "google.golang.org/grpc/credentials"
    "github.com/gin-gonic/gin"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"

View File

@ -0,0 +1,39 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` in the default section of the credentials file.
An example credentials file looks like this:
```ini
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace `aws_access_key_id` and `aws_secret_access_key` with your own credential values.
&nbsp;
The account corresponding to these credentials should have the **AWS Identity and Access Management (IAM)** policy below. It allows describing and filtering log events across all log groups of that AWS account, which is required for forwarding CloudWatch logs to SigNoz.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.
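&nbsp;
A quick way to confirm that the machine picks up these credentials is the AWS CLI (a sketch; it assumes the AWS CLI is installed and uses the `default` profile):
```bash
# Print the account and IAM identity that the default credentials resolve to
aws sts get-caller-identity
```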

View File

@ -0,0 +1,50 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below enables autodiscovery and collects up to 100 log groups whose names start with the prefix `application`.
```yaml
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below disables autodiscovery and instead lists the log groups to collect by name.
```yaml
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the different parameters of the `awscloudwatch` receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).
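&nbsp;
To confirm that the configured credentials and region can actually list the log groups you are targeting, you can query CloudWatch Logs directly (a sketch; it assumes the AWS CLI is installed and uses the autodiscovery prefix from the first sample above):
```bash
# List log groups whose names start with the prefix used in the autodiscover sample
aws logs describe-log-groups --region us-east-1 --log-group-name-prefix application
```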

View File

@ -0,0 +1,94 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,38 @@
### Send logs to SigNoz
To test out the receiver, add it to the logs pipeline in the `pipelines` section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```yaml
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@ -0,0 +1,39 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` in the default section of the credentials file.
An example credentials file looks like this:
```ini
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace `aws_access_key_id` and `aws_secret_access_key` with your own credential values.
&nbsp;
The account corresponding to these credentials should have the **AWS Identity and Access Management (IAM)** policy below. It allows describing and filtering log events across all log groups of that AWS account, which is required for forwarding CloudWatch logs to SigNoz.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@ -0,0 +1,50 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below enables autodiscovery and collects up to 100 log groups whose names start with the prefix `application`.
```yaml
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below disables autodiscovery and instead lists the log groups to collect by name.
```yaml
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the different parameters of the `awscloudwatch` receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@ -0,0 +1,95 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,38 @@
### Send logs to SigNoz
To test out the receiver, add it to the logs pipeline in the `pipelines` section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```yaml
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@ -0,0 +1,39 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` in the default section of the credentials file.
An example credentials file looks like this:
```ini
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace `aws_access_key_id` and `aws_secret_access_key` with your own credential values.
&nbsp;
The account corresponding to these credentials should have the **AWS Identity and Access Management (IAM)** policy below. It allows describing and filtering log events across all log groups of that AWS account, which is required for forwarding CloudWatch logs to SigNoz.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@ -0,0 +1,50 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below enables autodiscovery and collects up to 100 log groups whose names start with the prefix `application`.
```yaml
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below disables autodiscovery and instead lists the log groups to collect by name.
```yaml
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the different parameters of the `awscloudwatch` receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@ -0,0 +1,94 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@ -0,0 +1,38 @@
### Send logs to SigNoz
To test out the receiver, add it to the logs pipeline in the `pipelines` section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```yaml
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@ -0,0 +1,39 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` in the default section of the credentials file.
An example credentials file looks like this:
```ini
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace `aws_access_key_id` and `aws_secret_access_key` with your own credential values.
&nbsp;
The account corresponding to these credentials should have the **AWS Identity and Access Management (IAM)** policy below. It allows describing and filtering log events across all log groups of that AWS account, which is required for forwarding CloudWatch logs to SigNoz.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@ -0,0 +1,50 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below enables autodiscovery and collects up to 100 log groups whose names start with the prefix `application`.
```yaml
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below disables autodiscovery and instead lists the log groups to collect by name.
```yaml
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the different parameters of the `awscloudwatch` receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@ -0,0 +1,93 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the below content
```yaml
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
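If your collector build supports it, you can also check the configuration file for errors without starting any pipelines; recent `otelcol-contrib` releases include a `validate` subcommand for this (treat it as optional if your version does not have it):
```bash
./otelcol-contrib validate --config ./config.yaml
```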


@ -0,0 +1,38 @@
### Send logs to SigNoz
To test out the receiver, update the `pipelines` section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup Otel Collector step so that the `logs` pipeline also includes the `awscloudwatch` receiver:
```bash
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.
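If you want the collector to keep running after you close the terminal, one common pattern is to start it in the background, redirect its output, and record its PID so it can be stopped later (the file names below are arbitrary):
```bash
# Start the collector in the background and remember its PID
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid

# Follow the collector output
tail -f otelcol-output.log

# Stop the collector when you are done
kill "$(< otel-pid)"
```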


@ -0,0 +1,29 @@
You can stream logs from Heroku to SigNoz using an [HTTPS drain](https://devcenter.heroku.com/articles/log-drains#https-drains).
&nbsp;
### Use the Heroku CLI to add an HTTPS drain
```bash
heroku drains:add https://<TENANT_NAME>:{{SIGNOZ_INGESTION_KEY}}@ingest.{{REGION}}.signoz.cloud:443/logs/heroku -a <YOUR_APP_NAME>
```
&nbsp;
`<TENANT_NAME>` should be replaced with the name of your SigNoz instance.
For example, if your SigNoz instance URL is `https://cpvo-test.us.signoz.cloud`, the `<TENANT_NAME>` is `cpvo-test`.
**Note:** You can find your instance URL in your browser's current tab address bar or in the invite email sent to you.
&nbsp;
`<YOUR_APP_NAME>` is the name of the Heroku application where you want to add the drain.
&nbsp;
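To confirm the drain was registered, you can list the drains configured for the app (assuming your Heroku CLI session is authenticated):
```bash
heroku drains -a <YOUR_APP_NAME>
```
&nbsp;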
Once you have successfully added the drain, click on the `Done` button below to see your logs in the SigNoz UI.


@ -0,0 +1,95 @@
## Payload Structure
To send logs to SigNoz over HTTP, the payload is an array of JSON log records that adheres to the [OTEL Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/).
&nbsp;
Each log record in the payload has the following fields:
```bash
[
{
"timestamp": <uint64>,
"trace_id": <hex string>,
"span_id": <hex string>,
"trace_flags": <int>,
"severity_text": <string>,
"severity_number": <int>,
"attributes": <map>,
"resources": <map>,
"body": <string>,
}
]
```
**Notes:**
* `timestamp` is a uint64 representing nanoseconds since the Unix epoch.
* You can use **body** or **message** to denote the log content.
&nbsp;
Any other fields apart from the ones mentioned above will be moved to the **attributes** map. For example:
```bash
[
{
"host": "myhost",
"method": "GET",
"body": "this is a log line"
}
]
```
Will be treated as:
```bash
[
{
"attributes": {
"host": "myhost",
"method": "GET"
},
"body": "this is a log line"
}
]
```
&nbsp;
## Send logs
This is a **sample cURL request** which can be used as a template:
&nbsp;
```bash
curl --location 'https://ingest.{{REGION}}.signoz.cloud:443/logs/json/' \
--header 'Content-Type: application/json' \
--header 'signoz-access-token: {{SIGNOZ_INGESTION_KEY}}' \
--data '[
{
"trace_id": "000000000000000018c51935df0b93b9",
"span_id": "18c51935df0b93b9",
"trace_flags": 0,
"severity_text": "info",
"severity_number": 4,
"attributes": {
"method": "GET",
"path": "/api/users"
},
"resources": {
"host": "myhost",
"namespace": "prod"
},
"message": "This is a log line"
}
]'
```
&nbsp;
This cURL request will use the timestamp of the moment you send the log.
&nbsp;
To specify a particular timestamp in your log, include the `timestamp` field in your cURL request and place it before the `trace_id` field, for example `"timestamp": 1698310066000000000`.
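For illustration, a minimal payload that sets the timestamp explicitly (all values below are placeholders) looks like this:
```bash
[
  {
    "timestamp": 1698310066000000000,
    "trace_id": "000000000000000018c51935df0b93b9",
    "span_id": "18c51935df0b93b9",
    "severity_text": "info",
    "body": "This is a log line with an explicit timestamp"
  }
]
```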
&nbsp;
**Note:** You can customize the cURL request as needed for your specific use case.


@ -0,0 +1,50 @@
You can stream logs from Vercel to SigNoz using [log drains](https://vercel.com/docs/observability/log-drains-overview/log-drains#configure-a-log-drain).
**Note:** Log Drains are only supported in **Vercel Pro** and **Enterprise accounts**.
&nbsp;
### Step 1: Select Sources
* From the Vercel dashboard, go to **Team Settings > Log Drains**.
&nbsp;
* Select the sources from which you want to collect logs (for example, Static, External, Lambda, etc.)
&nbsp;
* Choose `JSON` as the delivery format
&nbsp;
* Specify your target projects
&nbsp;
### Step 2: Add Log Drain
* Enter the endpoint URL as follows:
```bash
https://ingest.{{REGION}}.signoz.cloud:443/logs/json
```
&nbsp;
* Enable **Custom Headers** and add the headers `signoz-access-token` and `x-vercel-verify`:
```bash
signoz-access-token: {{SIGNOZ_INGESTION_KEY}}
```
```bash
x-vercel-verify: <YOUR_VERCEL_VERIFY_TOKEN>
```
**Note:** The value of `x-vercel-verify` will be visible on your screen in the endpoint section.
&nbsp;
* Click the **Verify** button and then the **Add Log Drain** button in Vercel.
&nbsp;
Click on the **Done** button below and you should be able to see your logs in SigNoz.


@ -67,7 +67,6 @@ export default function MarkdownStep(): JSX.Element {
} else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) { } else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) {
docFilePaths = InfraMonitoringDocFilePaths; docFilePaths = InfraMonitoringDocFilePaths;
} }
// @ts-ignore // @ts-ignore
if (docFilePaths && docFilePaths[path]) { if (docFilePaths && docFilePaths[path]) {
// @ts-ignore // @ts-ignore


@ -46,6 +46,11 @@ export default function SelectMethod(): JSX.Element {
<div> <div>
<Radio.Group onChange={onChange} value={value}> <Radio.Group onChange={onChange} value={value}>
<Space direction="vertical"> <Space direction="vertical">
<Radio value={OnboardingMethods.QUICK_START}>
<Typography.Text> Quick Start </Typography.Text> <br />
<small>Send data to SigNoz directly from OpenTelemetry SDK.</small>
</Radio>
<Radio value={OnboardingMethods.RECOMMENDED_STEPS}> <Radio value={OnboardingMethods.RECOMMENDED_STEPS}>
<Typography.Text> Use Recommended Steps </Typography.Text> <br /> <Typography.Text> Use Recommended Steps </Typography.Text> <br />
<small> <small>
@ -53,11 +58,6 @@ export default function SelectMethod(): JSX.Element {
you send to SigNoz, collect host metrics & logs). you send to SigNoz, collect host metrics & logs).
</small> </small>
</Radio> </Radio>
<Radio value={OnboardingMethods.QUICK_START}>
<Typography.Text> Quick Start </Typography.Text> <br />
<small>Send data to SigNoz directly from OpenTelemetry SDK.</small>
</Radio>
</Space> </Space>
</Radio.Group> </Radio.Group>
</div> </div>


@ -7,6 +7,40 @@
/// ////// JavaScript Done /// ////// JavaScript Done
/// ///// Go Start /// ///// Go Start
// Go-Kubernetes // Go-Kubernetes
/// /// ROR Done
/// /// .NET Start
// dotnet-Kubernetes
import APM_dotnet_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-installOtelCollector.md';
import APM_dotnet_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-instrumentApplication.md';
import APM_dotnet_kubernetes_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-runApplication.md';
// dotnet-LinuxAMD64-quickstart
import APM_dotnet_linuxAMD64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/QuickStart/dotnet-linuxamd64-quickStart-instrumentApplication.md';
import APM_dotnet_linuxAMD64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/QuickStart/dotnet-linuxamd64-quickStart-runApplication.md';
// dotnet-LinuxAMD64-recommended
import APM_dotnet_linuxAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-installOtelCollector.md';
import APM_dotnet_linuxAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-instrumentApplication.md';
import APM_dotnet_linuxAMD64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-runApplication.md';
// dotnet-LinuxARM64-quickstart
import APM_dotnet_linuxARM64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/QuickStart/dotnet-linuxarm64-quickStart-instrumentApplication.md';
import APM_dotnet_linuxARM64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/QuickStart/dotnet-linuxarm64-quickStart-runApplication.md';
// dotnet-LinuxARM64-recommended
import APM_dotnet_linuxARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-installOtelCollector.md';
import APM_dotnet_linuxARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-instrumentApplication.md';
import APM_dotnet_linuxARM64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-runApplication.md';
// dotnet-MacOsAMD64-quickstart
import APM_dotnet_macOsAMD64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/QuickStart/dotnet-macosamd64-quickStart-instrumentApplication.md';
import APM_dotnet_macOsAMD64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/QuickStart/dotnet-macosamd64-quickStart-runApplication.md';
// dotnet-MacOsAMD64-recommended
import APM_dotnet_macOsAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-installOtelCollector.md';
import APM_dotnet_macOsAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-instrumentApplication.md';
import APM_dotnet_macOsAMD64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-runApplication.md';
// dotnet-MacOsARM64-quickstart
import APM_dotnet_macOsARM64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/QuickStart/dotnet-macosarm64-quickStart-instrumentApplication.md';
import APM_dotnet_macOsARM64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/QuickStart/dotnet-macosarm64-quickStart-runApplication.md';
// dotnet-MacOsARM64-recommended
import APM_dotnet_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-installOtelCollector.md';
import APM_dotnet_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-instrumentApplication.md';
import APM_dotnet_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-runApplication.md';
import APM_go_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-installOtelCollector.md'; import APM_go_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-installOtelCollector.md';
import APM_go_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-instrumentApplication.md'; import APM_go_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-instrumentApplication.md';
import APM_go_kubernetes_recommendedSteps_runApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-runApplication.md'; import APM_go_kubernetes_recommendedSteps_runApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-runApplication.md';
@ -1100,7 +1134,7 @@ export const ApmDocFilePaths = {
APM_rails_linuxARM64_recommendedSteps_instrumentApplication, APM_rails_linuxARM64_recommendedSteps_instrumentApplication,
APM_rails_linuxARM64_recommendedSteps_runApplication, APM_rails_linuxARM64_recommendedSteps_runApplication,
// ROR-LinuxARM64-quinestjs // ROR-LinuxARM64-quickstart
APM_rails_linuxARM64_quickStart_instrumentApplication, APM_rails_linuxARM64_quickStart_instrumentApplication,
APM_rails_linuxARM64_quickStart_runApplication, APM_rails_linuxARM64_quickStart_runApplication,
@ -1121,4 +1155,51 @@ export const ApmDocFilePaths = {
// ROR-MacOsARM64-quickstart // ROR-MacOsARM64-quickstart
APM_rails_macOsARM64_quickStart_instrumentApplication, APM_rails_macOsARM64_quickStart_instrumentApplication,
APM_rails_macOsARM64_quickStart_runApplication, APM_rails_macOsARM64_quickStart_runApplication,
// ------------------------------------------------------------------------------------------------
/// //// ROR Done
/// //// .NET Start
	// dotnet-Kubernetes
APM_dotnet_kubernetes_recommendedSteps_setupOtelCollector,
APM_dotnet_kubernetes_recommendedSteps_instrumentApplication,
APM_dotnet_kubernetes_recommendedSteps_runApplication,
	// dotnet-LinuxAMD64-quickstart
APM_dotnet_linuxAMD64_quickStart_instrumentApplication,
APM_dotnet_linuxAMD64_quickStart_runApplication,
	// dotnet-LinuxAMD64-recommended
APM_dotnet_linuxAMD64_recommendedSteps_setupOtelCollector,
APM_dotnet_linuxAMD64_recommendedSteps_instrumentApplication,
APM_dotnet_linuxAMD64_recommendedSteps_runApplication,
	// dotnet-LinuxARM64-quickstart
APM_dotnet_linuxARM64_quickStart_instrumentApplication,
APM_dotnet_linuxARM64_quickStart_runApplication,
	// dotnet-LinuxARM64-recommended
APM_dotnet_linuxARM64_recommendedSteps_setupOtelCollector,
APM_dotnet_linuxARM64_recommendedSteps_instrumentApplication,
APM_dotnet_linuxARM64_recommendedSteps_runApplication,
	// dotnet-MacOsAMD64-quickstart
APM_dotnet_macOsAMD64_quickStart_instrumentApplication,
APM_dotnet_macOsAMD64_quickStart_runApplication,
	// dotnet-MacOsAMD64-recommended
APM_dotnet_macOsAMD64_recommendedSteps_setupOtelCollector,
APM_dotnet_macOsAMD64_recommendedSteps_instrumentApplication,
APM_dotnet_macOsAMD64_recommendedSteps_runApplication,
	// dotnet-MacOsARM64-quickstart
APM_dotnet_macOsARM64_quickStart_instrumentApplication,
APM_dotnet_macOsARM64_quickStart_runApplication,
	// dotnet-MacOsARM64-recommended
APM_dotnet_macOsARM64_recommendedSteps_setupOtelCollector,
APM_dotnet_macOsARM64_recommendedSteps_instrumentApplication,
APM_dotnet_macOsARM64_recommendedSteps_runApplication,
}; };


@ -121,6 +121,40 @@ import LogsManagement_logStash_macOsARM64_setupOtelCollector from '../Modules/Lo
import LogsManagement_logStash_macOsARM64_configureReceiver from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-configureReceiver.md'; import LogsManagement_logStash_macOsARM64_configureReceiver from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-configureReceiver.md';
import LogsManagement_logStash_macOsARM64_restartOtelCollector from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-restartOtelCollector.md'; import LogsManagement_logStash_macOsARM64_restartOtelCollector from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-restartOtelCollector.md';
// Heroku
import LogsManagement_heroku_addHttpDrain from '../Modules/LogsManagement/Heroku/md-docs/heroku-addHttpDrain.md';
// Vercel
import LogsManagement_vercel_setupLogDrains from '../Modules/LogsManagement/Vercel/md-docs/vercel-setupLogDrains.md';
// HTTP
import LogsManagement_http_createHttpPayload from '../Modules/LogsManagement/Http/md-docs/httpJsonPayload.md';
// Cloudwatch
import LogsManagement_cloudwatch_linuxAMD64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-installOtelCollector.md';
import LogsManagement_cloudwatch_linuxAMD64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-configureAws.md';
import LogsManagement_cloudwatch_linuxAMD64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-configureReceiver.md';
import LogsManagement_cloudwatch_linuxAMD64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-sendLogs.md';
import LogsManagement_cloudwatch_linuxARM64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-installOtelCollector.md';
import LogsManagement_cloudwatch_linuxARM64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-configureAws.md';
import LogsManagement_cloudwatch_linuxARM64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-configureReceiver.md';
import LogsManagement_cloudwatch_linuxARM64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-sendLogs.md';
import LogsManagement_cloudwatch_macOsAMD64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-installOtelCollector.md';
import LogsManagement_cloudwatch_macOsAMD64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-configureAws.md';
import LogsManagement_cloudwatch_macOsAMD64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-configureReceiver.md';
import LogsManagement_cloudwatch_macOsAMD64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-sendLogs.md';
import LogsManagement_cloudwatch_macOsARM64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-installOtelCollector.md';
import LogsManagement_cloudwatch_macOsARM64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-configureAws.md';
import LogsManagement_cloudwatch_macOsARM64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-configureReceiver.md';
import LogsManagement_cloudwatch_macOsARM64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-sendLogs.md';
export const LogsManagementDocFilePaths = { export const LogsManagementDocFilePaths = {
// Kubernetes Pod Logs // Kubernetes Pod Logs
LogsManagement_kubernetes_setupOtelCollector, LogsManagement_kubernetes_setupOtelCollector,
@ -235,5 +269,36 @@ export const LogsManagementDocFilePaths = {
LogsManagement_logStash_macOsARM64_setupOtelCollector, LogsManagement_logStash_macOsARM64_setupOtelCollector,
LogsManagement_logStash_macOsARM64_configureReceiver, LogsManagement_logStash_macOsARM64_configureReceiver,
LogsManagement_logStash_macOsARM64_restartOtelCollector, LogsManagement_logStash_macOsARM64_restartOtelCollector,
// Heroku
LogsManagement_heroku_addHttpDrain,
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------
// Vercel
LogsManagement_vercel_setupLogDrains,
// HTTP
LogsManagement_http_createHttpPayload,
// Cloudwatch
LogsManagement_cloudwatch_linuxAMD64_setupOtelCollector,
LogsManagement_cloudwatch_linuxAMD64_configureAws,
LogsManagement_cloudwatch_linuxAMD64_configureReceiver,
LogsManagement_cloudwatch_linuxAMD64_sendLogsCloudwatch,
LogsManagement_cloudwatch_linuxARM64_setupOtelCollector,
LogsManagement_cloudwatch_linuxARM64_configureAws,
LogsManagement_cloudwatch_linuxARM64_configureReceiver,
LogsManagement_cloudwatch_linuxARM64_sendLogsCloudwatch,
LogsManagement_cloudwatch_macOsAMD64_setupOtelCollector,
LogsManagement_cloudwatch_macOsAMD64_configureAws,
LogsManagement_cloudwatch_macOsAMD64_configureReceiver,
LogsManagement_cloudwatch_macOsAMD64_sendLogsCloudwatch,
LogsManagement_cloudwatch_macOsARM64_setupOtelCollector,
LogsManagement_cloudwatch_macOsARM64_configureAws,
LogsManagement_cloudwatch_macOsARM64_configureReceiver,
LogsManagement_cloudwatch_macOsARM64_sendLogsCloudwatch,
}; };


@ -22,6 +22,11 @@ export const stepsMap = {
plotMetrics: 'plotMetrics', plotMetrics: 'plotMetrics',
configureHostmetricsJson: 'configureHostmetricsJson', configureHostmetricsJson: 'configureHostmetricsJson',
configureMetricsReceiver: 'configureMetricsReceiver', configureMetricsReceiver: 'configureMetricsReceiver',
addHttpDrain: 'addHttpDrain',
setupLogDrains: `setupLogDrains`,
createHttpPayload: `createHttpPayload`,
configureAws: `configureAws`,
sendLogsCloudwatch: `sendLogsCloudwatch`,
}; };
export const DataSourceStep: SelectedModuleStepProps = { export const DataSourceStep: SelectedModuleStepProps = {
@ -119,3 +124,32 @@ export const ConfigureMetricsReceiver: SelectedModuleStepProps = {
title: 'Configure Metrics Receiver', title: 'Configure Metrics Receiver',
component: <MarkdownStep />, component: <MarkdownStep />,
}; };
export const AddHttpDrain: SelectedModuleStepProps = {
id: stepsMap.addHttpDrain,
title: 'Add HTTP Drain',
component: <MarkdownStep />,
};
export const SetupLogDrains: SelectedModuleStepProps = {
id: stepsMap.setupLogDrains,
title: 'Setup Log Drains',
component: <MarkdownStep />,
};
export const CreateHttpPayload: SelectedModuleStepProps = {
id: stepsMap.createHttpPayload,
title: 'Create Json Payload',
component: <MarkdownStep />,
};
export const ConfigureAws: SelectedModuleStepProps = {
id: stepsMap.configureAws,
title: 'Configure AWS',
component: <MarkdownStep />,
};
export const SendLogsCloudwatch: SelectedModuleStepProps = {
id: stepsMap.sendLogsCloudwatch,
title: 'Send Logs',
component: <MarkdownStep />,
};


@ -54,7 +54,7 @@ function OnboardingContextProvider({
const [selectedFramework, setSelectedFramework] = useState<string>(''); const [selectedFramework, setSelectedFramework] = useState<string>('');
const [selectedMethod, setSelectedMethod] = useState( const [selectedMethod, setSelectedMethod] = useState(
OnboardingMethods.RECOMMENDED_STEPS, OnboardingMethods.QUICK_START,
); );
const [ const [


@ -94,6 +94,11 @@ const supportedLanguages = [
id: 'rails', id: 'rails',
imgURL: `Logos/rails.png`, imgURL: `Logos/rails.png`,
}, },
{
name: '.NET',
id: 'dotnet',
imgURL: `Logos/dotnet.png`,
},
]; ];
export const defaultLogsType = { export const defaultLogsType = {
@ -138,6 +143,26 @@ const supportedLogsTypes = [
id: 'logStash', id: 'logStash',
imgURL: `Logos/logstash.svg`, imgURL: `Logos/logstash.svg`,
}, },
{
name: 'Heroku',
id: 'heroku',
imgURL: `Logos/heroku.png`,
},
{
name: 'Vercel',
id: 'vercel',
imgURL: `Logos/vercel.png`,
},
{
name: 'HTTP',
id: 'http',
imgURL: `Logos/http.png`,
},
{
name: 'Cloudwatch',
id: 'cloudwatch',
imgURL: `Logos/cloudwatch.png`,
},
]; ];
export const defaultInfraMetricsType = { export const defaultInfraMetricsType = {
@ -188,7 +213,8 @@ export const getSupportedFrameworks = ({
if ( if (
(moduleID === ModulesMap.APM && dataSourceName === 'go') || (moduleID === ModulesMap.APM && dataSourceName === 'go') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rails') (moduleID === ModulesMap.APM && dataSourceName === 'rails') ||
(moduleID === ModulesMap.APM && dataSourceName === '.NET')
) { ) {
return []; return [];
} }
@ -213,7 +239,8 @@ export const hasFrameworks = ({
moduleID === ModulesMap.LogsManagement || moduleID === ModulesMap.LogsManagement ||
moduleID === ModulesMap.InfrastructureMonitoring || moduleID === ModulesMap.InfrastructureMonitoring ||
(moduleID === ModulesMap.APM && dataSourceName === 'go') || (moduleID === ModulesMap.APM && dataSourceName === 'go') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rails') (moduleID === ModulesMap.APM && dataSourceName === 'rails') ||
(moduleID === ModulesMap.APM && dataSourceName === '.NET')
) { ) {
return false; return false;
} }


@ -1,9 +1,12 @@
import { import {
AddHttpDrain,
CheckServiceStatus, CheckServiceStatus,
CloneRepo, CloneRepo,
ConfigureAws,
ConfigureHostmetricsJSON, ConfigureHostmetricsJSON,
ConfigureMetricsReceiver, ConfigureMetricsReceiver,
ConfigureReceiver, ConfigureReceiver,
CreateHttpPayload,
DataSourceStep, DataSourceStep,
EnvDetailsStep, EnvDetailsStep,
InstallOpenTelemetryStep, InstallOpenTelemetryStep,
@ -12,6 +15,8 @@ import {
RestartOtelCollector, RestartOtelCollector,
RunApplicationStep, RunApplicationStep,
SelectMethodStep, SelectMethodStep,
SendLogsCloudwatch,
SetupLogDrains,
SetupOtelCollectorStep, SetupOtelCollectorStep,
StartContainer, StartContainer,
TestConnectionStep, TestConnectionStep,
@ -74,6 +79,21 @@ export const getSteps = ({
ConfigureReceiver, ConfigureReceiver,
RestartOtelCollector, RestartOtelCollector,
]; ];
case 'heroku':
return [DataSourceStep, AddHttpDrain];
case 'vercel':
return [DataSourceStep, SetupLogDrains];
case 'http':
return [DataSourceStep, CreateHttpPayload];
case 'cloudwatch':
return [
DataSourceStep,
EnvDetailsStep,
SetupOtelCollectorStep,
ConfigureAws,
ConfigureReceiver,
SendLogsCloudwatch,
];
case 'kubernetesInfraMetrics': case 'kubernetesInfraMetrics':
return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics]; return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics];