diff --git a/sample-apps/hotrod/README.md b/sample-apps/hotrod/README.md new file mode 100644 index 0000000000..1907c23dfc --- /dev/null +++ b/sample-apps/hotrod/README.md @@ -0,0 +1,27 @@ +# HotROD Sample Application (Kubernetes) + +Follow the steps in this section to install a sample application named HotROD, and generate tracing data. + +```console +kubectl create ns sample-application + +kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml +``` + +In case you have installed SigNoz in a namespace other than `platform` or selected a Helm release name other than `my-release`, follow the steps below: + +```console +export HELM_RELEASE=my-release-2 +export SIGNOZ_NAMESPACE=platform-2 +export HOTROD_NAMESPACE=sample-application-2 + +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash +``` + +To delete the sample application: + +```console +export HOTROD_NAMESPACE=sample-application-2 + +curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash +``` diff --git a/sample-apps/hotrod/hotrod-delete.sh b/sample-apps/hotrod/hotrod-delete.sh new file mode 100755 index 0000000000..f73f89c1a6 --- /dev/null +++ b/sample-apps/hotrod/hotrod-delete.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")"; + +HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"} + +if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then + echo "Default k8s namespace and SigNoz namespace must not be deleted" + echo "Deleting components only" + kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) +else + echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}" + kubectl delete namespace "${HOTROD_NAMESPACE}" +fi + +if [ $? 
-ne 0 ]; then + echo "❌ Failed to delete HotROD sample application" +else + echo "✅ Successfully deleted HotROD sample application" +fi diff --git a/sample-apps/hotrod/hotrod-install.sh b/sample-apps/hotrod/hotrod-install.sh new file mode 100755 index 0000000000..f6f3845205 --- /dev/null +++ b/sample-apps/hotrod/hotrod-install.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")"; + +# Namespace to install sample app +HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"} +SIGNOZ_NAMESPACE="${SIGNOZ_NAMESPACE:-platform}" + +# HotROD's docker image +if [[ -z $HOTROD_IMAGE ]]; then + HOTROD_REPO="${HOTROD_REPO:-jaegertracing/example-hotrod}" + HOTROD_TAG="${HOTROD_TAG:-1.30}" + HOTROD_IMAGE="${HOTROD_REPO}:${HOTROD_TAG}" +fi + +# Locust's docker image +if [[ -z $LOCUST_IMAGE ]]; then + LOCUST_REPO="${LOCUST_REPO:-grubykarol/locust}" + LOCUST_TAG="${LOCUST_TAG:-0.8.1-py3.6}" + LOCUST_IMAGE="${LOCUST_REPO}:${LOCUST_TAG}" +fi + +# Helm release name +HELM_RELEASE="${HELM_RELEASE:-my-release}" + +# Otel Collector service address +if [[ -z $JAEGER_ENDPOINT ]]; then + if [[ "$HELM_RELEASE" == *"signoz"* ]]; then + JAEGER_ENDPOINT="http://${HELM_RELEASE}-otel-collector.${SIGNOZ_NAMESPACE}.svc.cluster.local:14268/api/traces" + else + JAEGER_ENDPOINT="http://${HELM_RELEASE}-signoz-otel-collector.${SIGNOZ_NAMESPACE}.svc.cluster.local:14268/api/traces" + fi +fi + +# Create namespace for sample application if it does not exist +kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/dev/null | kubectl apply -f - + +# Set up sample apps into specified namespace +kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \ + (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \ + HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \ + HOTROD_IMAGE="${HOTROD_IMAGE}" \ + LOCUST_IMAGE="${LOCUST_IMAGE}" \ + JAEGER_ENDPOINT="${JAEGER_ENDPOINT}" \ + envsubst \ + ) + +if [ $? 
-ne 0 ]; then + echo "❌ Failed to deploy HotROD sample application" +else + echo "✅ Successfully deployed HotROD sample application" +fi diff --git a/sample-apps/hotrod/hotrod-template.yaml b/sample-apps/hotrod/hotrod-template.yaml new file mode 100644 index 0000000000..6fdd6dd9ae --- /dev/null +++ b/sample-apps/hotrod/hotrod-template.yaml @@ -0,0 +1,223 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: locust-cm +data: + ATTACKED_HOST: http://hotrod:8080 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: scripts-cm +data: + locustfile.py: | + from locust import HttpLocust, TaskSet, task + class UserTasks(TaskSet): + @task + def rachel(self): + self.client.get("/dispatch?customer=123&nonse=0.6308392664170006") + @task + def trom(self): + self.client.get("/dispatch?customer=392&nonse=0.015296363321630757") + @task + def japanese(self): + self.client.get("/dispatch?customer=731&nonse=0.8022286220408668") + @task + def coffee(self): + self.client.get("/dispatch?customer=567&nonse=0.0022220379420636593") + class WebsiteUser(HttpLocust): + task_set = UserTasks +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + service: hotrod + name: hotrod +spec: + replicas: 1 + selector: + matchLabels: + service: hotrod + strategy: {} + template: + metadata: + labels: + service: hotrod + spec: + containers: + - args: + - all + env: + - name: JAEGER_ENDPOINT + value: ${JAEGER_ENDPOINT} + image: ${HOTROD_IMAGE} + imagePullPolicy: IfNotPresent + name: hotrod + ports: + - containerPort: 8080 + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + service: hotrod + name: hotrod +spec: + ports: + - name: "8080" + port: 8080 + targetPort: 8080 + selector: + service: hotrod +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + labels: + role: locust-master + name: locust-master +spec: + 
replicas: 1 + selector: + matchLabels: + role: locust-master + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + role: locust-master + spec: + containers: + - image: ${LOCUST_IMAGE} + imagePullPolicy: IfNotPresent + name: locust-master + env: + - name: ATTACKED_HOST + valueFrom: + configMapKeyRef: + name: locust-cm + key: ATTACKED_HOST + - name: LOCUST_MODE + value: MASTER + - name: LOCUST_OPTS + value: --print-stats + volumeMounts: + - mountPath: /locust + name: locust-scripts + ports: + - containerPort: 5557 + name: comm + - containerPort: 5558 + name: comm-plus-1 + - containerPort: 8089 + name: web-ui + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: locust-scripts + configMap: + name: scripts-cm +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: locust-master + name: locust-master +spec: + ports: + - port: 5557 + name: communication + - port: 5558 + name: communication-plus-1 + - port: 8089 + targetPort: 8089 + name: web-ui + selector: + role: locust-master +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + labels: + role: locust-slave + name: locust-slave +spec: + replicas: 1 + selector: + matchLabels: + role: locust-slave + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + role: locust-slave + spec: + containers: + - image: ${LOCUST_IMAGE} + imagePullPolicy: IfNotPresent + name: locust-slave + env: + - name: ATTACKED_HOST + valueFrom: + configMapKeyRef: + name: locust-cm + key: ATTACKED_HOST + - name: LOCUST_MODE + value: SLAVE + - name: LOCUST_MASTER + value: 
locust-master + volumeMounts: + - mountPath: /locust + name: locust-scripts + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: locust-scripts + configMap: + name: scripts-cm