adding separate s3 configs

This commit is contained in:
Ankit Nayan 2021-04-01 16:48:45 +05:30
parent ccf57d9c5c
commit 6334650d22
7 changed files with 801 additions and 34 deletions

View File

@ -0,0 +1,259 @@
version: "2.4"

volumes:
  metadata_data: {}
  middle_var: {}
  historical_var: {}
  broker_var: {}
  coordinator_var: {}
  router_var: {}

# If able to connect to kafka but not able to write to topic otlp_spans look into below link
# https://github.com/wurstmeister/kafka-docker/issues/409#issuecomment-428346707

services:
  zookeeper:
    image: bitnami/zookeeper:3.6.2-debian-10-r100
    ports:
      - "2181:2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes

  kafka:
    # image: wurstmeister/kafka
    image: bitnami/kafka:2.7.0-debian-10-r1
    ports:
      - "9092:9092"
    hostname: kafka
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      # Quoted so the YAML parser hands compose a string, not an int.
      KAFKA_ADVERTISED_PORT: "9092"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      ALLOW_PLAINTEXT_LISTENER: 'yes'
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_TOPICS: 'otlp_spans:1:1,flattened_spans:1:1'
    healthcheck:
      # test: ["CMD", "kafka-topics.sh", "--create", "--topic", "otlp_spans", "--zookeeper", "zookeeper:2181"]
      test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"]
      interval: 30s
      timeout: 10s
      retries: 10
    depends_on:
      - zookeeper

  # Druid's metadata store.
  postgres:
    container_name: postgres
    image: postgres:latest
    volumes:
      - metadata_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=FoolishPassword
      - POSTGRES_USER=druid
      - POSTGRES_DB=druid

  # Druid cluster — each process reads its config from environment_tiny_s3/<name>.
  coordinator:
    image: apache/druid:0.20.0
    container_name: coordinator
    volumes:
      - ./storage:/opt/druid/deepStorage
      - coordinator_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
    ports:
      - "8081:8081"
    command:
      - coordinator
    env_file:
      - environment_tiny_s3/coordinator

  broker:
    image: apache/druid:0.20.0
    container_name: broker
    volumes:
      - broker_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8082:8082"
    command:
      - broker
    env_file:
      - environment_tiny_s3/broker

  historical:
    image: apache/druid:0.20.0
    container_name: historical
    volumes:
      - ./storage:/opt/druid/deepStorage
      - historical_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8083:8083"
    command:
      - historical
    env_file:
      - environment_tiny_s3/historical

  middlemanager:
    image: apache/druid:0.20.0
    container_name: middlemanager
    volumes:
      - ./storage:/opt/druid/deepStorage
      - middle_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8091:8091"
    command:
      - middleManager
    env_file:
      - environment_tiny_s3/middlemanager

  router:
    image: apache/druid:0.20.0
    container_name: router
    volumes:
      - router_var:/opt/druid/data
    depends_on:
      - zookeeper
      - postgres
      - coordinator
    ports:
      - "8888:8888"
    command:
      - router
    env_file:
      - environment_tiny_s3/router

  # Consumes raw spans from Kafka and republishes them flattened.
  flatten-processor:
    image: signoz/flattener-processor:0.1.1
    container_name: flattener-processor
    depends_on:
      - kafka
      - otel-collector
    ports:
      - "8000:8000"
    environment:
      - KAFKA_BROKER=kafka:9092
      - KAFKA_INPUT_TOPIC=otlp_spans
      - KAFKA_OUTPUT_TOPIC=flattened_spans

  query-service:
    image: signoz.docker.scarf.sh/signoz/query-service:0.1.4
    container_name: query-service
    depends_on:
      - router
    ports:
      - "8080:8080"
    environment:
      - DruidClientUrl=http://router:8888
      - DruidDatasource=flattened_spans
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w

  frontend:
    image: signoz/frontend:0.2.0
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

  # One-shot job: registers the Kafka ingestion supervisor with Druid.
  create-supervisor:
    image: theithollow/hollowapp-blog:curl
    container_name: create-supervisor
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/supervisor-spec.json http://router:8888/druid/indexer/v1/supervisor"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/supervisor-spec.json:/app/supervisor-spec.json

  # One-shot job: applies the retention rules for the flattened_spans datasource.
  set-retention:
    image: theithollow/hollowapp-blog:curl
    container_name: set-retention
    command:
      - /bin/sh
      - -c
      - "curl -X POST -H 'Content-Type: application/json' -d @/app/retention-spec.json http://router:8888/druid/coordinator/v1/rules/flattened_spans"
    depends_on:
      - router
    restart: on-failure:6
    volumes:
      - ./druid-jobs/retention-spec.json:/app/retention-spec.json

  otel-collector:
    image: otel/opentelemetry-collector:0.18.0
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777" # pprof extension
      - "8887:8888" # Prometheus metrics exposed by the agent
      - "14268:14268" # Jaeger receiver
      - "55678" # OpenCensus receiver
      - "55680:55680" # OTLP HTTP/2.0 legacy port
      - "55681:55681" # OTLP HTTP/1.0 receiver
      - "4317:4317" # OTLP GRPC receiver
      - "55679:55679" # zpages extension
      - "13133" # health_check
    depends_on:
      # Long-form depends_on with `condition` requires the compose 2.x file
      # format (it was removed in 3.x) — version "2.4" above is intentional.
      kafka:
        condition: service_healthy

  # Demo app + load generator for trying out tracing.
  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ./locust-scripts:/locust

View File

@ -27,15 +27,32 @@ DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type: local
druid_storage_type=s3
druid_storage_bucket=signoz-saas
druid_storage_baseKey=solvzy-test1
druid_storage_bucket=solvzy-test2
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test2
druid_indexer_logs_s3Prefix=druid/indexing-logs
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/druid/deepStorage
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
@ -50,10 +67,6 @@ druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxD
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
# druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

View File

@ -27,15 +27,27 @@ DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type: local
druid_storage_type=s3
druid_storage_bucket=signoz-saas
druid_storage_baseKey=solvzy-test1
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test2
druid_indexer_logs_s3Prefix=druid/indexing-logs
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/druid/deepStorage
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
@ -49,10 +61,6 @@ druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
# druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

View File

@ -27,15 +27,28 @@ DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type: local
druid_storage_type=s3
druid_storage_bucket=signoz-saas
druid_storage_baseKey=solvzy-test1
druid_storage_bucket=solvzy-test2
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test2
druid_indexer_logs_s3Prefix=druid/indexing-logs
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/druid/deepStorage
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
@ -50,11 +63,6 @@ druid_indexer_runner_javaOptsArray=["-server", "-Xms512m", "-Xmx512m", "-XX:MaxD
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
druid_processing_buffer_sizeBytes=50MiB
# druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

View File

@ -27,15 +27,28 @@ DRUID_MAXDIRECTMEMORYSIZE=400m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_extensions_loadList =["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type: local
druid_storage_type=s3
druid_storage_bucket=signoz-saas
druid_storage_baseKey=solvzy-test1
druid_storage_bucket=solvzy-test2
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test2
druid_indexer_logs_s3Prefix=druid/indexing-logs
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/druid/deepStorage
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
@ -49,10 +62,6 @@ druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms256m", "-Xmx256m", "-XX:MaxDirectMemorySize=400m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
# druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

View File

@ -27,15 +27,28 @@ DRUID_MAXDIRECTMEMORYSIZE=128m
druid_emitter_logging_logLevel=debug
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
# druid_storage_type: local
druid_storage_type=s3
druid_storage_bucket=signoz-saas
druid_storage_baseKey=solvzy-test1
druid_storage_bucket=solvzy-test2
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test2
druid_indexer_logs_s3Prefix=druid/indexing-logs
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/druid/deepStorage
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
@ -49,10 +62,6 @@ druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xms64m", "-Xmx64m", "-XX:MaxDirectMemorySize=128m", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=25000000
# druid_storage_type=local
druid_storage_storageDirectory=/opt/druid/deepStorage
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/druid/data/indexing-logs
druid_processing_numThreads=1
druid_processing_numMergeBuffers=2

View File

@ -0,0 +1,461 @@
#!/bin/bash
set -o errexit
# True when the named command resolves in the current shell environment.
is_command_present() { type "$1" >/dev/null 2>&1; }

# True when 'wget' is installed.
has_wget() { has_cmd wget; }

# True when 'curl' is installed.
has_curl() { has_cmd curl; }

# True when the given command exists (binary on PATH, builtin, or function).
has_cmd() { command -v "$1" > /dev/null 2>&1; }

# True when running on macOS (bash sets $OSTYPE to darwin* there).
is_mac() { [[ $OSTYPE == darwin* ]]; }
# Detect the host OS and pick the matching package manager.
# Sets three globals read by the rest of the script:
#   desired_os       1 if the OS is supported for automatic install, else 0
#   os               display/identifier name (also used in download URLs for apt/yum)
#   package_manager  brew | apt-get | yum | zypper
check_os() {
if is_mac; then
package_manager="brew"
desired_os=1
os="Mac"
return
fi
# Read the NAME= field from /etc/*-release, stripping surrounding quotes.
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
Ubuntu*)
desired_os=1
os="ubuntu"
package_manager="apt-get"
;;
Debian*)
desired_os=1
os="debian"
package_manager="apt-get"
;;
Linux\ Mint*)
desired_os=1
os="linux mint"
package_manager="apt-get"
;;
Red\ Hat*)
desired_os=1
os="red hat"
package_manager="yum"
;;
CentOS*)
desired_os=1
os="centos"
package_manager="yum"
;;
SLES*)
desired_os=1
os="sles"
package_manager="zypper"
;;
openSUSE*)
desired_os=1
os="opensuse"
package_manager="zypper"
;;
*)
# Unknown distro: leave package_manager unset; callers gate on desired_os.
desired_os=0
os="Not Found"
esac
}
# This function checks if the relevant ports required by SigNoz are available or not
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|443"
# Each branch lists LISTEN sockets and keeps rows whose local address ends
# in one of the watched ports. macOS netstat uses '.' before the port and
# column $6 for state; Linux ss/netstat use ':' (columns $1 / $6 respectively).
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
elif is_command_present ss; then
# The `ss` command seems to be a better/faster version of `netstat`, but is not available on all Linux
# distributions by default. Other distributions have `ss` but no `netstat`. So, we try for `ss` first, then
# fallback to `netstat`.
port_check_output="$(ss --all --numeric --tcp | awk '$1 == "LISTEN" && $4 ~ /^.*:('"$ports_pattern"')$/')"
elif is_command_present netstat; then
port_check_output="$(netstat --all --numeric --tcp | awk '$6 == "LISTEN" && $4 ~ /^.*:('"$ports_pattern"')$/')"
fi
# Non-empty output means something is already listening on 80/443:
# report the failure to PostHog telemetry, tell the user, and abort.
if [[ -n $port_check_output ]]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "port not available" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
# Best-effort telemetry: output discarded, failures ignored.
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "SigNoz requires ports 80 & 443 to be open. Please shut down any other service(s) that may be running on these ports."
echo "You can run SigNoz on another port following this guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "++++++++++++++++++++++++++++++++++++++++"
echo ""
exit 1
fi
}
# Install Docker Engine using the distro's package manager, as decided by
# check_os ($package_manager / $os globals). Supports apt-get (Ubuntu/Debian/
# Mint), zypper (openSUSE/SLES) and falls back to yum (RHEL/CentOS).
install_docker() {
echo "++++++++++++++++++++++++"
echo "Setting up docker repos"
if [[ $package_manager == apt-get ]]; then
apt_cmd="sudo apt-get --yes --quiet"
$apt_cmd update
$apt_cmd install software-properties-common gnupg-agent
# Add Docker's official GPG key and apt repository for this distro/release.
curl -fsSL "https://download.docker.com/linux/$os/gpg" | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/$os $(lsb_release -cs) stable"
$apt_cmd update
echo "Installing docker"
$apt_cmd install docker-ce docker-ce-cli containerd.io
elif [[ $package_manager == zypper ]]; then
zypper_cmd="sudo zypper --quiet --no-gpg-checks --non-interactive"
echo "Installing docker"
if [[ $os == sles ]]; then
# SLES needs the Containers module activated via SUSEConnect before
# docker packages become available; version/arch are read from the system.
os_sp="$(cat /etc/*-release | awk -F= '$1 == "VERSION_ID" { gsub(/"/, ""); print $2; exit }')"
os_arch="$(uname -i)"
sudo SUSEConnect -p sle-module-containers/$os_sp/$os_arch -r ''
fi
$zypper_cmd install docker docker-runc containerd
sudo systemctl enable docker.service
else
# yum-based distros (RHEL / CentOS).
yum_cmd="sudo yum --assumeyes --quiet"
$yum_cmd install yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/$os/docker-ce.repo
echo "Installing docker"
$yum_cmd install docker-ce docker-ce-cli containerd.io
fi
}
# Download the docker-machine binary (v0.16.2) for this OS/arch and install it
# to /usr/local/bin. On Linux the download is staged in /tmp first, because the
# final copy into /usr/local/bin needs sudo while the download itself does not.
install_docker_machine() {
  # Bug fix: plain `echo "\n..."` printed a literal backslash-n; -e is
  # required for escape interpretation in bash's echo.
  echo -e "\nInstalling docker machine ..."
  if [[ $os == "Mac" ]];then
    curl -sL "https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-$(uname -s)-$(uname -m)" >/usr/local/bin/docker-machine
    chmod +x /usr/local/bin/docker-machine
  else
    curl -sL "https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-$(uname -s)-$(uname -m)" >/tmp/docker-machine
    chmod +x /tmp/docker-machine
    sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
  fi
}
# Install docker-compose 1.26.0 from GitHub releases when a supported package
# manager is present and /usr/bin/docker-compose does not already exist.
# On unsupported platforms, report the error to telemetry and exit 1.
install_docker_compose() {
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
if [[ ! -f /usr/bin/docker-compose ]];then
echo "++++++++++++++++++++++++"
echo "Installing docker-compose"
sudo curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# Symlink so tools that expect /usr/bin/docker-compose also find it.
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
echo "docker-compose installed!"
echo ""
fi
else
# Best-effort telemetry: report that docker-compose could not be installed.
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "docker-compose not found! Please install docker-compose first and then continue with this installation."
echo "Refer https://docs.docker.com/compose/install/ for installing docker-compose."
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
exit 1
fi
}
# Ensure the Docker daemon is running: on macOS launch Docker Desktop and poll
# until the engine answers; on Linux start docker.service via systemd if it is
# not already active.
start_docker() {
  echo "Starting Docker ..."
  # Bug fix: the original `[ $os == "Mac" ]` left $os unquoted in a single
  # bracket test — values with spaces set by check_os ("linux mint",
  # "red hat", "Not Found") word-split into a "[: too many arguments" error.
  # [[ ]] does no word splitting, and == is only portable inside [[ ]].
  if [[ "$os" == "Mac" ]]
  then
    # Docker Desktop has no systemd unit; wait until `docker system info` succeeds.
    open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
  else
    if ! sudo systemctl is-active docker.service > /dev/null; then
      echo "Starting docker service"
      sudo systemctl start docker.service
    fi
  fi
}
# Poll the SigNoz frontend API until it returns HTTP 200 or $1 seconds elapse.
# Sets the global $status_code, which the caller inspects after this returns.
# Halfway through (timeout == 50 of 60), if Druid reports no ingestion
# supervisor, the compose file is re-applied once as a recovery step.
wait_for_containers_start() {
  local timeout=$1
  # The while loop is important because for-loops don't work for dynamic values
  while [[ $timeout -gt 0 ]]; do
    status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
    if [[ "$status_code" -eq 200 ]]; then
      break
    else
      # Bug fix: '|| true' added — with `set -o errexit`, a failing curl here
      # (router not up yet) aborted the whole script mid-wait.
      SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor || true)"
      LEN_SUPERVISORS="${#SUPERVISORS}"
      # 19 is the byte length of '["flattened_spans"]' — the expected
      # supervisor list. Any other length means the supervisor is missing.
      if [[ "$LEN_SUPERVISORS" -ne 19 && $timeout -eq 50 ]];then
        # Bug fix: -e added so \n is a newline instead of a literal '\n'.
        echo -e "No Supervisors found... Re-applying docker compose\n"
        sudo docker-compose -f ./docker/docker-compose-tiny-with-s3-separate.yaml up -d
      fi
      echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
    fi
    ((timeout--))
    sleep 1
  done
  echo ""
}
bye() { # Prints a friendly good bye message and exits the script.
# Installed as the EXIT trap: "$?" is the exit status of the command that
# terminated the script, so this body only runs on failure paths.
if [ "$?" -ne 0 ]; then
# Disable errexit so a failing command below can't re-trigger an abort
# while we are already handling the failure.
set +o errexit
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny-with-s3-separate.yaml ps -a"
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
echo "Please share your email to receive support with the installation"
# Re-prompt until a non-empty email is entered.
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
# Best-effort telemetry: send the support request to PostHog, ignore failures.
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo ""
echo -e "\nWe will reach out to you at the email provided shortly, Exiting for now. Bye! 👋 \n"
# Exit 0 so the trap itself doesn't report a second failure.
exit 0
fi
}
# ---- Main installer flow -------------------------------------------------
echo -e "👋 Thank you for trying out SigNoz! "
echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
check_os
# Anonymous installation id: the machine's public IP as seen by ipify.
# NOTE(review): the PostHog project API key below is embedded in the script;
# consider externalizing it rather than hard-coding.
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
# Run bye if failure happens
trap bye EXIT
# Telemetry: "Installation Started" event (best-effort, failures ignored).
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
# Unsupported OS: only the telemetry event is sent here; the script
# continues and fails later when no package manager is usable.
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
# check_ports_occupied
# Check if Docker daemon is installed and available. If not, then install & start Docker for Linux machines. We cannot automatically install Docker Desktop on Mac OS
if ! is_command_present docker; then
if [[ $package_manager == "apt-get" || $package_manager == "zypper" || $package_manager == "yum" ]]; then
install_docker
else
echo ""
echo "+++++++++++ IMPORTANT READ ++++++++++++++++++++++"
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
fi
fi
# Install docker-compose
if ! is_command_present docker-compose; then
install_docker_compose
fi
# if ! is_command_present docker-compose; then
# install_docker_machine
# docker-machine create -d virtualbox --virtualbox-memory 3584 signoz
# fi
start_docker
echo ""
echo "Pulling the latest container images for SigNoz. To run as sudo it will ask for system password."
sudo docker-compose -f ./docker/docker-compose-tiny-with-s3-separate.yaml pull
echo ""
echo "Starting the SigNoz containers. It may take a few minutes."
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do it's thing.
sudo docker-compose -f ./docker/docker-compose-tiny-with-s3-separate.yaml up --detach --remove-orphans || true
# Sets the global $status_code, checked below.
wait_for_containers_start 60
echo ""
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny-with-s3-separate.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
# Attach Druid supervisor/datasource state to the failure event for debugging.
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
else
# Success path: telemetry, success banner, and an email prompt for updates.
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo "Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
# Re-prompt until a non-empty email is entered.
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
echo -e "\nThank you!\n"
##### Changing default memory limit of docker ############
# # Check if memory is less and Confirm to increase size of docker machine
# # https://github.com/docker/machine/releases
# # On OS X
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \
# chmod +x /usr/local/bin/docker-machine
# # On Linux
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
# chmod +x /tmp/docker-machine &&
# sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
# VBoxManage list vms
# docker-machine stop
# VBoxManage modifyvm default --cpus 2
# VBoxManage modifyvm default --memory 4096
# docker-machine start
# VBoxManage showvminfo default | grep Memory
# VBoxManage showvminfo default | grep CPU