Installation changes for docker (#149)

* installation steps WIP

* changing install.sh

* fixes

* fixes

* fixes

* handled enter key press in setup_type

* fixes

* fixes

* fixes

Co-authored-by: Ankit Anand <cruxaki@gmail.com>
Ankit Nayan 2021-06-03 20:54:41 +05:30 committed by GitHub
parent e94d984cdb
commit 362f264bae
25 changed files with 809 additions and 325 deletions


@@ -0,0 +1,517 @@
<?xml version="1.0"?>
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<!-- For HTTPS and SSL over native protocol. -->
<!--
<https_port>8443</https_port>
<tcp_ssl_port>9440</tcp_ssl_port>
-->
<!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
<verificationMode>none</verificationMode>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
<client> <!-- Used for connecting to https dictionary source -->
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
<invalidCertificateHandler>
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
<name>RejectCertificateHandler</name>
</invalidCertificateHandler>
</client>
</openSSL>
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
<!--
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
-->
<!-- Port for communication between replicas. Used for data exchange. -->
<interserver_http_port>9009</interserver_http_port>
<!-- Hostname that is used by other replicas to request this server.
If not specified, then it is determined analogously to the 'hostname -f' command.
This setting could be used to switch replication to another network interface.
-->
<!--
<interserver_http_host>example.yandex.ru</interserver_http_host>
-->
<!-- Listen on the specified host. Use :: (wildcard IPv6 address) if you want to accept connections over both IPv4 and IPv6 from everywhere. -->
<listen_host>::</listen_host>
<!-- Same for hosts with disabled ipv6: -->
<!-- <listen_host>0.0.0.0</listen_host> -->
<!-- Default values - try to listen on localhost on both IPv4 and IPv6: -->
<!-- <listen_host>::1</listen_host> -->
<!-- <listen_host>127.0.0.1</listen_host> -->
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<!-- Set a limit on the number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve the
correct maximum value. -->
<!-- <max_open_files>262144</max_open_files> -->
<!-- Size of the cache for uncompressed blocks of data, used by tables of the MergeTree family.
In bytes. There is a single cache for the whole server. Memory is allocated only on demand.
The cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
The uncompressed cache is advantageous only for very short queries and in rare cases.
-->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<!-- Approximate size of the mark cache, used by tables of the MergeTree family.
In bytes. There is a single cache for the whole server. Memory is allocated only on demand.
You should not lower this value.
-->
<mark_cache_size>5368709120</mark_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>/var/lib/clickhouse/</path>
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>users.xml</users_config>
<!-- Default profile of settings. -->
<default_profile>default</default_profile>
<!-- Default database. -->
<default_database>default</default_database>
<!-- Server time zone could be set here.
Time zone is used when converting between String and DateTime types,
when printing DateTime in text formats and parsing DateTime from text,
it is used in date and time related functions, if specific time zone was not passed as an argument.
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
If not specified, system time zone at server startup is used.
Please note that the server may display a time zone alias instead of the specified name.
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
-->
<!-- <timezone>Europe/Moscow</timezone> -->
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
-->
<!-- <umask>022</umask> -->
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.yandex/reference_en.html#Distributed
-->
<remote_servers incl="clickhouse_remote_servers" >
<!-- Test only shard config for testing distributed storage -->
<test_shard_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_shard_localhost>
</remote_servers>
<!-- If an element has an 'incl' attribute, its value is replaced with the corresponding substitution from another file.
By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed via the 'include_from' element in the config.
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
-->
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you can omit it.
See https://clickhouse.yandex/reference_en.html#Data%20replication
-->
<zookeeper incl="zookeeper-servers" optional="true" />
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you can omit it.
See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
-->
<macros incl="macros" optional="true" />
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Maximum session timeout, in seconds. Default: 3600. -->
<max_session_timeout>3600</max_session_timeout>
<!-- Default session timeout, in seconds. Default: 60. -->
<default_session_timeout>60</default_session_timeout>
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
<!--
interval - send every X seconds
root_path - prefix for keys
hostname_in_path - append hostname to root_path (default = true)
metrics - send data from table system.metrics
events - send data from table system.events
asynchronous_metrics - send data from table system.asynchronous_metrics
-->
<!--
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>60</interval>
<root_path>one_min</root_path>
<hostname_in_path>true</hostname_in_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
</graphite>
<graphite>
<host>localhost</host>
<port>42000</port>
<timeout>0.1</timeout>
<interval>1</interval>
<root_path>one_sec</root_path>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>false</asynchronous_metrics>
</graphite>
-->
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>
<!-- Which table to insert data into. If the table does not exist, it will be created.
When the query log structure changes after a system update,
the old table is renamed and a new table is created automatically.
-->
<database>system</database>
<table>query_log</table>
<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<!-- Uncomment to use part_log
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
-->
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
<!-- Configuration of external dictionaries. See:
https://clickhouse.yandex/reference_en.html#External%20Dictionaries
-->
<dictionaries_config>*_dictionary.xml</dictionaries_config>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.
-->
<compression incl="clickhouse_compression">
<!--
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
<case>
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
<!- - What compression method to use. - ->
<method>zstd</method>
</case>
-->
</compression>
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
-->
<!-- Protection from accidental DROP.
If the size of a MergeTree table exceeds max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
If you want to delete one table without restarting clickhouse-server, create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
Uncomment to disable protection.
-->
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
<!-- Example of parameters for GraphiteMergeTree table engine -->
<graphite_rollup>
<!-- carbon -->
<pattern>
<regexp>^carbon\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>7776000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- collectd -->
<pattern>
<regexp>^collectd\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>43200</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- high -->
<pattern>
<regexp>^high\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>10</precision>
</retention>
<retention>
<age>172800</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- medium -->
<pattern>
<regexp>^medium\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- low -->
<pattern>
<regexp>^low\.</regexp>
<function>any</function>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>15552000</age>
<precision>1800</precision>
</retention>
<retention>
<age>31536000</age>
<precision>3600</precision>
</retention>
<retention>
<age>63072000</age>
<precision>21600</precision>
</retention>
<retention>
<age>126144000</age>
<precision>43200</precision>
</retention>
<retention>
<age>252288000</age>
<precision>86400</precision>
</retention>
<retention>
<age>315360000</age>
<precision>604800</precision>
</retention>
</pattern>
<!-- default -->
<default>
<function>any</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>864000</age>
<precision>900</precision>
</retention>
<retention>
<age>1728000</age>
<precision>1800</precision>
</retention>
<retention>
<age>3456000</age>
<precision>3600</precision>
</retention>
<retention>
<age>10368000</age>
<precision>21600</precision>
</retention>
<retention>
<age>34560000</age>
<precision>43200</precision>
</retention>
<retention>
<age>63072000</age>
<precision>86400</precision>
</retention>
<retention>
<age>94608000</age>
<precision>604800</precision>
</retention>
</default>
</graphite_rollup>
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
The directory will be created if it doesn't exist.
-->
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
</yandex>
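A quick manual check of this configuration once the server container is up (a sketch; it assumes the ports above are reachable from the host, and that the compose file below publishes HTTP port 8123 directly and maps native TCP port 9000 to host port 9001):

# HTTP interface (http_port above); /ping returns "Ok." and backs the compose healthcheck
curl -s http://localhost:8123/ping
# Native TCP interface, as published by the compose file below
clickhouse-client --host localhost --port 9001 --query 'SELECT 1'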


@@ -0,0 +1,97 @@
version: "2.4"
services:
clickhouse:
image: yandex/clickhouse-server
expose:
- 8123
- 9000
ports:
- 9001:9000
- 8123:8123
volumes:
- ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
- ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
healthcheck:
# "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
interval: 30s
timeout: 5s
retries: 3
query-service:
image: signoz/query-service:0.3.0
container_name: query-service
ports:
- "8080:8080"
environment:
- ClickHouseUrl=tcp://clickhouse:9000
- STORAGE=clickhouse
- POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
depends_on:
clickhouse:
condition: service_healthy
frontend:
image: signoz/frontend:0.3.0
container_name: frontend
depends_on:
- query-service
links:
- "query-service"
ports:
- "3000:3000"
volumes:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/otelcol:latest
command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
ports:
- "1777:1777" # pprof extension
- "8887:8888" # Prometheus metrics exposed by the agent
- "14268:14268" # Jaeger receiver
- "55678" # OpenCensus receiver
- "55680:55680" # OTLP HTTP/2.0 legacy port
- "55681:55681" # OTLP HTTP/1.0 receiver
- "4317:4317" # OTLP GRPC receiver
- "55679:55679" # zpages extension
- "13133" # health_check
depends_on:
clickhouse:
condition: service_healthy
hotrod:
image: jaegertracing/example-hotrod:latest
container_name: hotrod
ports:
- "9000:8080"
command: ["all"]
environment:
- JAEGER_ENDPOINT=http://otel-collector:14268/api/traces
load-hotrod:
image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
container_name: load-hotrod
hostname: load-hotrod
ports:
- "8089:8089"
environment:
ATTACKED_HOST: http://hotrod:8080
LOCUST_MODE: standalone
NO_PROXY: standalone
TASK_DELAY_FROM: 5
TASK_DELAY_TO: 30
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ../common/locust-scripts:/locust
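A usage sketch for this compose file, assuming it lives at docker/clickhouse-setup/docker-compose.yaml as the install script below expects:

sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps
# Frontend: http://localhost:3000, query-service: http://localhost:8080
# hotrod demo: http://localhost:9000, locust load UI: http://localhost:8089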


@@ -0,0 +1,27 @@
CREATE TABLE IF NOT EXISTS signoz_index (
timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
traceID String CODEC(ZSTD(1)),
spanID String CODEC(ZSTD(1)),
parentSpanID String CODEC(ZSTD(1)),
serviceName LowCardinality(String) CODEC(ZSTD(1)),
name LowCardinality(String) CODEC(ZSTD(1)),
kind Int32 CODEC(ZSTD(1)),
durationNano UInt64 CODEC(ZSTD(1)),
tags Array(String) CODEC(ZSTD(1)),
tagsKeys Array(String) CODEC(ZSTD(1)),
tagsValues Array(String) CODEC(ZSTD(1)),
statusCode Int64 CODEC(ZSTD(1)),
references String CODEC(ZSTD(1)),
externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
component Nullable(String) CODEC(ZSTD(1)),
dbSystem Nullable(String) CODEC(ZSTD(1)),
dbName Nullable(String) CODEC(ZSTD(1)),
dbOperation Nullable(String) CODEC(ZSTD(1)),
peerService Nullable(String) CODEC(ZSTD(1)),
INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
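For illustration, a hedged example of querying this table through clickhouse-client (same host/port assumptions as above; the query only touches columns defined in the schema):

clickhouse-client --host localhost --port 9001 --query "
SELECT serviceName, name, durationNano / 1000000 AS duration_ms
FROM signoz_index
ORDER BY durationNano DESC
LIMIT 10"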


@@ -0,0 +1,39 @@
receivers:
otlp:
protocols:
grpc:
http:
jaeger:
protocols:
grpc:
thrift_http:
processors:
batch:
send_batch_size: 1000
timeout: 10s
memory_limiter:
# Same as --mem-ballast-size-mib CLI argument
ballast_size_mib: 683
# 80% of maximum memory up to 2G
limit_mib: 1500
# 25% of limit up to 2G
spike_limit_mib: 512
check_interval: 5s
# queued_retry:
# num_workers: 4
# queue_size: 100
# retry_on_failure: true
extensions:
health_check: {}
zpages: {}
exporters:
clickhouse:
datasource: tcp://clickhouse:9000
service:
extensions: [health_check, zpages]
pipelines:
traces:
receivers: [jaeger, otlp]
processors: [batch]
exporters: [clickhouse]
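To confirm the collector is alive, a sketch using the extensions declared above (zpages serves /debug/tracez on its default port 55679, which the compose file publishes; health_check listens on 13133, which the compose file exposes without a fixed host port, so resolve it first):

curl -s http://localhost:55679/debug/tracez | head
sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml port otel-collector 13133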


@@ -192,7 +192,7 @@ services:
ports:
- "3000:3000"
volumes:
- ./nginx-config.conf:/etc/nginx/conf.d/default.conf
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
create-supervisor:
image: theithollow/hollowapp-blog:curl
@@ -269,5 +269,5 @@ services:
QUIET_MODE: "${QUIET_MODE:-false}"
LOCUST_OPTS: "--headless -u 10 -r 1"
volumes:
- ./locust-scripts:/locust
- ../common/locust-scripts:/locust


@@ -0,0 +1,26 @@
# For S3 storage
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]
druid_storage_type=s3
druid_storage_bucket=solvzy-test3
druid_storage_baseKey=druid/segments
AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2
druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test3
druid_indexer_logs_s3Prefix=druid/indexing-logs
# -----------------------------------------------------------
# For local storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]
# druid_storage_type=local
# druid_storage_storageDirectory=/opt/data/segments
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/data/indexing-logs
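A hedged sanity check for the S3 settings above (assumes the AWS CLI is installed and configured with the same credentials; bucket, prefix, and region are taken from the values above):

aws s3 ls s3://solvzy-test3/druid/segments/ --region us-east-2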


@@ -1,256 +0,0 @@
#!/bin/bash
set -o errexit
is_command_present() {
type "$1" >/dev/null 2>&1
}
is_mac() {
[[ $OSTYPE == darwin* ]]
}
check_k8s_setup() {
echo "Checking your k8s setup status"
if ! is_command_present kubectl; then
echo "Please install kubectl on your machine"
exit 1
else
if ! is_command_present jq; then
install_jq
fi
clusters=`kubectl config view -o json | jq -r '."current-context"'`
if [[ -z $clusters ]]; then
echo "Please setup a k8s cluster & config kubectl to connect to it"
exit 1
fi
k8s_minor_version=`kubectl version --short -o json | jq ."serverVersion.minor" | sed 's/[^0-9]*//g'`
# if [[ $k8s_minor_version < 18 ]]; then
# echo "+++++++++++ ERROR ++++++++++++++++++++++"
# echo "SigNoz deployments require Kubernetes >= v1.18. Found version: v1.$k8s_minor_version"
# echo "+++++++++++ ++++++++++++++++++++++++++++"
# exit 1
# fi;
fi
}
install_jq(){
if [ $package_manager == "brew" ]; then
brew install jq
elif [ $package_manager == "yum" ]; then
yum_cmd="sudo yum --assumeyes --quiet"
$yum_cmd install jq
else
apt_cmd="sudo apt-get --yes --quiet"
$apt_cmd update
$apt_cmd install jq
fi
}
check_os() {
if is_mac; then
package_manager="brew"
desired_os=1
os="Mac"
return
fi
os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"
case "$os_name" in
Ubuntu*)
desired_os=1
os="ubuntu"
package_manager="apt-get"
;;
Debian*)
desired_os=1
os="debian"
package_manager="apt-get"
;;
Red\ Hat*)
desired_os=1
os="red hat"
package_manager="yum"
;;
CentOS*)
desired_os=1
os="centos"
package_manager="yum"
;;
*)
desired_os=0
os="Not Found"
esac
}
echo_contact_support() {
echo "Please contact <support@signoz.io> with your OS details and version${1:-.}"
}
bye() { # Prints a friendly good bye message and exits the script.
set +o errexit
echo "Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo -e "\nExiting for now. Bye! \U1F44B\n"
exit 1
}
deploy_app() {
kubectl apply -f "$install_dir/config-template"
kubectl apply -f "$install_dir"
}
wait_for_application_start() {
local timeout=$1
address=$custom_domain
if [[ "$ssl_enable" == "true" ]]; then
protocol="https"
else
protocol="http"
fi
# The while loop is important because for-loops don't work for dynamic values
while [[ $timeout -gt 0 ]]; do
if [[ $address == "" || $address == null ]]; then
address=`kubectl get ingress appsmith-ingress -o json | jq -r '.status.loadBalancer.ingress[0].ip'`
fi
status_code="$(curl -s -o /dev/null -w "%{http_code}" $protocol://$address/api/v1 || true)"
if [[ status_code -eq 401 ]]; then
break
else
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
fi
((timeout--))
sleep 1
done
echo ""
}
echo -e "👋 Thank you for trying out SigNoz! "
echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
# Run bye if failure happens
trap bye EXIT
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
# Check for kubernetes setup
check_k8s_setup
echo ""
echo "Deploy Appmisth on your cluster"
echo ""
deploy_app
wait_for_application_start 60
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo "Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
while [[ $email == "" ]]
do
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
if has_curl; then
curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
fi
echo -e "\nThank you!\n"


@@ -2,6 +2,16 @@
set -o errexit
# Regular Colors
Black='\033[0;30m' # Black
Red='\033[0;31m' # Red
Green='\033[0;32m' # Green
Yellow='\033[0;33m' # Yellow
Blue='\033[0;34m' # Blue
Purple='\033[0;35m' # Purple
Cyan='\033[0;36m' # Cyan
White='\033[0;37m' # White
NC='\033[0m' # No Color
is_command_present() {
type "$1" >/dev/null 2>&1
@@ -88,7 +98,7 @@ check_os() {
# The script should error out in case they aren't available
check_ports_occupied() {
local port_check_output
local ports_pattern="80|443"
local ports_pattern="80|3000|8080"
if is_mac; then
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -192,7 +202,7 @@ install_docker_compose() {
echo ""
fi
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -212,8 +222,7 @@ install_docker_compose() {
start_docker() {
echo "Starting Docker ..."
if [ $os == "Mac" ]
then
if [ $os = "Mac" ]; then
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
else
if ! sudo systemctl is-active docker.service > /dev/null; then
@@ -231,16 +240,17 @@ wait_for_containers_start() {
if [[ status_code -eq 200 ]]; then
break
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [ $setup_type == 'druid' ]; then
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
LEN_SUPERVISORS="${#SUPERVISORS}"
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
echo "No Supervisors found... Re-applying docker compose\n"
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up -d
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
fi
fi
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
fi
((timeout--))
sleep 1
@@ -253,14 +263,18 @@ bye() { # Prints a friendly good bye message and exits the script.
if [ "$?" -ne 0 ]; then
set +o errexit
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
echo "Please share your email to receive support with the installation"
echo -e "\n📨 Please share your email to receive support with the installation"
read -rp 'Email: ' email
while [[ $email == "" ]]
@@ -268,7 +282,7 @@ bye() { # Prints a friendly good bye message and exits the script.
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@ -294,17 +308,39 @@ echo ""
# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
echo -e "Detecting your OS ..."
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
echo ""
echo -e "👉 ${RED}Two ways to go forward\n"
echo -e "${RED}1) ClickHouse as database (recommended for low memory usage)\n"
echo -e "${RED}2) Kafka + Druid setup to handle scale (recommended for production use)\n"
read -p "⚙️ Enter your preference (1/2):" choice_setup
while [[ $choice_setup == "" || ( $choice_setup != "1" && $choice_setup != "2" ) ]]
do
# echo $choice_setup
echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
read -rp "⚙️ Enter your preference (1/2): " choice_setup
# echo $choice_setup
done
if [ $choice_setup == "1" ];then
setup_type='clickhouse'
else
setup_type='druid'
fi
echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
# Run bye if failure happens
trap bye EXIT
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -316,7 +352,7 @@ fi
if [[ $desired_os -eq 0 ]];then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -340,7 +376,7 @@ if ! is_command_present docker; then
echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
echo "https://docs.docker.com/docker-for-mac/install/"
echo "++++++++++++++++++++++++++++++++++++++++++++++++"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -358,43 +394,59 @@ if ! is_command_present docker-compose; then
install_docker_compose
fi
# if ! is_command_present docker-compose; then
# install_docker_machine
# docker-machine create -d virtualbox --virtualbox-memory 3584 signoz
# fi
start_docker
# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true
echo ""
echo "Pulling the latest container images for SigNoz. To run as sudo it will ask for system password."
sudo docker-compose -f ./docker/docker-compose-tiny.yaml pull
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi
echo ""
echo "Starting the SigNoz containers. It may take a few minute ..."
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up --detach --remove-orphans || true
if [ $setup_type == 'clickhouse' ]; then
sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
else
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi
wait_for_containers_start 60
echo ""
if [[ $status_code -ne 200 ]]; then
echo "+++++++++++ ERROR ++++++++++++++++++++++"
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
echo ""
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
if [ $setup_type == 'clickhouse' ]; then
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
else
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
fi
echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "++++++++++++++++++++++++++++++++++++++++"
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
if [ $setup_type == 'clickhouse' ]; then
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
else
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
fi
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -408,7 +460,7 @@ if [[ $status_code -ne 200 ]]; then
exit 1
else
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"}, "setup_type": "'"$setup_type"'" }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -419,18 +471,24 @@ else
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo ""
echo "Your installation is complete!"
echo "🟢 Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo ""
echo "To bring down SigNoz and clean volumes : sudo docker-compose -f docker/docker-compose-tiny.yaml down -v"
if [ $setup_type == 'clickhouse' ]; then
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
else
echo " To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi
echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email
while [[ $email == "" ]]
@@ -438,7 +496,7 @@ else
read -rp 'Email: ' email
done
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -450,28 +508,4 @@ else
fi
echo -e "\nThank you!\n"
##### Changing default memory limit of docker ############
# # Check if memory is less and Confirm to increase size of docker machine
# # https://github.com/docker/machine/releases
# # On OS X
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \
# chmod +x /usr/local/bin/docker-machine
# # On Linux
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
# chmod +x /tmp/docker-machine &&
# sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
# VBoxManage list vms
# docker-machine stop
# VBoxManage modifyvm default --cpus 2
# VBoxManage modifyvm default --memory 4096
# docker-machine start
# VBoxManage showvminfo default | grep Memory
# VBoxManage showvminfo default | grep CPU
echo -e "\n🙏 Thank you!\n"