refactor(ports): 💥 avoid exposing unnecessary ports and update frontend port to 3301 (#679)

* refactor(compose-yaml): ♻️ remove unused and unnecessary port mappings from compose files

Signed-off-by: Prashant Shahi <prashant@signoz.io>

* refactor(frontend): 💥 change frontend port to 3301
BREAKING CHANGE: the SigNoz frontend is now served on port 3301 instead of 3000.

Signed-off-by: Prashant Shahi <prashant@signoz.io>
Authored by Prashant Shahi on 2022-02-08 22:47:06 +05:30; committed by GitHub
parent 6342e1cebc
commit d22d1d1c3b
15 changed files with 22 additions and 80 deletions
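This change stops publishing several container ports on the host (ClickHouse, Alertmanager, the query-service API, most collector receivers, HotROD, and Locust); containers keep reaching each other by service name on the compose network, so those mappings were only needed for access from the host. Anyone who relied on one of them can restore it with a compose override instead of editing the shipped files. A minimal sketch, assuming the usual docker-compose override mechanism; the file name is illustrative, and only service names and ports that appear in the diffs below are used:

```yaml
# docker-compose.override.yml (hypothetical, not part of this commit)
# Re-publishes only the ports you actually need on the host; Compose merges
# this with the base file, so nothing in the repository has to change.
version: "2.4"
services:
  clickhouse:
    ports:
      - "9001:9000"   # ClickHouse native protocol, as previously exposed
      - "8123:8123"   # ClickHouse HTTP interface
  query-service:
    ports:
      - "8080:8080"   # query-service API, if you call it directly from the host
```

The same pattern works for any of the other removed mappings (for example the collector receiver ports), since an override file only adds entries to the port list of the base definition.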

View File

@@ -36,7 +36,7 @@ If you don't want to install SigNoz backend just for doing frontend development,
 - `yarn install`
 - `yarn dev`
-**_Frontend should now be accessible at `http://localhost:3000/application`_**
+**_Frontend should now be accessible at `http://localhost:3301/application`_**
 # Contribute to Query-Service
@@ -69,7 +69,7 @@ Need to update [https://github.com/SigNoz/charts](https://github.com/SigNoz/char
 - [minikube](https://minikube.sigs.k8s.io/docs/start/)
 - create a k8s cluster and make sure `kubectl` points to the locally created k8s cluster
 - run `helm install -n platform --create-namespace my-release charts/signoz` to install SigNoz chart
-- run `kubectl -n platform port-forward svc/my-release-frontend 3000:3000` to make SigNoz UI available at [localhost:3000](http://localhost:3000)
+- run `kubectl -n platform port-forward svc/my-release-frontend 3301:3301` to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **To load data with HotROD sample app:**

View File

@@ -50,7 +50,7 @@ services:
     links:
       - "query-service"
     ports:
-      - "3000:3000"
+      - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -1,5 +1,5 @@
 server {
-    listen 3000;
+    listen 3301;
     server_name _;
     gzip on;

View File

@@ -3,12 +3,6 @@ version: "2.4"
 services:
   clickhouse:
     image: altinity/clickhouse-server:21.12.3.32.altinitydev.arm
-    expose:
-      - 8123
-      - 9000
-    ports:
-      - 9001:9000
-      - 8123:8123
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
@@ -27,15 +21,11 @@ services:
     command:
       - '--config.file=/prometheus/alertmanager.yml'
       - '--storage.path=/data'
-    ports:
-      - 9093:9093
   query-service:
     image: signoz/query-service:0.5.4
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
-    ports:
-      - "8080:8080"
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
@@ -54,10 +44,8 @@ services:
     container_name: frontend
     depends_on:
       - query-service
-    links:
-      - "query-service"
     ports:
-      - "3000:3000"
+      - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
@@ -67,16 +55,7 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
     ports:
-      - "1777:1777" # pprof extension
-      - "8887:8888" # Prometheus metrics exposed by the agent
-      - "14268:14268" # Jaeger receiver
-      - "55678" # OpenCensus receiver
-      - "55680:55680" # OTLP HTTP/2.0 legacy port
-      - "55681:55681" # OTLP HTTP/1.0 receiver
       - "4317:4317" # OTLP GRPC receiver
-      - "55679:55679" # zpages extension
-      - "13133" # health_check
-      - "8889:8889" # prometheus exporter
     mem_limit: 2000m
     restart: always
     depends_on:
@@ -95,8 +74,6 @@ services:
   hotrod:
     image: jaegertracing/example-hotrod:1.30
     container_name: hotrod
-    ports:
-      - "9000:8080"
     logging:
       options:
         max-size: 50m
@@ -109,8 +86,6 @@ services:
     image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
     container_name: load-hotrod
     hostname: load-hotrod
-    ports:
-      - "8089:8089"
     environment:
       ATTACKED_HOST: http://hotrod:8080
       LOCUST_MODE: standalone

View File

@@ -3,16 +3,9 @@ version: "2.4"
 services:
   clickhouse:
     image: yandex/clickhouse-server:21.12.3.32
-    expose:
-      - 8123
-      - 9000
-    ports:
-      - 9001:9000
-      - 8123:8123
     volumes:
       - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
       - ./data/clickhouse/:/var/lib/clickhouse/
     healthcheck:
       # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
       test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
@@ -28,15 +21,11 @@ services:
     command:
       - '--config.file=/prometheus/alertmanager.yml'
       - '--storage.path=/data'
-    ports:
-      - 9093:9093
   query-service:
     image: signoz/query-service:0.5.4
     container_name: query-service
     command: ["-config=/root/config/prometheus.yml"]
-    ports:
-      - "8080:8080"
     volumes:
       - ./prometheus.yml:/root/config/prometheus.yml
       - ../dashboards:/root/config/dashboards
@@ -46,7 +35,6 @@ services:
       - STORAGE=clickhouse
       - GODEBUG=netdns=go
       - TELEMETRY_ENABLED=true
     depends_on:
       clickhouse:
         condition: service_healthy
@@ -56,10 +44,8 @@ services:
     container_name: frontend
     depends_on:
       - query-service
-    links:
-      - "query-service"
     ports:
-      - "3000:3000"
+      - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
@@ -69,16 +55,7 @@ services:
     volumes:
       - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
     ports:
-      - "1777:1777" # pprof extension
-      - "8887:8888" # Prometheus metrics exposed by the agent
-      - "14268:14268" # Jaeger receiver
-      - "55678" # OpenCensus receiver
-      - "55680:55680" # OTLP HTTP/2.0 legacy port
-      - "55681:55681" # OTLP HTTP/1.0 receiver
       - "4317:4317" # OTLP GRPC receiver
-      - "55679:55679" # zpages extension
-      - "13133" # health_check
-      - "8889:8889" # prometheus exporter
     mem_limit: 2000m
     restart: always
     depends_on:
@@ -97,8 +74,6 @@ services:
   hotrod:
     image: jaegertracing/example-hotrod:1.30
     container_name: hotrod
-    ports:
-      - "9000:8080"
     logging:
       options:
         max-size: 50m
@@ -111,8 +86,6 @@ services:
     image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
     container_name: load-hotrod
     hostname: load-hotrod
-    ports:
-      - "8089:8089"
     environment:
       ATTACKED_HOST: http://hotrod:8080
       LOCUST_MODE: standalone

View File

@@ -1,5 +1,5 @@
 server {
-    listen 3000;
+    listen 3301;
     server_name _;
     gzip on;

View File

@@ -167,7 +167,8 @@ services:
     container_name: query-service
     depends_on:
-      - router
+      router:
+        condition: service_healthy
     ports:
       - "8080:8080"
     volumes:
@@ -180,10 +181,6 @@
       - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
       - GODEBUG=netdns=go
-    depends_on:
-      router:
-        condition: service_healthy
   frontend:
     image: signoz/frontend:0.4.1
     container_name: frontend
@@ -193,7 +190,7 @@
     links:
       - "query-service"
     ports:
-      - "3000:3000"
+      - "3301:3301"
     volumes:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
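Because the frontend container now listens on 3301 (see the nginx config and Dockerfile changes elsewhere in this commit), only the host side of the mapping can be changed if the UI should stay reachable on the old address. A hypothetical override entry, not part of this commit; Compose appends it to the existing port list of the base definition:

```yaml
# Keeps the SigNoz UI reachable on http://localhost:3000 after upgrading:
# host port 3000 is forwarded to the frontend container, which serves on 3301.
services:
  frontend:
    ports:
      - "3000:3301"   # host:container - the container side must stay 3301
```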

View File

@@ -162,7 +162,8 @@ services:
     container_name: query-service
     depends_on:
-      - router
+      router:
+        condition: service_healthy
     ports:
       - "8080:8080"
@@ -176,10 +177,6 @@
       - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
       - GODEBUG=netdns=go
-    depends_on:
-      router:
-        condition: service_healthy
   frontend:
     image: signoz/frontend:0.4.1
     container_name: frontend
@@ -189,7 +186,7 @@
     links:
       - "query-service"
     ports:
-      - "3000:3000"
+      - "3301:3301"
     volumes:
       - ./nginx-config.conf:/etc/nginx/conf.d/default.conf

View File

@@ -102,7 +102,7 @@ check_os() {
 # The script should error out in case they aren't available
 check_ports_occupied() {
     local port_check_output
-    local ports_pattern="80|3000|8080"
+    local ports_pattern="80|3301|8080"
     if is_mac; then
         port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
@@ -225,7 +225,7 @@ wait_for_containers_start() {
     # The while loop is important because for-loops don't work for dynamic values
     while [[ $timeout -gt 0 ]]; do
-        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/api/v1/services/list || true)"
+        status_code="$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3301/api/v1/services/list || true)"
         if [[ status_code -eq 200 ]]; then
             break
         else
@@ -492,7 +492,7 @@ else
     echo ""
     echo "🟢 Your installation is complete!"
     echo ""
-    echo -e "🟢 Your frontend is running on http://localhost:3000"
+    echo -e "🟢 Your frontend is running on http://localhost:3301"
     echo ""
     if [ $setup_type == 'clickhouse' ]; then

View File

@@ -32,6 +32,6 @@ RUN rm -rf /usr/share/nginx/html/*
 # Copy from the stahg 1
 COPY --from=builder /frontend/build /usr/share/nginx/html
-EXPOSE 3000
+EXPOSE 3301
 ENTRYPOINT ["nginx", "-g", "daemon off;"]

View File

@@ -44,7 +44,7 @@ In the project directory, you can run:
 ### `yarn start`
 Runs the app in the development mode.\
-Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
+Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
 The page will reload if you make edits.\
 You will also see any lint errors in the console.

View File

@@ -1,5 +1,5 @@
 server {
-    listen 3000;
+    listen 3301;
     server_name _;
     gzip on;

View File

@@ -4,4 +4,4 @@ services:
     build: .
     image: signoz/frontend:latest
     ports:
-      - "3000:3000"
+      - "3301:3301"

View File

@@ -35,7 +35,7 @@ const config = {
    open: true,
    hot: true,
    liveReload: true,
-   port: portFinderSync.getPort(3000),
+   port: portFinderSync.getPort(3301),
    static: {
      directory: resolve(__dirname, 'public'),
      publicPath: '/',

View File

@@ -168,7 +168,7 @@ func (r *ClickHouseReader) Start() {
    notifier := notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
    // notifier.ApplyConfig(conf)
-   ExternalURL, err := computeExternalURL("", "0.0.0.0:3000")
+   ExternalURL, err := computeExternalURL("", "0.0.0.0:3301")
    if err != nil {
        fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", ExternalURL.String()))
        os.Exit(2)