refactor(local): docker compose (#2892)

* refactor(local): docker compose

* fix(compose): disable logging in gateway

* docs: guide for local development

* docs(local): quickstart

* docs: resources

* use docker-compose dns

* Apply suggestions from code review

Co-authored-by: Florian Forster <florian@caos.ch>

Co-authored-by: Elio Bischof <eliobischof@gmail.com>
Co-authored-by: Florian Forster <florian@caos.ch>
This commit is contained in:
Silvan
2021-12-31 18:04:20 +01:00
committed by GitHub
parent 738a5821da
commit 9a374f9c5c
6 changed files with 150 additions and 176 deletions

View File

@@ -23,9 +23,6 @@ RUN dep ensure && \
go env -w GO111MODULE=auto && \
go install ./go/grpcwebproxy
# ADD ./etc/localhost.crt /etc
# ADD ./etc/localhost.key /etc
ENV BKD_HOST=backend-run
ENV BKD_PORT=50001

View File

@@ -1,10 +0,0 @@
# Helper image for local development: runs zitadel-started.sh, which polls the
# stack and prints a banner once ZITADEL is reachable.
FROM ubuntu:latest AS started
# install dependencies; curl is required by the start script (which also works around the 412 error)
RUN apt-get update \
&& apt-get install curl -y
# prepare the startup script (copied to / and made executable)
COPY build/local/zitadel-started.sh zitadel-started.sh
RUN chmod +x /zitadel-started.sh
ENTRYPOINT [ "/zitadel-started.sh" ]

View File

@@ -2,18 +2,47 @@ version: '3.8'
services:
db:
profiles: ['database']
profiles: ['backend', 'storage']
restart: always
networks:
- zitadel
image: cockroachdb/cockroach:v21.2.3
command: start-single-node --insecure --listen-addr=0.0.0.0
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
interval: 10s
timeout: 30s
retries: 5
start_period: 20s
ports:
- 8080:8080
- 26257:26257
# schema changes on the database
db-migrations:
profiles: ['backend', 'storage']
restart: on-failure
networks:
- zitadel
depends_on:
db:
condition: service_healthy
image: flyway/flyway:latest
volumes:
- ../../migrations/cockroach:/flyway/sql
environment:
- FLYWAY_PLACEHOLDERS_eventstorepassword=NULL
- FLYWAY_PLACEHOLDERS_managementpassword=NULL
- FLYWAY_PLACEHOLDERS_adminapipassword=NULL
- FLYWAY_PLACEHOLDERS_authpassword=NULL
- FLYWAY_PLACEHOLDERS_notificationpassword=NULL
- FLYWAY_PLACEHOLDERS_authzpassword=NULL
- FLYWAY_PLACEHOLDERS_queriespassword=NULL
command: -url=jdbc:postgresql://db:26257/defaultdb -user=root -password= -connectRetries=5 migrate
# minio is used to store assets
minio:
profiles: ['backend']
profiles: ['backend', 'storage']
image: minio/minio:RELEASE.2021-06-14T01-29-23Z
restart: on-failure
networks:
@@ -32,28 +61,11 @@ services:
- nas
- /export
db-migrations:
profiles: ['database']
restart: on-failure
networks:
- zitadel
depends_on:
- db
image: flyway/flyway:latest
volumes:
- ../../migrations/cockroach:/flyway/sql
environment:
- FLYWAY_PLACEHOLDERS_eventstorepassword=NULL
- FLYWAY_PLACEHOLDERS_managementpassword=NULL
- FLYWAY_PLACEHOLDERS_adminapipassword=NULL
- FLYWAY_PLACEHOLDERS_authpassword=NULL
- FLYWAY_PLACEHOLDERS_notificationpassword=NULL
- FLYWAY_PLACEHOLDERS_authzpassword=NULL
- FLYWAY_PLACEHOLDERS_queriespassword=NULL
command: -url=jdbc:postgresql://db:26257/defaultdb -user=root -password= -connectRetries=5 migrate
# ZITADEL needs several keys to encrypt data
# this container generates the required keys
# and stores them into zitadel/.keys
keys:
profiles: ['init-backend']
profiles: ['backend', 'backend-stub']
restart: on-failure
networks:
- zitadel
@@ -66,13 +78,20 @@ services:
env_file:
- ./local.env
# Interacting with ZITADEL requires some data to be set up.
# Due to the evolution of ZITADEL it's required to add additional
# setup steps, because of this fact it's recommended to rerun the setup
# on each restart, at least after a new version got released
backend-setup:
profiles: ['init-backend']
profiles: ['backend']
restart: on-failure
networks:
- zitadel
depends_on:
- keys
db-migrations:
condition: service_completed_successfully
keys:
condition: service_completed_successfully
build:
context: ../..
dockerfile: build/zitadel/Dockerfile
@@ -93,14 +112,28 @@ services:
'setup',
]
# starts the backend (API's) of ZITADEL
# Port 50001 serves the GRPC API
# Port 50002 serves the REST API
# Port 50003 serves the login GUI
backend-run:
profiles: ['backend']
restart: on-failure
networks:
- zitadel
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:50002/management/v1/healthz"]
interval: 10s
timeout: 30s
retries: 5
start_period: 20s
depends_on:
- db
- minio
db:
condition: service_healthy
minio:
condition: service_healthy
backend-setup:
condition: service_completed_successfully
build:
context: ../..
dockerfile: build/zitadel/Dockerfile
@@ -114,6 +147,7 @@ services:
environment:
- ZITADEL_EVENTSTORE_HOST=db
ports:
- 50001:50001
- 50002:50002
- 50003:50003
command:
@@ -126,26 +160,17 @@ services:
'start',
]
zitadel-started-up:
profiles: ['setup']
networks:
- zitadel
extra_hosts:
host.docker.internal: host-gateway
build:
context: ../..
dockerfile: build/local/Dockerfile.started
volumes:
- ./environment.json:/environment.json
environment:
- BE_PORT=50002
- FE_PORT=4200
# the GRPC web gateway proxies the
# GRPC web calls to GRPC
# it's used in console (frontend) for example
grpc-web-gateway:
profiles: ['frontend']
profiles: ['backend']
restart: on-failure
logging:
driver: none
depends_on:
backend-run:
condition: service_healthy
networks:
- zitadel
build:
@@ -154,16 +179,38 @@ services:
image: grpcweb/grpcwebproxy
ports:
- '50000:8080'
environment:
- BKD_HOST=host.docker.internal
- BKD_PORT=50001
frontend-local-run:
# this service generates the environment.json
# needed in console.
# It curls the client id of console and writes it to
# the environment.json
client-id:
profiles: ['frontend', 'console-stub']
depends_on:
backend-run:
condition: service_healthy
networks:
- zitadel
build:
context: ../..
dockerfile: build/local/Dockerfile.clientid
target: client-id
volumes:
- ./environment.json:/environment.json
environment:
- HOST=backend-run
- PORT=50002
# starts console in development mode
frontend-run:
profiles: ['frontend']
networks:
- zitadel
depends_on:
- grpc-web-gateway
grpc-web-gateway:
condition: service_started
client-id:
condition: service_completed_successfully
build:
context: ../..
dockerfile: build/console/Dockerfile
@@ -176,19 +223,5 @@ services:
ports:
- 4200:4200
client-id:
profiles: ['init-frontend']
networks:
- zitadel
build:
context: ../..
dockerfile: build/local/Dockerfile.clientid
target: client-id
volumes:
- ./environment.json:/environment.json
environment:
- HOST=host.docker.internal
- PORT=50002
networks:
zitadel: {}

View File

@@ -1,57 +0,0 @@
#!/bin/bash
# ------------------------------
# prints a message as soon as
# ZITADEL is ready
# ------------------------------
be_status=""
env_status=""
console_status=""
# Wait until BOTH the backend clientID endpoint and the served environment.json
# return HTTP 200. The loop must keep running while EITHER endpoint is not
# ready, so the condition uses "||" (the original "&&" exited — and reported
# readiness — as soon as only one of the two endpoints came up).
while [[ $be_status != 200 || $env_status != 200 ]]; do
  sleep 5
  ## This is a workaround for a race condition
  if [[ $be_status -eq 412 ]]; then
    echo "please restart the process once again to get rid of the 412 error!"
  fi
  be_status=$(curl -s -o /dev/null -I -w "%{http_code}" host.docker.internal:${BE_PORT}/clientID)
  env_status=$(curl -s -o /dev/null -I -w "%{http_code}" host.docker.internal:${FE_PORT}/assets/environment.json)
  echo "backend (${be_status}) or environment (${env_status}) not ready yet ==> retrying in 5 seconds"
done
echo "backend and environment.json ready!"
# Now wait for the console dev server to serve its index page.
while [[ $console_status != 200 ]]; do
  sleep 15
  console_status=$(curl -s -o /dev/null -I -w "%{http_code}" host.docker.internal:${FE_PORT}/index.html)
  echo "console (${console_status}) not ready yet ==> retrying in 15 seconds"
done
echo "console ready - please wait shortly!"
sleep 15
echo -e "++=======================================================================================++
||                                                                                       ||
|| ZZZZZZZZZZZZ II TTTTTTTTTTTT   AAAA   DDDDDD   EEEEEEEEEE LL                          ||
||          ZZ  II      TT      AA    AA DD   DD  EE         LL                          ||
||        ZZ    II      TT      AA    AA DD    DD EE         LL                          ||
||      ZZ      II      TT      AA    AA DD    DD EEEEEEEE   LL                          ||
||    ZZ        II      TT      AAAAAAAAAAAA   DD DD    EE   LL                          ||
||  ZZ          II      TT      AA    AA DD   DD  EE         LL                          ||
|| ZZZZZZZZZZZZ II      TT      AA    AA DDDDDD   EEEEEEEEEE LLLLLLLLLL                  ||
||                                                                                       ||
||                                                                                       ||
|| SSSSSSSSSS TTTTTTTTTTTT   AAAA   RRRRRRRR TTTTTTTTTTTT EEEEEEEEEE DDDDDD              ||
|| SS             TT       AA    AA RR    RR     TT       EE         DD   DD             ||
|| SS             TT       AA    AA RR    RR     TT       EE         DD    DD            ||
||   SSSSSS       TT       AA    AA RRRRRRRR     TT       EEEEEEEE   DD    DD            ||
||       SS       TT       AAAAAAAAAAAA RRRR     TT       EE         DD    DD            ||
||       SS       TT       AA    AA RR    RR     TT       EE         DD   DD             ||
|| SSSSSSSSSS     TT       AA    AA RR    RR     TT       EEEEEEEEEE DDDDDD              ||
||                                                                                       ||
++=======================================================================================++"
echo "access the console here http://localhost:${FE_PORT}"
echo "access the login here http://localhost:50003/login"
echo "access the apis here http://localhost:50002"