From 103b190af749748c06b5cfc0e83f14cec6e0d41c Mon Sep 17 00:00:00 2001
From: Elio Bischof
Date: Fri, 21 Mar 2025 16:49:37 +0100
Subject: [PATCH] docs(self-hosting): use postgres for deployment docs (#9571)

# Which Problems Are Solved

The Kubernetes and Knative deployment docs suggest depending on CockroachDB
installations even though we are sunsetting CockroachDB support with the
upcoming major version. This can be annoying for users who create new
environments using CockroachDB.

# How the Problems Are Solved

- The Kubernetes deployment guide is removed and instead points to examples
  in the chart repo directly. This removes redundancies that are prone to
  getting outdated without notice.
- The Knative deployment uses commands to deploy a PostgreSQL StatefulSet
  instead of a CockroachDB StatefulSet. The DB command from the Knative
  Tutorial is used, as users are likely to be familiar with the tutorial's
  configuration already. The static Kubernetes YAML files for the DB as well
  as for the Zitadel Knative service are removed altogether, as they are
  prone to getting outdated without notice and don't serve any purpose.

# Additional Changes

- The outdated and boring video guide for Knative is removed.
- The Knative configuration is changed so that the first-time login becomes
  easier, which improves DevX.
- The current Docker Compose file doesn't work; this PR fixes it and
  upgrades the used Postgres to v17.

# Additional Context

- Closes https://github.com/zitadel/zitadel-charts/issues/322
- Replaces https://github.com/zitadel/zitadel/pull/9540

Co-authored-by: Silvan <27845747+adlerhurst@users.noreply.github.com>
---
 .../cockroachdb-statefulset-single-node.yaml  | 169 ------------------
 deploy/knative/zitadel-knative-service.yaml   |  42 -----
 docs/docs/self-hosting/deploy/knative.mdx     |  45 +++--
 .../self-hosting/manage/configure/_helm.mdx   |  42 +----
 .../self-hosting/manage/configure/_login.md   |   1 +
 .../manage/configure/configure.mdx            |   9 +-
 .../manage/configure/docker-compose.yaml      |   8 +-
 .../example-zitadel-values-secrets.yaml       |  16 --
 .../configure/example-zitadel-values.yaml     |  17 --
 9 files changed, 37 insertions(+), 312 deletions(-)
 delete mode 100644 deploy/knative/cockroachdb-statefulset-single-node.yaml
 delete mode 100644 deploy/knative/zitadel-knative-service.yaml
 create mode 100644 docs/docs/self-hosting/manage/configure/_login.md
 delete mode 100644 docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml
 delete mode 100644 docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml

diff --git a/deploy/knative/cockroachdb-statefulset-single-node.yaml b/deploy/knative/cockroachdb-statefulset-single-node.yaml
deleted file mode 100644
index cf22db6f44..0000000000
--- a/deploy/knative/cockroachdb-statefulset-single-node.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cockroachdb-statefulset.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  # This service is meant to be used by clients of the database. It exposes a ClusterIP that will
-  # automatically load balance connections to the different database pods.
-  name: cockroachdb-public
-  labels:
-    app: cockroachdb
-spec:
-  ports:
-  # The main port, served by gRPC, serves Postgres-flavor SQL, internode
-  # traffic and the cli.
-  - port: 26257
-    targetPort: 26257
-    name: grpc
-  # The secondary port serves the UI as well as health and debug endpoints.
-  - port: 8080
-    targetPort: 8080
-    name: http
-  selector:
-    app: cockroachdb
----
-apiVersion: v1
-kind: Service
-metadata:
-  # This service only exists to create DNS entries for each pod in the stateful
-  # set such that they can resolve each other's IP addresses. It does not
-  # create a load-balanced ClusterIP and should not be used directly by clients
-  # in most circumstances.
-  name: cockroachdb
-  labels:
-    app: cockroachdb
-  annotations:
-    # Use this annotation in addition to the actual publishNotReadyAddresses
-    # field below because the annotation will stop being respected soon but the
-    # field is broken in some versions of Kubernetes:
-    # https://github.com/kubernetes/kubernetes/issues/58662
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
-    prometheus.io/scrape: "true"
-    prometheus.io/path: "_status/vars"
-    prometheus.io/port: "8080"
-spec:
-  ports:
-  - port: 26257
-    targetPort: 26257
-    name: grpc
-  - port: 8080
-    targetPort: 8080
-    name: http
-  # We want all pods in the StatefulSet to have their addresses published for
-  # the sake of the other CockroachDB pods even before they're ready, since they
-  # have to be able to talk to each other in order to become ready.
-  publishNotReadyAddresses: true
-  clusterIP: None
-  selector:
-    app: cockroachdb
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: cockroachdb
-spec:
-  serviceName: "cockroachdb"
-  replicas: 1
-  selector:
-    matchLabels:
-      app: cockroachdb
-  template:
-    metadata:
-      labels:
-        app: cockroachdb
-    spec:
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 100
-            podAffinityTerm:
-              labelSelector:
-                matchExpressions:
-                - key: app
-                  operator: In
-                  values:
-                  - cockroachdb
-              topologyKey: kubernetes.io/hostname
-      containers:
-      - name: cockroachdb
-        image: cockroachdb/cockroach:latest-v24.3
-        imagePullPolicy: IfNotPresent
-        # TODO: Change these to appropriate values for the hardware that you're running. You can see
-        # the resources that can be allocated on each of your Kubernetes nodes by running:
-        #   kubectl describe nodes
-        # Note that requests and limits should have identical values.
-        resources:
-          requests:
-            cpu: "1"
-            memory: "4Gi"
-          limits:
-            cpu: "1"
-            memory: "4Gi"
-        ports:
-        - containerPort: 26257
-          name: grpc
-        - containerPort: 8080
-          name: http
-# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases.
-#       livenessProbe:
-#         httpGet:
-#           path: "/health"
-#           port: http
-#         initialDelaySeconds: 30
-#         periodSeconds: 5
-        readinessProbe:
-          httpGet:
-            path: "/health?ready=1"
-            port: http
-          initialDelaySeconds: 10
-          periodSeconds: 5
-          failureThreshold: 2
-        volumeMounts:
-        - name: datadir
-          mountPath: /cockroach/cockroach-data
-        env:
-        - name: COCKROACH_CHANNEL
-          value: kubernetes-insecure
-        - name: GOMAXPROCS
-          valueFrom:
-            resourceFieldRef:
-              resource: limits.cpu
-              divisor: "1"
-        - name: MEMORY_LIMIT_MIB
-          valueFrom:
-            resourceFieldRef:
-              resource: limits.memory
-              divisor: "1Mi"
-        command:
-          - "/bin/bash"
-          - "-ecx"
-          # The use of qualified `hostname -f` is crucial:
-          # Other nodes aren't able to look up the unqualified hostname.
-          - exec
-            /cockroach/cockroach
-            start-single-node
-            --logtostderr
-            --insecure
-            --advertise-host $(hostname -f)
-            --http-addr 0.0.0.0
-            --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB
-            --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB
-      # No pre-stop hook is required, a SIGTERM plus some time is all that's
-      # needed for graceful shutdown of a node.
-      terminationGracePeriodSeconds: 60
-      volumes:
-      - name: datadir
-        persistentVolumeClaim:
-          claimName: datadir
-  podManagementPolicy: Parallel
-  updateStrategy:
-    type: RollingUpdate
-  volumeClaimTemplates:
-  - metadata:
-      name: datadir
-    spec:
-      accessModes:
-        - "ReadWriteOnce"
-      resources:
-        requests:
-          storage: 100Gi
diff --git a/deploy/knative/zitadel-knative-service.yaml b/deploy/knative/zitadel-knative-service.yaml
deleted file mode 100644
index 5271f99253..0000000000
--- a/deploy/knative/zitadel-knative-service.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
-  creationTimestamp: null
-  name: zitadel
-spec:
-  template:
-    metadata:
-      annotations:
-        client.knative.dev/user-image: ghcr.io/zitadel/zitadel:latest
-      creationTimestamp: null
-    spec:
-      containerConcurrency: 0
-      containers:
-      - args:
-        - admin
-        - start-from-init
-        - --masterkey
-        - MasterkeyNeedsToHave32Characters
-        env:
-        - name: ZITADEL_DATABASE_COCKROACH_HOST
-          value: cockroachdb
-        - name: ZITADEL_EXTERNALSECURE
-          value: "false"
-        - name: ZITADEL_TLS_ENABLED
-          value: "false"
-        - name: ZITADEL_EXTERNALPORT
-          value: "80"
-        - name: ZITADEL_EXTERNALDOMAIN
-          value: zitadel.default.127.0.0.1.sslip.io
-        image: ghcr.io/zitadel/zitadel:latest
-        name: user-container
-        ports:
-        - containerPort: 8080
-          protocol: TCP
-        readinessProbe:
-          successThreshold: 1
-          tcpSocket:
-            port: 0
-        resources: {}
-      enableServiceLinks: false
-      timeoutSeconds: 300
diff --git a/docs/docs/self-hosting/deploy/knative.mdx b/docs/docs/self-hosting/deploy/knative.mdx
index b26c7189bd..0c8e7f0a36 100644
--- a/docs/docs/self-hosting/deploy/knative.mdx
+++ b/docs/docs/self-hosting/deploy/knative.mdx
@@ -13,34 +13,39 @@ import NoteInstanceNotFound from './troubleshooting/_note_instance_not_found.mdx
 Follow the [Knative quickstart guide](https://knative.dev/docs/getting-started/quickstart-install/)
 to get a local kind/minikube environment with Knative capabilities.
 
-## Run CockroachDB
+For example, to install Knative on a kind cluster, run `kn quickstart kind`.
 
-Start a single-node cockroachdb as statefulset
+## Run PostgreSQL
+
+If you are following the Knative Tutorial, you can deploy Postgres as a StatefulSet for the tutorial's Bookstore sample app.
+For example:
 
 ```bash
-kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/main/deploy/knative/cockroachdb-statefulset-single-node.yaml
+git clone https://github.com/knative/docs.git
+kubectl apply -f docs/code-samples/eventing/bookstore-sample-app/solution/db-service/
 ```
 
-## Start ZITADEL
-
-### Knative Command
+## Start Zitadel as a Knative Service
 
 ```bash
 kn service create zitadel \
 --image ghcr.io/zitadel/zitadel:latest \
 --port 8080 \
---env ZITADEL_DATABASE_COCKROACH_HOST=cockroachdb \
+--env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
 --env ZITADEL_EXTERNALSECURE=false \
 --env ZITADEL_EXTERNALPORT=80 \
 --env ZITADEL_TLS_ENABLED=false \
---env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
---arg "start-from-init" --arg "--masterkey" --arg "MasterkeyNeedsToHave32Characters"
-```
-
-### Knavite yaml
-
-```bash
-kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/main/deploy/knative/zitadel-knative-service.yaml
+--env ZITADEL_DATABASE_POSTGRES_HOST=postgresql \
+--env ZITADEL_DATABASE_POSTGRES_PORT=5432 \
+--env ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel \
+--env ZITADEL_DATABASE_POSTGRES_USER_USERNAME=myzitadeluser \
+--env ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=myzitadelpassword \
+--env ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable \
+--env ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=myuser \
+--env ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=mypassword \
+--env ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable \
+--env ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false \
+--env ZITADEL_DEFAULTINSTANCE_LOGINPOLICY_MFAINITSKIPLIFETIME="0s" \
+--arg "start-from-init" --arg "--masterkey=MasterkeyNeedsToHave32Characters"
 ```
 
 ## Access ZITADEL
 
@@ -54,17 +59,9 @@ NAME URL LATEST AGE COND
 zitadel   http://zitadel.default.127.0.0.1.sslip.io   zitadel-00001   10m   3 OK / 3   True
 ```
 
-Add the console path to the URL and open in browser
-http://zitadel.default.127.0.0.1.sslip.io/ui/console
-
-If you didn't configure something else, this is the default IAM admin users login:
-
-* username: zitadel-admin@zitadel.zitadel.default.127.0.0.1.sslip.io
-* password: Password1!
+Open your browser at http://zitadel.default.127.0.0.1.sslip.io/ui/console?login_hint=zitadel-admin@zitadel.zitadel.default.127.0.0.1.sslip.io and use the initial password _Password1!_
 
-## VideoGuide
-
 
diff --git a/docs/docs/self-hosting/manage/configure/_helm.mdx b/docs/docs/self-hosting/manage/configure/_helm.mdx
index 9f03e4237a..b35957abb8 100644
--- a/docs/docs/self-hosting/manage/configure/_helm.mdx
+++ b/docs/docs/self-hosting/manage/configure/_helm.mdx
@@ -1,36 +1,6 @@
-import CodeBlock from '@theme/CodeBlock';
-import ExampleZITADELValuesSource from '!!raw-loader!./example-zitadel-values.yaml'
-import ExampleZITADELValuesSecretsSource from '!!raw-loader!./example-zitadel-values-secrets.yaml'
-
-By default, the chart installs a secure ZITADEL and CockroachDB.
-The example files makes an insecure ZITADEL accessible by port forwarding the ZITADEL service to localhost.
-For more configuration options, [go to the chart repo descriptions](https://github.com/zitadel/zitadel-charts).
-For a secure installation with Docker Compose, [go to the loadbalancing example](/self-hosting/deploy/loadbalancing-example)
-
-By executing the commands below, you will download the following files:
-
-<details>
-    <summary>example-zitadel-values.yaml</summary>
-    <CodeBlock language="yaml">{ExampleZITADELValuesSource}</CodeBlock>
-</details>
-
-<details>
-    <summary>example-zitadel-values-secrets.yaml</summary>
-    <CodeBlock language="yaml">{ExampleZITADELValuesSecretsSource}</CodeBlock>
-</details>
-
-
-```bash
-# Download and adjust the example configuration file containing standard configuration
-wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml
-
-# Download and adjust the example configuration file containing secret configuration
-wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml
-
-# Install an insecure zitadel release that works with localhost
-helm install --namespace zitadel --create-namespace my-zitadel zitadel/zitadel \
-  --values ./example-zitadel-values.yaml \
-  --values ./example-zitadel-values-secrets.yaml
-
-# Forward the ZITADEL service port to your local machine
-kubectl --namespace zitadel port-forward svc/my-zitadel 8080:80
-```
+To run Zitadel on Kubernetes, use [the official Zitadel Helm chart](https://github.com/zitadel/zitadel-charts).
+Configure Zitadel using native Helm values.
+You can manage secrets through Helm values, letting Helm create Kubernetes secrets.
+Alternatively, reference existing Kubernetes secrets managed outside of Helm.
+See the [referenced secrets example](https://github.com/zitadel/zitadel-charts/tree/main/examples/3-referenced-secrets) in the chart's */examples* folder.
+For a quick setup, check out the [insecure Postgres example](https://github.com/zitadel/zitadel-charts/tree/main/examples/1-insecure-postgres).
diff --git a/docs/docs/self-hosting/manage/configure/_login.md b/docs/docs/self-hosting/manage/configure/_login.md
new file mode 100644
index 0000000000..2fc258b299
--- /dev/null
+++ b/docs/docs/self-hosting/manage/configure/_login.md
@@ -0,0 +1 @@
+Open your favorite internet browser at http://localhost:8080/ui/console?login_hint=root@zitadel.localhost and use the password _RootPassword1!_
diff --git a/docs/docs/self-hosting/manage/configure/configure.mdx b/docs/docs/self-hosting/manage/configure/configure.mdx
index aaf221dfda..c68f716d63 100644
--- a/docs/docs/self-hosting/manage/configure/configure.mdx
+++ b/docs/docs/self-hosting/manage/configure/configure.mdx
@@ -8,6 +8,7 @@ import TabItem from "@theme/TabItem";
 import LinuxUnix from "./_linuxunix.mdx";
 import Compose from "./_compose.mdx";
 import Helm from "./_helm.mdx";
+import Login from "./_login.md";
 import CodeBlock from "@theme/CodeBlock";
 import DefaultsYamlSource from "!!raw-loader!./defaults.yaml";
 import StepsYamlSource from "!!raw-loader!./steps.yaml";
@@ -90,21 +91,17 @@ There are three ways to pass the masterkey to the `zitadel` binary:
 
 >
 
+<Login/>
+
-Open your favorite internet browser at [http://localhost:8080/ui/console](http://localhost:8080/ui/console).
-This is the IAM admin users login according to your configuration in the [example-zitadel-init-steps.yaml](./example-zitadel-init-steps.yaml):
-
-- **username**: _root@zitadel.localhost_
-- **password**: _RootPassword1!_
-
 ## What's next
 
 - Read more about [the login process](/guides/integrate/login/login-users).
 
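Reviewer note: since the rewritten `_helm.mdx` now only links to the chart examples instead of shipping commands, here is a minimal sketch of what the quick setup boils down to. The `helm repo add`/`helm install` flow follows the zitadel-charts README; the `zitadel` namespace and `my-zitadel` release name match the removed docs, while `./zitadel-values.yaml` is a placeholder for a values file adapted from the linked insecure Postgres example.

```bash
# Add and refresh the official Zitadel chart repository
helm repo add zitadel https://charts.zitadel.com
helm repo update

# Install a release into its own namespace, using a values file
# adapted from the chart's examples (placeholder file name)
helm install --namespace zitadel --create-namespace my-zitadel zitadel/zitadel \
  --values ./zitadel-values.yaml

# Forward the service port and open http://localhost:8080/ui/console
kubectl --namespace zitadel port-forward svc/my-zitadel 8080:80
```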
diff --git a/docs/docs/self-hosting/manage/configure/docker-compose.yaml b/docs/docs/self-hosting/manage/configure/docker-compose.yaml
index abd1818a7b..3fd0e5471c 100644
--- a/docs/docs/self-hosting/manage/configure/docker-compose.yaml
+++ b/docs/docs/self-hosting/manage/configure/docker-compose.yaml
@@ -11,9 +11,12 @@ services:
       - "./example-zitadel-config.yaml:/example-zitadel-config.yaml:ro"
       - "./example-zitadel-secrets.yaml:/example-zitadel-secrets.yaml:ro"
       - "./example-zitadel-init-steps.yaml:/example-zitadel-init-steps.yaml:ro"
+    depends_on:
+      db:
+        condition: "service_healthy"
 
   db:
-    image: postgres:16-alpine
+    image: postgres:17-alpine
     restart: always
     environment:
       - POSTGRES_USER=root
@@ -25,7 +28,7 @@ services:
       interval: 10s
       timeout: 60s
       retries: 5
-      start_period: 10s 
+      start_period: 10s
     volumes:
       - 'data:/var/lib/postgresql/data:rw'
 
@@ -34,3 +37,4 @@ networks:
 
 volumes:
   data:
+
diff --git a/docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml b/docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml
deleted file mode 100644
index 99e5ce5647..0000000000
--- a/docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
-zitadel:
-
-  masterkey: 'MasterkeyNeedsToHave32Characters'
-
-  secretConfig:
-
-    Database:
-      postgres:
-        User:
-          # If the user doesn't exist already, it is created
-          Username: 'root'
-          Password: 'Secret_DB_User_Password'
-        Admin:
-          Username: 'root'
-          Password: ''
diff --git a/docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml b/docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml
deleted file mode 100644
index 571c7af699..0000000000
--- a/docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
-zitadel:
-  configmapConfig:
-    Log:
-      Level: 'info'
-
-    # Make ZITADEL accessible over HTTP, not HTTPS
-    ExternalSecure: false
-    ExternalDomain: localhost
-
-    # the configmap is also passed to the zitadel binary via the --steps flag
-    FirstInstance:
-      Org:
-        Human:
-          # use the loginname root@zitadel.localhost
-          Username: 'root'
-          Password: 'RootPassword1!'
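Reviewer note: the compose fix is easy to sanity-check end to end. A minimal sketch, assuming the fixed docker-compose.yaml and the example-zitadel-*.yaml files sit in the working directory; the `/debug/healthz` path is Zitadel's health endpoint, so adjust it if your version exposes health differently.

```bash
# Start the stack; the new depends_on condition makes the zitadel
# container wait until the db service's pg_isready healthcheck passes
docker compose up --detach

# The db service should report "healthy" before zitadel starts
docker compose ps

# Once zitadel is up, its health endpoint should answer on port 8080
curl --fail http://localhost:8080/debug/healthz
```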