docs(self-hosting): use postgres for deployment docs (#9571)

# Which Problems Are Solved

The Kubernetes and Knative deployment docs suggest depending on
CockroachDB installations even though CockroachDB support is sunset
with the upcoming major version. This can be annoying for users who
create new environments on top of CockroachDB.

# How the Problems Are Solved

- The Kubernetes deployment guide is removed and points to examples in the
chart repo directly. This removes redundancies that are prone to becoming
outdated without notice.

- The Knative deployment uses commands to deploy a PostgreSQL
StatefulSet instead of a CockroachDB StatefulSet (see the command
sketch after this list). The DB command from the Knative Tutorial is
used, as users are likely already familiar with the tutorial's
configuration. The static Kubernetes YAML files for the DB as well as
for the Zitadel Knative service are removed altogether, as they are
prone to becoming outdated without notice and don't serve any purpose.
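
For reference, the database setup in the reworked Knative guide boils down to the two commands below, taken verbatim from the diff in this PR (the manifest path lives inside the knative/docs checkout, not in the Zitadel repo):

```bash
# Deploy the Knative Tutorial's PostgreSQL StatefulSet
git clone https://github.com/knative/docs.git
kubectl apply -f docs/code-samples/eventing/bookstore-sample-app/solution/db-service/
```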

# Additional Changes

- The outdated and boring video guide for Knative is removed.
- The Knative configuration is changed so that the first-time login becomes
easier, which improves DevX (see the snippet after this list).
- The current Docker Compose file doesn't work; this PR fixes it and
upgrades the bundled Postgres to v17.
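
Concretely, the easier first-time login comes from two additional environment variables on the Knative service (the full `kn service create` command is in the diff below). Shown here in isolation via a hypothetical `kn service update`, they skip the forced password change for the first admin user and the MFA-initialization prompt at login:

```bash
# Hypothetical isolation of the two DevX-relevant flags; in the docs they
# are part of the initial `kn service create` call shown further below.
kn service update zitadel \
  --env ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false \
  --env ZITADEL_DEFAULTINSTANCE_LOGINPOLICY_MFAINITSKIPLIFETIME="0s"
```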

# Additional Context

- Closes https://github.com/zitadel/zitadel-charts/issues/322
- Replaces https://github.com/zitadel/zitadel/pull/9540

Co-authored-by: Silvan <27845747+adlerhurst@users.noreply.github.com>
Authored by Elio Bischof on 2025-03-21 16:49:37 +01:00, committed by GitHub.
parent 8b1b9cbb98, commit 103b190af7
9 changed files with 37 additions and 312 deletions


@@ -1,169 +0,0 @@
# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cockroachdb-statefulset.yaml
apiVersion: v1
kind: Service
metadata:
  # This service is meant to be used by clients of the database. It exposes a ClusterIP that will
  # automatically load balance connections to the different database pods.
  name: cockroachdb-public
  labels:
    app: cockroachdb
spec:
  ports:
  # The main port, served by gRPC, serves Postgres-flavor SQL, internode
  # traffic and the cli.
  - port: 26257
    targetPort: 26257
    name: grpc
  # The secondary port serves the UI as well as health and debug endpoints.
  - port: 8080
    targetPort: 8080
    name: http
  selector:
    app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
  # This service only exists to create DNS entries for each pod in the stateful
  # set such that they can resolve each other's IP addresses. It does not
  # create a load-balanced ClusterIP and should not be used directly by clients
  # in most circumstances.
  name: cockroachdb
  labels:
    app: cockroachdb
  annotations:
    # Use this annotation in addition to the actual publishNotReadyAddresses
    # field below because the annotation will stop being respected soon but the
    # field is broken in some versions of Kubernetes:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
    prometheus.io/scrape: "true"
    prometheus.io/path: "_status/vars"
    prometheus.io/port: "8080"
spec:
  ports:
  - port: 26257
    targetPort: 26257
    name: grpc
  - port: 8080
    targetPort: 8080
    name: http
  # We want all pods in the StatefulSet to have their addresses published for
  # the sake of the other CockroachDB pods even before they're ready, since they
  # have to be able to talk to each other in order to become ready.
  publishNotReadyAddresses: true
  clusterIP: None
  selector:
    app: cockroachdb
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cockroachdb
spec:
  serviceName: "cockroachdb"
  replicas: 1
  selector:
    matchLabels:
      app: cockroachdb
  template:
    metadata:
      labels:
        app: cockroachdb
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - cockroachdb
              topologyKey: kubernetes.io/hostname
      containers:
      - name: cockroachdb
        image: cockroachdb/cockroach:latest-v24.3
        imagePullPolicy: IfNotPresent
        # TODO: Change these to appropriate values for the hardware that you're running. You can see
        # the resources that can be allocated on each of your Kubernetes nodes by running:
        #   kubectl describe nodes
        # Note that requests and limits should have identical values.
        resources:
          requests:
            cpu: "1"
            memory: "4Gi"
          limits:
            cpu: "1"
            memory: "4Gi"
        ports:
        - containerPort: 26257
          name: grpc
        - containerPort: 8080
          name: http
        # We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases.
        # livenessProbe:
        #   httpGet:
        #     path: "/health"
        #     port: http
        #   initialDelaySeconds: 30
        #   periodSeconds: 5
        readinessProbe:
          httpGet:
            path: "/health?ready=1"
            port: http
          initialDelaySeconds: 10
          periodSeconds: 5
          failureThreshold: 2
        volumeMounts:
        - name: datadir
          mountPath: /cockroach/cockroach-data
        env:
        - name: COCKROACH_CHANNEL
          value: kubernetes-insecure
        - name: GOMAXPROCS
          valueFrom:
            resourceFieldRef:
              resource: limits.cpu
              divisor: "1"
        - name: MEMORY_LIMIT_MIB
          valueFrom:
            resourceFieldRef:
              resource: limits.memory
              divisor: "1Mi"
        command:
        - "/bin/bash"
        - "-ecx"
        # The use of qualified `hostname -f` is crucial:
        # Other nodes aren't able to look up the unqualified hostname.
        - exec
          /cockroach/cockroach
          start-single-node
          --logtostderr
          --insecure
          --advertise-host $(hostname -f)
          --http-addr 0.0.0.0
          --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB
          --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB
      # No pre-stop hook is required, a SIGTERM plus some time is all that's
      # needed for graceful shutdown of a node.
      terminationGracePeriodSeconds: 60
      volumes:
      - name: datadir
        persistentVolumeClaim:
          claimName: datadir
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: 100Gi
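
A quick sanity check on the memory wiring of this (now deleted) manifest: the downward-API env var `MEMORY_LIMIT_MIB` divides the 4Gi memory limit by 1Mi, so it resolves to 4096, and both `--cache` and `--max-sql-memory` are computed as a quarter of that:

```bash
# Same arithmetic the start command performs at pod startup
MEMORY_LIMIT_MIB=4096        # the 4Gi limit expressed in MiB
expr $MEMORY_LIMIT_MIB / 4   # prints 1024 -> --cache 1024MiB, --max-sql-memory 1024MiB
```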


@@ -1,42 +0,0 @@
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  creationTimestamp: null
  name: zitadel
spec:
  template:
    metadata:
      annotations:
        client.knative.dev/user-image: ghcr.io/zitadel/zitadel:latest
      creationTimestamp: null
    spec:
      containerConcurrency: 0
      containers:
      - args:
        - admin
        - start-from-init
        - --masterkey
        - MasterkeyNeedsToHave32Characters
        env:
        - name: ZITADEL_DATABASE_COCKROACH_HOST
          value: cockroachdb
        - name: ZITADEL_EXTERNALSECURE
          value: "false"
        - name: ZITADEL_TLS_ENABLED
          value: "false"
        - name: ZITADEL_EXTERNALPORT
          value: "80"
        - name: ZITADEL_EXTERNALDOMAIN
          value: zitadel.default.127.0.0.1.sslip.io
        image: ghcr.io/zitadel/zitadel:latest
        name: user-container
        ports:
        - containerPort: 8080
          protocol: TCP
        readinessProbe:
          successThreshold: 1
          tcpSocket:
            port: 0
        resources: {}
      enableServiceLinks: false
      timeoutSeconds: 300


@@ -13,34 +13,39 @@ import NoteInstanceNotFound from './troubleshooting/_note_instance_not_found.mdx

 Follow the [Knative quickstart guide](https://knative.dev/docs/getting-started/quickstart-install/) to get a local kind/minikube environment with Knative capabilities.

-## Run CockroachDB
-
-Start a single-node cockroachdb as statefulset
-
-```bash
-kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/main/deploy/knative/cockroachdb-statefulset-single-node.yaml
-```
-
-## Start ZITADEL
-
-### Knative Command
-
-```bash
-kn service create zitadel \
-  --image ghcr.io/zitadel/zitadel:latest \
-  --port 8080 \
-  --env ZITADEL_DATABASE_COCKROACH_HOST=cockroachdb \
-  --env ZITADEL_EXTERNALSECURE=false \
-  --env ZITADEL_EXTERNALPORT=80 \
-  --env ZITADEL_TLS_ENABLED=false \
-  --env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
-  --arg "start-from-init" --arg "--masterkey" --arg "MasterkeyNeedsToHave32Characters"
-```
-
-### Knavite yaml
-
-```bash
-kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/main/deploy/knative/zitadel-knative-service.yaml
-```
+For example, to install Knative on a kind cluster, run `kn quickstart kind`.
+
+## Run PostgreSQL
+
+If you are following the Knative Tutorial, you can deploy Postgres as a StatefulSet for the tutorial's Bookstore sample app. For example:
+
+```bash
+git clone https://github.com/knative/docs.git
+kubectl apply -f docs/code-samples/eventing/bookstore-sample-app/solution/db-service/
+```
+
+## Start Zitadel as a Knative Service
+
+```bash
+kn service create zitadel \
+  --image ghcr.io/zitadel/zitadel:latest \
+  --port 8080 \
+  --env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
+  --env ZITADEL_EXTERNALSECURE=false \
+  --env ZITADEL_EXTERNALPORT=80 \
+  --env ZITADEL_TLS_ENABLED=false \
+  --env ZITADEL_DATABASE_POSTGRES_HOST=postgresql \
+  --env ZITADEL_DATABASE_POSTGRES_PORT=5432 \
+  --env ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel \
+  --env ZITADEL_DATABASE_POSTGRES_USER_USERNAME=myzitadeluser \
+  --env ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=myzitadelpassword \
+  --env ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable \
+  --env ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=myuser \
+  --env ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=mypassword \
+  --env ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable \
+  --env ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false \
+  --env ZITADEL_DEFAULTINSTANCE_LOGINPOLICY_MFAINITSKIPLIFETIME="0s" \
+  --arg "start-from-init" --arg "--masterkey=MasterkeyNeedsToHave32Characters"
+```

 ## Access ZITADEL

@@ -54,17 +59,9 @@ NAME URL LATEST AGE COND
 zitadel http://zitadel.default.127.0.0.1.sslip.io zitadel-00001 10m 3 OK / 3 True
 ```

-Add the console path to the URL and open in browser
-
-http://zitadel.default.127.0.0.1.sslip.io/ui/console
-
-If you didn't configure something else, this is the default IAM admin users login:
-
-* username: zitadel-admin@<span></span>zitadel.zitadel.default.127.0.0.1.sslip.io
-* password: Password1!
+Open your browser at http://zitadel.default.127.0.0.1.sslip.io/ui/console?login_hint=zitadel-admin@zitadel.zitadel.default.127.0.0.1.sslip.io and use the initial password _Password1!_

 <NoteInstanceNotFound/>

-## VideoGuide
-
-<iframe width="100%" height="315" src="https://www.youtube.com/embed/m3TXmz3cK7E" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

 <Next components={props.components} />
 <Disclaimer components={props.components} />
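
The service table in the unchanged context above looks like the output of `kn service list`; assuming the quickstart cluster from the guide, the corresponding check would be:

```bash
# List Knative services and confirm zitadel reports Ready=True
kn service list
```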


@@ -1,36 +1,6 @@
-import CodeBlock from '@theme/CodeBlock';
-import ExampleZITADELValuesSource from '!!raw-loader!./example-zitadel-values.yaml'
-import ExampleZITADELValuesSecretsSource from '!!raw-loader!./example-zitadel-values-secrets.yaml'
-
-By default, the chart installs a secure ZITADEL and CockroachDB.
-The example files makes an insecure ZITADEL accessible by port forwarding the ZITADEL service to localhost.
-For more configuration options, [go to the chart repo descriptions](https://github.com/zitadel/zitadel-charts).
-For a secure installation with Docker Compose, [go to the loadbalancing example](/self-hosting/deploy/loadbalancing-example)
-
-By executing the commands below, you will download the following files:
-
-<details>
-  <summary>example-zitadel-values.yaml</summary>
-  <CodeBlock language="yaml">{ExampleZITADELValuesSource}</CodeBlock>
-</details>
-
-<details>
-  <summary>example-zitadel-values-secrets.yaml</summary>
-  <CodeBlock language="yaml">{ExampleZITADELValuesSecretsSource}</CodeBlock>
-</details>
-
-```bash
-# Download and adjust the example configuration file containing standard configuration
-wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/manage/configure/example-zitadel-values.yaml
-
-# Download and adjust the example configuration file containing secret configuration
-wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/manage/configure/example-zitadel-values-secrets.yaml
-
-# Install an insecure zitadel release that works with localhost
-helm install --namespace zitadel --create-namespace my-zitadel zitadel/zitadel \
-  --values ./example-zitadel-values.yaml \
-  --values ./example-zitadel-values-secrets.yaml
-
-# Forward the ZITADEL service port to your local machine
-kubectl --namespace zitadel port-forward svc/my-zitadel 8080:80
-```
+To run Zitadel on Kubernetes, use [the official Zitadel Helm chart](https://github.com/zitadel/zitadel-charts).
+Configure Zitadel using native Helm values.
+You can manage secrets through Helm values, letting Helm create Kubernetes secrets.
+Alternatively, reference existing Kubernetes secrets managed outside of Helm.
+See the [referenced secrets example](https://github.com/zitadel/zitadel-charts/tree/main/examples/3-referenced-secrets) in the chart's */examples* folder.
+For a quick setup, check out the [insecure Postgres example](https://github.com/zitadel/zitadel-charts/tree/main/examples/1-insecure-postgres).
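
For orientation, a minimal install against the chart repo might look like the sketch below. The `helm repo add` URL is assumed from the chart repository's README; the install command mirrors the one removed above, minus the values files:

```bash
# Assumed repo URL; see https://github.com/zitadel/zitadel-charts for the authoritative steps
helm repo add zitadel https://charts.zitadel.com
helm install --namespace zitadel --create-namespace my-zitadel zitadel/zitadel
```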


@@ -0,0 +1 @@
Open your favorite internet browser at http://localhost:8080/ui/console?login_hint=root@zitadel.localhost and use the password _RootPassword1!_


@@ -8,6 +8,7 @@ import TabItem from "@theme/TabItem";
 import LinuxUnix from "./_linuxunix.mdx";
 import Compose from "./_compose.mdx";
 import Helm from "./_helm.mdx";
+import Login from "./_login.md";
 import CodeBlock from "@theme/CodeBlock";
 import DefaultsYamlSource from "!!raw-loader!./defaults.yaml";
 import StepsYamlSource from "!!raw-loader!./steps.yaml";

@@ -90,21 +91,17 @@ There are three ways to pass the masterkey to the `zitadel` binary:
 >
 <TabItem value="linuxunix">
   <LinuxUnix />
+  <Login/>
 </TabItem>
 <TabItem value="compose">
   <Compose />
+  <Login/>
 </TabItem>
 <TabItem value="k8s">
   <Helm />
 </TabItem>
 </Tabs>

-Open your favorite internet browser at [http://localhost:8080/ui/console](http://localhost:8080/ui/console).
-
-This is the IAM admin users login according to your configuration in the [example-zitadel-init-steps.yaml](./example-zitadel-init-steps.yaml):
-
-- **username**: _root@<span></span>zitadel.localhost_
-- **password**: _RootPassword1!_

 ## What's next

 - Read more about [the login process](/guides/integrate/login/login-users).


@@ -11,9 +11,12 @@ services:
       - "./example-zitadel-config.yaml:/example-zitadel-config.yaml:ro"
       - "./example-zitadel-secrets.yaml:/example-zitadel-secrets.yaml:ro"
       - "./example-zitadel-init-steps.yaml:/example-zitadel-init-steps.yaml:ro"
+    depends_on:
+      db:
+        condition: "service_healthy"

   db:
-    image: postgres:16-alpine
+    image: postgres:17-alpine
     restart: always
     environment:
       - POSTGRES_USER=root

@@ -25,7 +28,7 @@ services:
       interval: 10s
       timeout: 60s
       retries: 5
       start_period: 10s
     volumes:
       - 'data:/var/lib/postgresql/data:rw'

@@ -34,3 +37,4 @@ networks:
 volumes:
   data:
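
Pieced together, the fixed startup ordering looks roughly like the sketch below. The `test` line is an assumption (something like `pg_isready`), since the actual healthcheck command sits outside the shown hunks:

```yaml
# Sketch of the relevant wiring, not the full compose file
services:
  zitadel:
    # ... zitadel configuration as in the file ...
    depends_on:
      db:
        condition: "service_healthy"   # start zitadel only after the healthcheck passes
  db:
    image: postgres:17-alpine
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U root"]  # assumed; not visible in this diff
      interval: 10s
      timeout: 60s
      retries: 5
      start_period: 10s
```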


@@ -1,16 +0,0 @@
# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
zitadel:
  masterkey: 'MasterkeyNeedsToHave32Characters'
  secretConfig:
    Database:
      postgres:
        User:
          # If the user doesn't exist already, it is created
          Username: 'root'
          Password: 'Secret_DB_User_Password'
        Admin:
          Username: 'root'
          Password: ''


@@ -1,17 +0,0 @@
# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
zitadel:
  configmapConfig:
    Log:
      Level: 'info'
    # Make ZITADEL accessible over HTTP, not HTTPS
    ExternalSecure: false
    ExternalDomain: localhost
    # the configmap is also passed to the zitadel binary via the --steps flag
    FirstInstance:
      Org:
        Human:
          # use the loginname root@zitadel.localhost
          Username: 'root'
          Password: 'RootPassword1!'