docs: installation (#3646)

* improve compose

* docs: installation v2

* ZITADEL_EXTERNALSECURE=true is sane

* Update docs/docs/guides/installation/configuration.mdx

* docs: download binaries from command line

* docs: better config description

* fixes

* default instance

* default user credentials

Co-authored-by: Livio Amstutz <livio.a@gmail.com>
This commit is contained in:
Elio Bischof 2022-05-18 14:42:13 +02:00 committed by GitHub
parent 17baf252dd
commit c8300b61b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 207 additions and 749 deletions

View File

@ -1,48 +0,0 @@
version: '3.8'
services:
  # CockroachDB single-node cluster used as ZITADEL's eventstore.
  db:
    restart: always
    networks:
      - zitadel
    image: cockroachdb/cockroach:v21.2.5
    command: start-single-node --insecure --listen-addr=0.0.0.0
    healthcheck:
      # Cockroach's HTTP readiness endpoint; healthy once the node accepts requests.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 20s
    ports:
      # Quote port mappings so YAML never applies implicit typing to "N:N" scalars.
      - "8080:8080"    # Cockroach admin UI
      - "26257:26257"  # SQL wire protocol
  # starts the backend (API's) of ZITADEL
  # Port 50001 serves the GRPC API
  # Port 50002 serves the REST API
  # Port 50003 serves the login GUI
  backend-run:
    restart: on-failure
    networks:
      - zitadel
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:50002/management/v1/healthz"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 20s
    depends_on:
      # Only start the backend once the database reports healthy.
      db:
        condition: service_healthy
    build:
      context: ../..
      dockerfile: build/zitadel/Dockerfile
      target: dev-go-build
    environment:
      - ZITADEL_EVENTSTORE_HOST=db
    ports:
      - "50001:50001"
    command: ['start']

View File

@ -1,66 +0,0 @@
# Tracing is disabled locally.
ZITADEL_TRACING_TYPE=none
# Metrics are disabled locally.
ZITADEL_METRICS_TYPE=none
# Recommended log level for local development is debug.
ZITADEL_LOG_LEVEL=debug
# Database connection (insecure local CockroachDB).
ZITADEL_EVENTSTORE_HOST=localhost
ZITADEL_EVENTSTORE_PORT=26257
CR_SSL_MODE=disable
# Keys for cryptography; the file maps the key IDs below to key material.
ZITADEL_KEY_PATH=.keys/local_keys.yaml
ZITADEL_USER_VERIFICATION_KEY=userverificationkey_1
ZITADEL_OTP_VERIFICATION_KEY=OTPVerificationKey_1
ZITADEL_OIDC_KEYS_ID=oidckey_1
ZITADEL_COOKIE_KEY=cookiekey_1
# NOTE(review): CSRF key reuses the cookie key ID — confirm this is intended.
ZITADEL_CSRF_KEY=cookiekey_1
ZITADEL_IDP_CONFIG_VERIFICATION_KEY=idpconfigverificationkey_1
ZITADEL_DOMAIN_VERIFICATION_KEY=domainverificationkey_1
# Debug mode is used for notifications.
DEBUG_MODE=true
# Used in the oidc library.
# true enables usage of (insecure) http for localhost as issuer.
CAOS_OIDC_DEV=true
# Sets the cookies insecure in login (never use this in production!).
ZITADEL_CSRF_DEV=true
# Notification sinks: log to stdout and write files under ./.notifications.
LOG_NOTIFICATIONS_ENABLED=true
LOG_NOTIFICATIONS_COMPACT=true
FS_NOTIFICATIONS_ENABLED=true
FS_NOTIFICATIONS_PATH=./.notifications
FS_NOTIFICATIONS_COMPACT=false
CHAT_ENABLED=false
# Configuration for api/browser calls.
ZITADEL_DEFAULT_DOMAIN=localhost
ZITADEL_ISSUER=http://localhost:50002/oauth/v2
ZITADEL_ACCOUNTS=http://localhost:50003/login
ZITADEL_AUTHORIZE=http://localhost:50002/oauth/v2
ZITADEL_OAUTH=http://localhost:50002/oauth/v2
ZITADEL_CONSOLE=http://localhost:4200
ZITADEL_COOKIE_DOMAIN=localhost
ZITADEL_API_DOMAIN=http://localhost:50002
# Caching is used in UIs and APIs.
ZITADEL_CACHE_MAXAGE=12h
ZITADEL_CACHE_SHARED_MAXAGE=168h
ZITADEL_SHORT_CACHE_MAXAGE=5m
ZITADEL_SHORT_CACHE_SHARED_MAXAGE=15m
# Console authorization configuration (OIDC authorization code flow).
ZITADEL_CONSOLE_RESPONSE_TYPE=CODE
ZITADEL_CONSOLE_GRANT_TYPE=AUTHORIZATION_CODE
ZITADEL_CONSOLE_DEV_MODE=true
ZITADEL_CONSOLE_DIR=console/src/assets/
# Minio (S3-compatible) asset storage configuration.
ZITADEL_ASSET_STORAGE_ACCESS_KEY_ID=access_key
ZITADEL_ASSET_STORAGE_SECRET_ACCESS_KEY=secret_key
ZITADEL_ASSET_STORAGE_TYPE=s3
ZITADEL_ASSET_STORAGE_ENDPOINT=minio:9000
ZITADEL_ASSET_STORAGE_BUCKET_PREFIX=local

View File

@ -0,0 +1,46 @@
version: '3.8'
services:
  # ZITADEL itself; start-from-init initializes the database on first start.
  zitadel:
    restart: always
    networks:
      - zitadel
    image: ghcr.io/zitadel/zitadel:v1.80.0-v2.20-amd64
    command: admin start-from-init --masterkeyFromEnv
    environment:
      # The master encryption key must be supplied by the caller (e.g. exported
      # in the shell or via an .env file) — it is not defaulted here.
      - ZITADEL_MASTERKEY=${ZITADEL_MASTERKEY}
      - ZITADEL_DATABASE_HOST=db
      - ZITADEL_EXTERNALSECURE=false
      # TODO: Use the following variables as defaults
      - ZITADEL_EXTERNALDOMAIN=localhost
      - ZITADEL_S3DEFAULTINSTANCE_CUSTOMDOMAIN=localhost
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/debug"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 40s
    depends_on:
      # Only start ZITADEL once the database reports healthy.
      db:
        condition: service_healthy
    ports:
      # Quote port mappings so YAML never applies implicit typing to "N:N" scalars.
      - "8080:8080"
  # CockroachDB single-node cluster used as ZITADEL's database.
  db:
    restart: always
    networks:
      - zitadel
    image: cockroachdb/cockroach:v21.2.8
    command: start-single-node --insecure --listen-addr=0.0.0.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 20s
    ports:
      # Cockroach admin UI remapped to 9090 so it doesn't clash with ZITADEL on 8080.
      - "9090:8080"
      - "26257:26257"
networks:
  zitadel:

View File

@ -1,38 +0,0 @@
---
title: ZITADEL Checkup
---
We provide services to check the setup of your ZITADEL instance that runs with the operators we also provide.
### In Scope
- Check prerequisites and architecture
- Check configuration for ZITADEL and ORBOS
- Functional testing of the ZITADEL instance
### Out of Scope
- Integration of external S3-storage or other types of storage
- Integration into internal monitoring and alerting
- Changes for specific environments
- Performance testing
- Application-side coding, configuration, or tuning
- Changes or configuration on assets used in ZITADEL
- Setting up or maintaining backup storage
### Prerequisites
- Access to relevant Kubernetes cluster or physical/virtual nodes
- Access to relevant asset, backup and provisioned storage
- Environment to test and check (should not be in productive use)
- Access to used configuration
- Access to relevant monitoring
- Access to relevant system and application logs
### Deliverable
- Document detailing findings and description of the suggested configuration changes
### Time Estimate
10 hours

View File

@ -0,0 +1,10 @@
```bash
# Generate a master encryption key
export ZITADEL_MASTERKEY="$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)"
# Download the docker compose example configuration
wget https://raw.githubusercontent.com/zitadel/zitadel/v2-alpha/deploy/compose/docker-compose.yml
# Run the database and application containers
docker compose up
```

View File

@ -0,0 +1,36 @@
## Overwrite ZITADEL Defaults
See a description of all possible configuration options with their defaults [in the source code](https://github.com/zitadel/zitadel/blob/v2/cmd/defaults.yaml).
You can overwrite these values using environment variables or configuration files.
### Configure by environment variables
Prefix the key with *ZITADEL\_*, uppercase the property and join sections with an underscore (_).
For example, if you want to configure the cockroach admin user name and password, export these two variables.
```bash
export ZITADEL_ADMINUSER_USERNAME="my_root_users_name"
export ZITADEL_ADMINUSER_PASSWORD="my_root_users_password"
```
### Configure by configuration files
Probably, you want to keep some configuration secret.
Fortunately, you can pass multiple configuration files to the zitadel command.
```bash
# Create a configuration file containing normal configuration
cat << EOF > ./zitadel-config.yaml
AdminUser:
Username: my_root_users_name
EOF
# Create a configuration file containing secrets
cat << EOF > ./zitadel-secrets.yaml
AdminUser:
Password: "my_root_users_password"
EOF
# Pass multiple config files using the --config argument
zitadel admin start-from-init --masterkeyFromEnv --config ./zitadel-config.yaml --config ./zitadel-secrets.yaml
```

View File

@ -1,67 +0,0 @@
---
title: CRD Mode on an existing Kubernetes cluster
---
:::tip What I need
I'd like to see an automatically operated ZITADEL instance running on my own [Kubernetes](https://kubernetes.io/) cluster
:::
First, download the template configuration files [database.yml](./templates/crd/database.yml) and [zitadel.yml](./templates/crd/zitadel.yml). Then adjust the values in database.yml and zitadel.yml to match your environment. Especially the values for the domain, cluster DNS, storage class, email and Twilio are important.
```bash
# Download the zitadelctl binary
curl -s https://api.github.com/repos/zitadel/zitadel/releases/latest | grep "browser_download_url.*zitadelctl-$(uname | awk '{print tolower($0)}')-amd64" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/zitadelctl && sudo chmod +x /usr/local/bin/zitadelctl && sudo chown $(id -u):$(id -g) /usr/local/bin/zitadelctl
sudo chmod +x /usr/local/bin/zitadelctl
sudo chown $(id -u):$(id -g) /usr/local/bin/zitadelctl
# Deploy the operators to the current-context of your ~/.kube/config file
zitadelctl takeoff
# As soon as the configuration is applied, the operators start their work
kubectl apply --filename ./database.yml,./zitadel.yml
# Write the encryption keys
zitadelctl writesecret zitadel.keys.existing --stdin << EOF
otpverificationkey_1: $(openssl rand -base64 22)
cookiekey_1: $(openssl rand -base64 22)
domainverificationkey_1: $(openssl rand -base64 22)
idpconfigverificationkey_1: $(openssl rand -base64 22)
oidckey_1: $(openssl rand -base64 22)
userverificationkey_1: $(openssl rand -base64 22)
EOF
# Write the Twilio sender ID and auth token so that ZITADEL is able to send your users SMS.
TWILIO_SID=<My Twilio Sender ID>
TWILIO_AUTH_TOKEN=<My Twilio auth token>
zitadelctl writesecret zitadel.twiliosid.existing --value $TWILIO_SID
zitadelctl writesecret zitadel.twilioauthtoken.existing --value $TWILIO_AUTH_TOKEN
# Write your email relay's app key so that ZITADEL is able to verify your users' email addresses
EMAIL_APP_KEY=<My email relays app key>
zitadelctl writesecret zitadel.emailappkey.existing --value $EMAIL_APP_KEY
# Enjoy watching the zitadel pods becoming ready
watch "kubectl --namespace caos-zitadel get pods"
```
ZITADEL needs [gRPC-Web](https://grpc.io/docs/platforms/web/basics/) for client-server communication, which the widely spread [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/) doesn't support out-of-the-box but Ambassador does. If you don't have an [Ambassador Version 1.x](https://www.getambassador.io/docs/edge-stack/1.14/tutorials/getting-started/) running, we recommend you run it with our operator [BOOM](https://github.com/caos/orbos/blob/v4.0.0/docs/boom/boom.md).
Download the template configuration file [boom.yml](./templates/boom.yml). Then adjust the values in boom.yml to match your environment.
```bash
# Download the orbctl binary
curl -s https://api.github.com/repos/caos/orbos/releases/latest | grep "browser_download_url.*orbctl.$(uname).$(uname -m)" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/orbctl
sudo chmod +x /usr/local/bin/orbctl
sudo chown $(id -u):$(id -g) /usr/local/bin/orbctl
# Deploy the operator to the current-context of your ~/.kube/config file
orbctl takeoff
# As soon as the configuration is applied, BOOM starts its work
kubectl apply --filename ./boom.yml
# Enjoy watching the ambassador pod becoming ready
watch "kubectl --namespace caos-system get pods"
```
Congratulations, you can accept traffic at four new ZITADEL [subdomains](/docs/apis/introduction#domains) now.

View File

@ -1,55 +0,0 @@
---
title: GitOps Mode on an existing Kubernetes cluster
---
:::tip What I need
I'd like to have a reproducible ZITADEL environment and a pull-based configuration management for safe and comfortable day-two operations.
:::
First, copy the template files [database.yml](./templates/gitops/database.yml) and [zitadel.yml](./templates/gitops/zitadel.yml) to the root of a new git Repository. Then adjust the values in database.yml and zitadel.yml to match your environment. Especially the values for the domain, cluster DNS, storage class, email and Twilio are important.
Now open a terminal and execute the following commands.
```bash
# Download the zitadelctl binary
curl -s https://api.github.com/repos/zitadel/zitadel/releases/latest | grep "browser_download_url.*zitadelctl-$(uname | awk '{print tolower($0)}')-amd64" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/zitadelctl && sudo chmod +x /usr/local/bin/zitadelctl && sudo chown $(id -u):$(id -g) /usr/local/bin/zitadelctl
sudo chmod +x /usr/local/bin/zitadelctl
sudo chown $(id -u):$(id -g) /usr/local/bin/zitadelctl
# Create an orb file at ${HOME}/.orb/config
MY_GIT_REPO="git@github.com:me/my-orb.git"
zitadelctl --gitops configure --repourl ${MY_GIT_REPO} --masterkey "$(openssl rand -base64 21)"
# Write the Twilio sender ID and auth token so that ZITADEL is able to send your users SMS.
TWILIO_SID=<My Twilio Sender ID>
TWILIO_AUTH_TOKEN=<My Twilio auth token>
zitadelctl --gitops writesecret zitadel.twiliosid.encrypted --value $TWILIO_SID
zitadelctl --gitops writesecret zitadel.twilioauthtoken.encrypted --value $TWILIO_AUTH_TOKEN
# Write your email relay's app key so that ZITADEL is able to verify your users' email addresses
EMAIL_APP_KEY=<My email relays app key>
zitadelctl --gitops writesecret zitadel.emailappkey.encrypted --value $EMAIL_APP_KEY
# Deploy the operators to the current-context of your ~/.kube/config file
zitadelctl --gitops takeoff
# Enjoy watching the zitadel pods becoming ready
watch "kubectl --namespace caos-zitadel get pods"
```
ZITADEL needs [gRPC-Web](https://grpc.io/docs/platforms/web/basics/) for client-server communication, which the widely spread [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/) doesn't support out-of-the-box but Ambassador does. If you don't have an [Ambassador Version 1.x](https://www.getambassador.io/docs/edge-stack/1.14/tutorials/getting-started/) running, we recommend you run it with our operator [BOOM](https://github.com/caos/orbos/blob/v4.0.0/docs/boom/boom.md). Do so by adding the template [boom.yml](./templates/boom.yml) to the root of your Repository and execute the following commands.
```bash
# Download the orbctl binary
curl -s https://api.github.com/repos/caos/orbos/releases/latest | grep "browser_download_url.*orbctl.$(uname).$(uname -m)" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/orbctl
sudo chmod +x /usr/local/bin/orbctl
sudo chown $(id -u):$(id -g) /usr/local/bin/orbctl
# Deploy the operator to the current-context of your ~/.kube/config file
orbctl --gitops takeoff
# Enjoy watching the ambassador pod becoming ready
watch "kubectl --namespace caos-system get pods"
```
Congratulations, you can accept traffic at four new ZITADEL [subdomains](/docs/apis/introduction#domains) now.

View File

@ -0,0 +1 @@
Coming soon

View File

@ -0,0 +1,55 @@
---
title: Installation
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import Linux from './linux.mdx'
import MacOS from './macos.mdx'
import Windows from './windows.mdx'
import Compose from './compose.mdx'
import Helm from './helm.mdx'
import Configuration from './configuration.mdx'
import OpenLocal from './openlocal.mdx'
# Run ZITADEL
Choose your platform and run ZITADEL with the most minimal configuration possible.
<Tabs
groupId="installation-target"
default="saas"
values={[
{'label': 'SaaS', 'value': 'saas'},
{'label': 'Linux', 'value': 'linux'},
{'label': 'MacOS', 'value': 'macos'},
{'label': 'Windows', 'value': 'windows'},
{'label': 'Docker Compose', 'value': 'compose'},
{'label': 'Kubernetes', 'value': 'k8s'}
]}
>
<TabItem value="saas">
Try our <a href="https://zitadel.com">SaaS offering</a>. The first 25k requests are free.
</TabItem>
<TabItem value="linux">
<Linux/>
<Configuration/>
<OpenLocal/>
</TabItem>
<TabItem value="macos">
<MacOS/>
<Configuration/>
<OpenLocal/>
</TabItem>
<TabItem value="windows">
<Windows/>
</TabItem>
<TabItem value="compose">
<Compose/>
<Configuration/>
<OpenLocal/>
</TabItem>
<TabItem value="k8s">
<Helm/>
</TabItem>
</Tabs>

View File

@ -0,0 +1,26 @@
## Download The CockroachDB binary
Download a `cockroach` binary as described [here](https://www.cockroachlabs.com/docs/v21.2/install-cockroachdb).
ZITADEL is tested against CockroachDB v21.2.8.
## Run CockroachDB and ZITADEL
```bash
# Run a CockroachDB instance
cockroach start-single-node --insecure --background --http-addr :9090
# Configure your environment
# Generate a master encryption key
export ZITADEL_MASTERKEY="$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)"
export ZITADEL_EXTERNALSECURE=false
export ZITADEL_EXTERNALDOMAIN=localhost
export ZITADEL_DEFAULTINSTANCE_CUSTOMDOMAIN=localhost
# Download the zitadel binary
curl -s https://api.github.com/repos/zitadel/zitadel/releases/tags/v2.0.0-v2-alpha.1 | grep "browser_download_url.*zitadel_.*_linux_$(dpkg --print-architecture)" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/zitadel && sudo chmod +x /usr/local/bin/zitadel && sudo chown $(id -u):$(id -g) /usr/local/bin/zitadel
sudo chmod +x /usr/local/bin/zitadel
sudo chown $(id -u):$(id -g) /usr/local/bin/zitadel
# Run ZITADEL (start-from-init initializes the database on first start)
zitadel admin start-from-init --masterkeyFromEnv
```

View File

@ -0,0 +1,27 @@
## Download The CockroachDB binary
Download a `cockroach` binary as described [here](https://www.cockroachlabs.com/docs/v21.2/install-cockroachdb).
ZITADEL is tested against CockroachDB v21.2.8.
## Run CockroachDB and ZITADEL
```bash
# Run a CockroachDB instance
cockroach start-single-node --insecure --background --http-addr :9090
# Configure your environment
# Generate a master encryption key
export ZITADEL_MASTERKEY="$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)"
export ZITADEL_EXTERNALSECURE=false
export ZITADEL_EXTERNALDOMAIN=localhost
export ZITADEL_DEFAULTINSTANCE_CUSTOMDOMAIN=localhost
export MY_ARCHITECTURE="arm64"
# Download the zitadel binary
curl -s https://api.github.com/repos/zitadel/zitadel/releases/tags/v2.0.0-v2-alpha.1 | grep "browser_download_url.*zitadel_.*_darwin_${MY_ARCHITECTURE}" | cut -d '"' -f 4 | sudo wget -i - -O /usr/local/bin/zitadel && sudo chmod +x /usr/local/bin/zitadel && sudo chown $(id -u):$(id -g) /usr/local/bin/zitadel
sudo chmod +x /usr/local/bin/zitadel
sudo chown $(id -u):$(id -g) /usr/local/bin/zitadel
# Run ZITADEL (start-from-init initializes the database on first start)
zitadel admin start-from-init --masterkeyFromEnv
```

View File

@ -1,65 +0,0 @@
---
title: Managed Dedicated Instance
---
:::tip What I need
I'd like to simply use ZITADEL without having to take care of any operational tasks, yet keeping control over all its data.
:::
CAOS bootstraps and maintains a new ZITADEL instance just for you. This includes its underlying infrastructure with Kubernetes on top of it as well as monitoring tools and an API gateway. Contact us at <hi@zitadel.ch> for purchasing ZITADEL Enterprise Cloud.
# Prerequisites
Depending on the infrastructure provider you choose, you need to ensure some prerequisites.
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
<Tabs
defaultValue="gce"
values={[
{label: 'Google Compute Engine', value: 'gce'},
{label: 'Cloudscale', value: 'cs'},
{label: 'Static Provider', value: 'static'},
]}>
<TabItem value="gce">
<ul>
<li>
A JSON key for the infrastructure management to authenticate with a service account named orbiter-system assigned with the following roles
<ul>
<li>roles/compute.admin</li>
<li>roles/iap.tunnelResourceAccessor</li>
<li>roles/serviceusage.serviceUsageAdmin</li>
<li>roles/iam.serviceAccountUser</li>
</ul>
</li>
<li>
A JSON key for the backups storage to authenticate with a service account assigned with the role roles/storage.admin
<ul>
<li>roles/storage.admin</li>
</ul>
</li>
</ul>
</TabItem>
<TabItem value="cs">
<ul>
<li>A Cloudscale API token</li>
</ul>
</TabItem>
<TabItem value="static">
<ul>
<li>A List of available CentOS 7 machines with their IP addresses</li>
<li>A passwordless sudo user called orbiter on all machines</li>
<li>A Bootstrap SSH key to login as orbiter (are replaced by ORBITER)</li>
<li>A storage solution (contact us)</li>
</ul>
</TabItem>
</Tabs>
- We need you to point [four DNS subdomains](/docs/apis/introduction#domains) to the CAOS-generated IP address.
- For being able to send SMS, we need a Twilio sender name, SID and an auth token.
- ZITADEL also needs to connect to an email relay of your choice. We need the SMTP host, user and app key as well as the ZITADEL emails sender address and name.
If you give us a Cloudflare user, an API key and a user service key, we can also manage the DNS entries, wildcard certificate and the IP whitelisting automatically using the Cloudflare API.
Metrics, logs and traces are collected and monitored by CAOS.

View File

@ -0,0 +1,4 @@
Open your favorite internet browser and navigate to [http://localhost:8080/ui/console](http://localhost:8080/ui/console).
Log in as default admin user:
- **username**: *zitadel-admin@<span></span>zitadel.localhost*
- **password**: *Password1!*

View File

@ -1,9 +0,0 @@
---
title: GitOps Mode on dedicated Kubernetes Clusters using ORBOS
---
:::tip What I need
I'd like to avoid platform failures caused by other applications or environments to have an impact on ZITADEL's production availability. So I run a near-identical dedicated GitOps managed Kubernetes cluster for each ZITADEL environment and region for proper isolation.
:::
> This will be added later on

View File

@ -1,43 +0,0 @@
---
title: ZITADEL setup with ORBOS
---
We provide services to setup our ORBOS and ZITADEL with the operators also provided by us.
### In Scope
- Check prerequisites and architecture
- Setup of VMs, Loadbalancing and Kubernetes with [ORBOS](https://github.com/caos/orbos)
- Setup of in-cluster toolset with ORBOS, which includes monitoring and an API gateway (Ambassador)
- Installation and configuration of ZITADEL with the ZITADEL-operator
- Installation and configuration of CockroachDB with the Database-operator
- Functional testing of the ZITADEL instance
### Out of Scope
- Integration of external S3-storage or other types of storage
- Integration into internal monitoring and alerting
- Multi-cluster architecture deployments
- Changes for specific environments
- Performance testing
- Production deployment
- Application-side coding, configuration, or tuning
- Changes or configuration on assets used in ZITADEL
- Setting up or maintaining backup storage
### Prerequisites
- S3-storage for assets in ZITADEL
- S3-storage or Google Cloud Bucket for backups of the database
- [Prerequisites listed for a managed instance](/docs/guides/installation/managed-dedicated-instance)
### Deliverable
- Running Kubernetes
- Running toolset for monitoring and alerting
- Running CockroachDB
- Running ZITADEL
- Running backups for ZITADEL
### Time Estimate
12 hours

View File

@ -1,45 +0,0 @@
---
title: ZITADEL setup
---
We provide services to setup our ZITADEL with the operators also provided by us.
### In Scope
- Check prerequisites and architecture
- Installation and configuration of ZITADEL with the ZITADEL-operator
- Installation and configuration of CockroachDB with the Database-operator
- Functional testing of the ZITADEL instance
### Out of Scope
- Running multiple ZITADEL instances on the same cluster
- Integration into internal monitoring and alerting
- Multi-cluster architecture deployments
- DNS, Network and Firewall configuration
- Kubernetes configuration
- Changes for specific environments
- Performance testing
- Production deployment
- Application-side coding, configuration, or tuning
- Changes or configuration on assets used in ZITADEL
- Setting up or maintaining backup storage
### Prerequisites
- Running Kubernetes with possibility to deploy to namespaces caos-system and caos-zitadel
- Volume provisioner for Kubernetes to fill Persistent Volume Claims
- S3-storage for assets in ZITADEL
- S3-storage or Google Cloud Bucket for backups of the database
- Inbound and outbound gRPC-Web traffic possible (for example, not natively supported by NGINX)
- [Prerequisites listed for a managed instance, limited to functionality for ZITADEL](/docs/guides/installation/managed-dedicated-instance)
### Deliverable
- Running CockroachDB
- Running ZITADEL
- Running backups for ZITADEL
### Time Estimate
8 hours

View File

@ -1,13 +0,0 @@
---
title: Shared Cloud at zitadel.ch
---
:::tip What I need
I'd like to simply use ZITADEL without having to take care of any operational tasks.
:::
Just register your ZITADEL instance in the ZITADEL Customer Portal //TODO: AddLink
Per default you will start in the [ZITADEL Free](https://zitadel.com/pricing) Tier, which already includes all the features.
You will immediately be able to integrate as many applications with as many users as you want. Serve your users multiple secure login methods for free.
[Jump](../basics/get-started) to the more detailed docs.

View File

@ -1,37 +0,0 @@
apiVersion: caos.ch/v1
kind: Boom
metadata:
  name: boom
  namespace: caos-system
spec:
  boom:
    # Version of the BOOM operator deployment.
    version: v4.0.0
    # If true, Kubernetes resources are applied forcefully.
    forceApply: true
    # Repository path where BOOM keeps its current state.
    currentStatePath: caos-internal/boom
  # Ambassador API gateway; deployed because ZITADEL needs gRPC-Web support,
  # which the NGINX ingress controller doesn't provide out of the box.
  apiGateway:
    deploy: true
    replicaCount: 1
    proxyProtocol: true
  # All monitoring/logging components are disabled in this minimal template.
  metricCollection:
    deploy: false
  logCollection:
    deploy: false
  nodeMetricsExporter:
    deploy: false
  systemdMetricsExporter:
    deploy: false
  monitoring:
    deploy: false
  kubeMetricsExporter:
    deploy: false
  reconciling:
    deploy: false
  metricsPersisting:
    deploy: false
  logsPersisting:
    deploy: false
  metricsServer:
    deploy: false
  # Hook for resources to apply before BOOM reconciles (disabled here).
  preApply:
    deploy: false
    folder: preapply

View File

@ -1,53 +0,0 @@
apiVersion: caos.ch/v1
kind: Database
metadata:
  # This value must be database
  name: database
  # This value must be caos-system
  namespace: caos-system
spec:
  kind: databases.caos.ch/Orb
  version: v0
  spec:
    # Print debug logs
    verbose: false
    # This is the version of the database operator deployment
    version: 1.0.0
    # If true, the operator overwrites its own deployment using the version above
    selfReconciling: true
    database:
      kind: databases.caos.ch/CockroachDB
      version: v0
      spec:
        # Resources for the database pods
        resources:
          limits:
            memory: 8Gi
          requests:
            cpu: 1
            memory: 6Gi
        # Print debug logs
        verbose: false
        # Number of database pods
        replicaCount: 1
        # PVC storage request
        storageCapacity: 368Gi
        # PVC storage class
        storageClass: fast
        # This value must match with your cluster DNS
        clusterDNS: cluster.local
        # If empty, the database isn't backed up
        backups: {}
        # Example S3 bucket backup configuration:
        # bucket:
        #   kind: databases.caos.ch/BucketBackup
        #   version: v0
        #   spec:
        #     # Print debug logs
        #     verbose: true
        #     # Schedule periodic backups
        #     cron: 0 * * * *
        #     # Specify the S3 bucket name
        #     bucket: dummy
        #     # The Google service accounts json key to use
        #     # Use the zitadelctl writesecret command
        #     serviceAccountJSON: {}

View File

@ -1,74 +0,0 @@
apiVersion: caos.ch/v1
kind: Zitadel
metadata:
  # This value must be zitadel
  name: zitadel
  # This value must be caos-system
  namespace: caos-system
spec:
  kind: zitadel.caos.ch/Orb
  version: v0
  spec:
    # Print debug logs
    verbose: false
    # This is the version of the ZITADEL operator deployment and the ZITADEL binary deployment
    version: 1.0.0
    # If true, the operator overwrites its own deployment using the version above
    selfReconciling: true
    iam:
      kind: zitadel.caos.ch/ZITADEL
      version: v0
      spec:
        # Print debug logs
        verbose: false
        # If true, Kubernetes resources are applied forcefully
        force: false
        # Number of ZITADEL pods
        replicaCount: 1
        # ZITADEL deployment configuration
        configuration:
          # Cache max-age settings used by the UIs and APIs
          cache:
            maxAge: 12h
            sharedMaxAge: 168h
            shortMaxAge: 5m
            shortSharedMaxAge: 15m
          # Keys ZITADEL uses for symmetric encryption
          secrets:
            # Reference to an existing Kubernetes secret holding the key material
            existingKeys:
              key: keys
              name: zitadel
            # IDs selecting individual keys from the key material above
            userVerificationID: userverificationkey_1
            otpVerificationID: otpverificationkey_1
            oidcKeysID: oidckey_1
            cookieID: cookiekey_1
            # NOTE(review): CSRF reuses the cookie key ID — confirm this is intended
            csrfID: cookiekey_1
            domainVerificationID: domainverificationkey_1
            idpConfigVerificationID: idpconfigverificationkey_1
          notifications:
            # Email configuration is used for sending verification emails
            email:
              smtpHost: smtp.gmail.com:465
              smtpUser: dummy
              senderAddress: dummy
              senderName: dummy
              tls: true
            # Twilio configuration is used for sending second factor SMS
            twilio:
              senderName: dummy
            # If true, no emails or SMS are sent to users
            debugMode: true
          # ZITADEL pods log level
          logLevel: info
        dns:
          # The main domain, the ZITADEL APIs should be published at
          domain: myzitadel.ch
          # The tls wildcard certificate used for publishing the zitadel endpoints over HTTPS
          tlsSecret: tls-cert-wildcard
          # The subdomains, the ZITADEL APIs should be published at
          subdomains:
            accounts: accounts
            api: api
            console: console
            issuer: issuer
          # This value must match with your cluster DNS
          clusterdns: cluster.local

View File

@ -1,45 +0,0 @@
kind: databases.caos.ch/Orb
version: v0
spec:
  # Print debug logs
  verbose: false
  # This is the version of the database operator deployment
  version: 1.0.0
  # If true, the operator overwrites its own deployment using the version above
  selfReconciling: true
  database:
    kind: databases.caos.ch/CockroachDB
    version: v0
    spec:
      # Resources for the database pods
      resources:
        limits:
          memory: 8Gi
        requests:
          cpu: 1
          memory: 6Gi
      # Print debug logs
      verbose: false
      # Number of database pods
      replicaCount: 1
      # PVC storage request
      storageCapacity: 368Gi
      # PVC storage class
      storageClass: fast
      # This value must match with your cluster DNS
      clusterDNS: cluster.local
      # If empty, the database isn't backed up
      backups: {}
      # Example S3 bucket backup configuration:
      # bucket:
      #   kind: databases.caos.ch/BucketBackup
      #   version: v0
      #   spec:
      #     # Print debug logs
      #     verbose: true
      #     # Schedule periodic backups
      #     cron: 0 * * * *
      #     # Specify the S3 bucket name
      #     bucket: dummy
      #     # The Google service accounts json key to use
      #     # Use the zitadelctl writesecret command
      #     serviceAccountJSON: {}

View File

@ -1,54 +0,0 @@
kind: zitadel.caos.ch/Orb
version: v0
spec:
  # Print debug logs
  verbose: false
  # This is the version of the ZITADEL operator deployment and the ZITADEL binary deployment
  version: 1.0.0
  # If true, the operator overwrites its own deployment using the version above
  selfReconciling: true
  iam:
    kind: zitadel.caos.ch/ZITADEL
    version: v0
    spec:
      # Print debug logs
      verbose: false
      # If true, Kubernetes resources are applied forcefully
      force: false
      # Number of zitadel pods
      replicaCount: 1
      # ZITADEL deployment configuration
      configuration:
        # Cache max-age settings used by the UIs and APIs
        cache:
          maxAge: 12h
          sharedMaxAge: 168h
          shortMaxAge: 5m
          shortSharedMaxAge: 15m
        notifications:
          # Email configuration is used for sending verification emails
          email:
            smtpHost: smtp.gmail.com:465
            smtpUser: dummy
            senderAddress: dummy
            senderName: dummy
            tls: true
          # Twilio configuration is used for sending second factor SMS
          twilio:
            senderName: dummy
          # If true, no emails or SMS are sent to users
          debugMode: true
        # ZITADEL pods log level
        logLevel: info
      dns:
        # The main domain, the ZITADEL APIs should be published at
        domain: myzitadel.ch
        # The tls wildcard certificate used for publishing the zitadel endpoints over HTTPS
        tlsSecret: tls-cert-wildcard
        # The subdomains, the ZITADEL APIs should be published at
        subdomains:
          accounts: accounts
          api: api
          console: console
          issuer: issuer
        # This value must match with your cluster DNS
        clusterdns: cluster.local

View File

@ -0,0 +1 @@
Coming soon

View File

@ -36,6 +36,7 @@ module.exports = {
],
guides: [
"guides/overview",
"guides/installation/installation",
{
type: "category",
label: "Get to know ZITADEL",
@ -93,43 +94,6 @@ module.exports = {
collapsed: false,
items: ["guides/customization/branding", "guides/customization/texts"],
},
{
type: "category",
label: "Installation",
collapsed: true,
items: [
{
type: "category",
label: "CAOS Managed",
collapsed: true,
items: [
"guides/installation/shared-cloud",
"guides/installation/managed-dedicated-instance",
],
},
{
type: "category",
label: "CAOS Service Packages",
collapsed: true,
items: [
"guides/installation/setup",
"guides/installation/setup-orbos",
"guides/installation/checkup",
],
},
{
type: "category",
label: "Self Managed",
collapsed: true,
items: [
"guides/installation/crd",
"guides/installation/gitops",
"guides/installation/orbos",
],
},
],
},
{
type: "category",
label: "Trainings",