chore!: remove CockroachDB Support (#9444)

This PR removes CockroachDB from the list of supported databases for
Zitadel v3. This decision allows us to focus our development and support
efforts on PostgreSQL, ensuring a more robust and streamlined experience
for our users.

## Key Changes

* End of Support: CockroachDB is no longer a supported database.
* Focus on PostgreSQL: All future development and support efforts will be directed towards PostgreSQL.

## Impact

Existing deployments that use CockroachDB must migrate to PostgreSQL.

CockroachDB-specific configuration (the `Database.cockroach` section and the `ZITADEL_DATABASE_COCKROACH_*` environment variables) is no longer valid.
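For illustration, the shape of the change in configuration terms (values abbreviated; the exact defaults are visible in the `defaults.yaml` hunk further down):

```yaml
# No longer valid after this change:
Database:
  cockroach:
    Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
    Port: 26257     # ZITADEL_DATABASE_COCKROACH_PORT
---
# Supported configuration going forward:
Database:
  postgres:
    Host: localhost # ZITADEL_DATABASE_POSTGRES_HOST
    Port: 5432      # ZITADEL_DATABASE_POSTGRES_PORT
```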

## Migration Guidance

Users currently relying on CockroachDB are encouraged to migrate to
PostgreSQL. We provide documentation and tooling (the `mirror` command) to
assist with this process. Please refer to [the mirror
docs](https://zitadel.com/docs/self-hosting/manage/cli/mirror) for
detailed instructions.
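As a rough sketch only (hostnames and credentials below are placeholders, not part of this PR), a mirror configuration pairs a `Source` block pointing at the existing CockroachDB with a `Destination` block pointing at PostgreSQL, and is passed to the `zitadel mirror` command via its `--config` flag; the linked guide documents the exact invocation and steps:

```yaml
Source:
  cockroach:
    Host: crdb.example.internal # placeholder
    Port: 26257
    Database: zitadel
Destination:
  postgres:
    Host: pg.example.internal # placeholder
    Port: 5432
    Database: zitadel
```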

Benefits:

* Simplified Development: Concentrating on a single database reduces
complexity and allows for more efficient development.
* Improved Support: Focusing on PostgreSQL enables us to provide better
support and documentation.
* Enhanced Performance: We can dedicate resources to optimizing
performance specifically for PostgreSQL.

## Testing

This change primarily affects deployment and configuration. After upgrading,
verify that your ZITADEL instance starts and operates correctly against PostgreSQL.

You can find more information about testing the migration in [the mirror
guide](https://zitadel.com/docs/self-hosting/manage/cli/mirror).
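For contributors, a minimal sketch of exercising the PostgreSQL-only setup locally, using the Make targets touched in this PR (the master key below is the well-known test value used in CI, not a production secret):

```bash
# start the supporting containers (only the cache remains; PostgreSQL is embedded in the test binary)
make core_integration_db_up

# run the core integration tests against the default postgres config
export ZITADEL_MASTERKEY="MasterkeyNeedsToHave32Characters"
make core_integration_test

# tear everything down again
make core_integration_db_down
```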


## Additional info

- part of https://github.com/zitadel/zitadel/issues/9414
- docs are updated in a separate PR
- changes to the mirror command will be made in a separate PR

## Changes

* Pipelines use PostgreSQL
* No distinction of database types remains in the code
* CockroachDB-specific code is removed
* [embedded-postgres](https://github.com/fergusstrange/embedded-postgres) is added
for testing (see the sketch below)
* Commands fail if CockroachDB is configured and print information linking
to the docs
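A minimal sketch of how embedded-postgres can back a test, assuming a throwaway database per test run; the package name, credentials, and port here are illustrative and not the exact wiring added in this PR:

```go
package integration_test

import (
	"database/sql"
	"testing"

	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" database/sql driver
)

// TestWithEmbeddedPostgres starts a throwaway PostgreSQL instance for the
// duration of the test, so no external database container is required.
func TestWithEmbeddedPostgres(t *testing.T) {
	postgres := embeddedpostgres.NewDatabase(
		embeddedpostgres.DefaultConfig().
			Username("zitadel").
			Password("zitadel").
			Database("zitadel").
			Port(5433), // illustrative port to avoid clashing with a local postgres
	)
	if err := postgres.Start(); err != nil {
		t.Fatalf("start embedded postgres: %v", err)
	}
	defer func() {
		if err := postgres.Stop(); err != nil {
			t.Errorf("stop embedded postgres: %v", err)
		}
	}()

	db, err := sql.Open("pgx", "postgres://zitadel:zitadel@localhost:5433/zitadel?sslmode=disable")
	if err != nil {
		t.Fatalf("open connection: %v", err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		t.Fatalf("ping embedded postgres: %v", err)
	}
}
```

The upshot is that tests no longer need a database container or an `INTEGRATION_DB_FLAVOR` switch, since the test binary can bring up PostgreSQL itself.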
Commit 29514961e8 (parent 47a2ab5343), authored by Silvan on 2025-03-18 08:18:52 +01:00 and committed via GitHub.
289 changed files with 1374 additions and 2993 deletions.

View File

@@ -76,7 +76,6 @@ jobs:
        if: ${{ steps.cache.outputs.cache-hit != 'true' }}
        env:
          ZITADEL_MASTERKEY: MasterkeyNeedsToHave32Characters
-         INTEGRATION_DB_FLAVOR: postgres
        run: make core_integration_test
      -
        name: upload server logs
@@ -102,71 +101,3 @@ jobs:
        with:
          key: integration-test-postgres-${{ inputs.core_cache_key }}
          path: ${{ steps.go-cache-path.outputs.GO_CACHE_PATH }}
# TODO: produces the following output: ERROR: unknown command "cockroach start-single-node --insecure" for "cockroach"
# cockroach:
# runs-on: ubuntu-latest
# services:
# cockroach:
# image: cockroachdb/cockroach:latest
# ports:
# - 26257:26257
# - 8080:8080
# env:
# COCKROACH_ARGS: "start-single-node --insecure"
# options: >-
# --health-cmd "curl http://localhost:8080/health?ready=1 || exit 1"
# --health-interval 10s
# --health-timeout 5s
# --health-retries 5
# --health-start-period 10s
# steps:
# -
# uses: actions/checkout@v4
# -
# uses: actions/setup-go@v5
# with:
# go-version: ${{ inputs.go_version }}
# -
# uses: actions/cache/restore@v4
# timeout-minutes: 1
# name: restore core
# with:
# path: ${{ inputs.core_cache_path }}
# key: ${{ inputs.core_cache_key }}
# fail-on-cache-miss: true
# -
# id: go-cache-path
# name: set cache path
# run: echo "GO_CACHE_PATH=$(go env GOCACHE)" >> $GITHUB_OUTPUT
# -
# uses: actions/cache/restore@v4
# id: cache
# timeout-minutes: 1
# name: restore previous results
# with:
# key: integration-test-crdb-${{ inputs.core_cache_key }}
# restore-keys: |
# integration-test-crdb-core-
# path: ${{ steps.go-cache-path.outputs.GO_CACHE_PATH }}
# -
# name: test
# if: ${{ steps.cache.outputs.cache-hit != 'true' }}
# env:
# ZITADEL_MASTERKEY: MasterkeyNeedsToHave32Characters
# INTEGRATION_DB_FLAVOR: cockroach
# run: make core_integration_test
# -
# name: publish coverage
# uses: codecov/codecov-action@v4.3.0
# with:
# file: profile.cov
# name: core-integration-tests-cockroach
# flags: core-integration-tests-cockroach
# -
# uses: actions/cache/save@v4
# name: cache results
# if: ${{ steps.cache.outputs.cache-hit != 'true' }}
# with:
# key: integration-test-crdb-${{ inputs.core_cache_key }}
# path: ${{ steps.go-cache-path.outputs.GO_CACHE_PATH }}

View File

@@ -216,12 +216,6 @@ Integration tests are run as gRPC clients against a running ZITADEL server binar
The server binary is typically [build with coverage enabled](https://go.dev/doc/build-cover).
It is also possible to run a ZITADEL sever in a debugger and run the integrations tests like that. In order to run the server, a database is required.
-The database flavor can **optionally** be set in the environment to `cockroach` or `postgres`. The default is `postgres`.
-```bash
-export INTEGRATION_DB_FLAVOR="cockroach"
-```
In order to prepare the local system, the following will bring up the database, builds a coverage binary, initializes the database and starts the sever.
```bash

View File

@@ -8,10 +8,9 @@ COMMIT_SHA ?= $(shell git rev-parse HEAD)
ZITADEL_IMAGE ?= zitadel:local
GOCOVERDIR = tmp/coverage
-INTEGRATION_DB_FLAVOR ?= postgres
ZITADEL_MASTERKEY ?= MasterkeyNeedsToHave32Characters
-export GOCOVERDIR INTEGRATION_DB_FLAVOR ZITADEL_MASTERKEY
+export GOCOVERDIR ZITADEL_MASTERKEY
.PHONY: compile
compile: core_build console_build compile_pipeline
@@ -113,7 +112,7 @@ core_unit_test:
.PHONY: core_integration_db_up
core_integration_db_up:
-	docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} cache
+	docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait cache
.PHONY: core_integration_db_down
core_integration_db_down:
@@ -123,13 +122,13 @@ core_integration_db_down:
core_integration_setup:
	go build -cover -race -tags integration -o zitadel.test main.go
	mkdir -p $${GOCOVERDIR}
-	GORACE="halt_on_error=1" ./zitadel.test init --config internal/integration/config/zitadel.yaml --config internal/integration/config/${INTEGRATION_DB_FLAVOR}.yaml
+	GORACE="halt_on_error=1" ./zitadel.test init --config internal/integration/config/zitadel.yaml --config internal/integration/config/postgres.yaml
-	GORACE="halt_on_error=1" ./zitadel.test setup --masterkeyFromEnv --init-projections --config internal/integration/config/zitadel.yaml --config internal/integration/config/${INTEGRATION_DB_FLAVOR}.yaml --steps internal/integration/config/steps.yaml
+	GORACE="halt_on_error=1" ./zitadel.test setup --masterkeyFromEnv --init-projections --config internal/integration/config/zitadel.yaml --config internal/integration/config/postgres.yaml --steps internal/integration/config/steps.yaml
.PHONY: core_integration_server_start
core_integration_server_start: core_integration_setup
	GORACE="log_path=tmp/race.log" \
-	./zitadel.test start --masterkeyFromEnv --config internal/integration/config/zitadel.yaml --config internal/integration/config/${INTEGRATION_DB_FLAVOR}.yaml \
+	./zitadel.test start --masterkeyFromEnv --config internal/integration/config/zitadel.yaml --config internal/integration/config/postgres.yaml \
	> tmp/zitadel.log 2>&1 \
	& printf $$! > tmp/zitadel.pid

View File

@@ -199,7 +199,6 @@ ENV PATH="/go/bin:/usr/local/go/bin:${PATH}"
WORKDIR /go/src/github.com/zitadel/zitadel
# default vars
-ENV DB_FLAVOR=postgres
ENV POSTGRES_USER=zitadel
ENV POSTGRES_DB=zitadel
ENV POSTGRES_PASSWORD=postgres
@@ -231,12 +230,6 @@ COPY --from=test-core-unit /go/src/github.com/zitadel/zitadel/profile.cov /cover
# integration test core
# #######################################
FROM test-core-base AS test-core-integration
-ENV DB_FLAVOR=cockroach
-# install cockroach
-COPY --from=cockroachdb/cockroach:latest /cockroach/cockroach /usr/local/bin/
-ENV COCKROACH_BINARY=/cockroach/cockroach
ENV ZITADEL_MASTERKEY=MasterkeyNeedsToHave32Characters
COPY build/core-integration-test.sh /usr/local/bin/run-tests.sh

View File

@@ -110,67 +110,36 @@ PublicHostHeaders: # ZITADEL_PUBLICHOSTHEADERS
WebAuthNName: ZITADEL # ZITADEL_WEBAUTHNNAME
Database:
-  # CockroachDB is the default database of ZITADEL
-  cockroach:
-    Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
-    Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
-    Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
-    MaxOpenConns: 5 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
-    MaxIdleConns: 2 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
-    MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
-    MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
-    Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
+  # Postgres is the default database of ZITADEL
+  postgres:
+    Host: localhost # ZITADEL_DATABASE_POSTGRES_HOST
+    Port: 5432 # ZITADEL_DATABASE_POSTGRES_PORT
+    Database: zitadel # ZITADEL_DATABASE_POSTGRES_DATABASE
+    MaxOpenConns: 5 # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
+    MaxIdleConns: 2 # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
+    MaxConnLifetime: 30m # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
+    MaxConnIdleTime: 5m # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
+    Options: "" # ZITADEL_DATABASE_POSTGRES_OPTIONS
    User:
-      Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
-      Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
+      Username: zitadel # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
+      Password: "" # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
      SSL:
-        Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
-        RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
-        Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
-        Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
+        Mode: disable # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
+        RootCert: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
+        Cert: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
+        Key: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
    Admin:
      # By default, ExistingDatabase is not specified in the connection string
      # If the connection resolves to a database that is not existing in your system, configure an existing one here
-      # It is used in zitadel init to connect to cockroach and create a dedicated database for ZITADEL.
-      ExistingDatabase: # ZITADEL_DATABASE_COCKROACH_ADMIN_EXISTINGDATABASE
-      Username: root # ZITADEL_DATABASE_COCKROACH_ADMIN_USERNAME
-      Password: "" # ZITADEL_DATABASE_COCKROACH_ADMIN_PASSWORD
-      SSL:
-        Mode: disable # ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_MODE
-        RootCert: "" # ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_ROOTCERT
-        Cert: "" # ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_CERT
-        Key: "" # ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_KEY
-  # Postgres is used as soon as a value is set
-  # The values describe the possible fields to set values
-  postgres:
-    Host: # ZITADEL_DATABASE_POSTGRES_HOST
-    Port: # ZITADEL_DATABASE_POSTGRES_PORT
-    Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
-    MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
-    MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
-    MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
-    MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
-    Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
-    User:
-      Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
-      Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
-      SSL:
-        Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
-        RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
-        Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
-        Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
-    Admin:
-      # The default ExistingDatabase is postgres
-      # If your db system doesn't have a database named postgres, configure an existing database here
      # It is used in zitadel init to connect to postgres and create a dedicated database for ZITADEL.
      ExistingDatabase: # ZITADEL_DATABASE_POSTGRES_ADMIN_EXISTINGDATABASE
-      Username: # ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
-      Password: # ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
+      Username: postgres # ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME
+      Password: postgres # ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD
      SSL:
-        Mode: # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE
-        RootCert: # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_ROOTCERT
-        Cert: # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_CERT
-        Key: # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_KEY
+        Mode: disable # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE
+        RootCert: "" # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_ROOTCERT
+        Cert: "" # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_CERT
+        Key: "" # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_KEY
# Caches are EXPERIMENTAL. The following config may have breaking changes in the future.
# If no config is provided, caching is disabled by default.
@@ -444,7 +413,6 @@ Projections:
Notifications:
  # Notifications can be processed by either a sequential mode (legacy) or a new parallel mode.
  # The parallel mode is currently only recommended for Postgres databases.
-  # For CockroachDB, the sequential mode is recommended, see: https://github.com/zitadel/zitadel/issues/9002
  # If legacy mode is enabled, the worker config below is ignored.
  LegacyEnabled: true # ZITADEL_NOTIFICATIONS_LEGACYENABLED
  # The amount of workers processing the notification request events.

View File

@@ -19,7 +19,7 @@ func MustNewConfig(v *viper.Viper) *Config {
	config := new(Config)
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
-			database.DecodeHook,
+			database.DecodeHook(false),
			mapstructure.TextUnmarshallerHookFunc(),
		)),
	)

View File

@@ -12,20 +12,17 @@ import (
) )
var ( var (
//go:embed sql/cockroach/* //go:embed sql/*.sql
//go:embed sql/postgres/*
stmts embed.FS stmts embed.FS
createUserStmt string createUserStmt string
grantStmt string grantStmt string
settingsStmt string
databaseStmt string databaseStmt string
createEventstoreStmt string createEventstoreStmt string
createProjectionsStmt string createProjectionsStmt string
createSystemStmt string createSystemStmt string
createEncryptionKeysStmt string createEncryptionKeysStmt string
createEventsStmt string createEventsStmt string
createSystemSequenceStmt string
createUniqueConstraints string createUniqueConstraints string
roleAlreadyExistsCode = "42710" roleAlreadyExistsCode = "42710"
@@ -39,7 +36,7 @@ func New() *cobra.Command {
Long: `Sets up the minimum requirements to start ZITADEL. Long: `Sets up the minimum requirements to start ZITADEL.
Prerequisites: Prerequisites:
- database (PostgreSql or cockroachdb) - PostgreSql database
The user provided by flags needs privileges to The user provided by flags needs privileges to
- create the database if it does not exist - create the database if it does not exist
@@ -53,7 +50,7 @@ The user provided by flags needs privileges to
}, },
} }
cmd.AddCommand(newZitadel(), newDatabase(), newUser(), newGrant(), newSettings()) cmd.AddCommand(newZitadel(), newDatabase(), newUser(), newGrant())
return cmd return cmd
} }
@@ -62,7 +59,6 @@ func InitAll(ctx context.Context, config *Config) {
VerifyUser(config.Database.Username(), config.Database.Password()), VerifyUser(config.Database.Username(), config.Database.Password()),
VerifyDatabase(config.Database.DatabaseName()), VerifyDatabase(config.Database.DatabaseName()),
VerifyGrant(config.Database.DatabaseName(), config.Database.Username()), VerifyGrant(config.Database.DatabaseName(), config.Database.Username()),
VerifySettings(config.Database.DatabaseName(), config.Database.Username()),
) )
logging.OnError(err).Fatal("unable to initialize the database") logging.OnError(err).Fatal("unable to initialize the database")
@@ -73,7 +69,7 @@ func InitAll(ctx context.Context, config *Config) {
func initialise(ctx context.Context, config database.Config, steps ...func(context.Context, *database.DB) error) error { func initialise(ctx context.Context, config database.Config, steps ...func(context.Context, *database.DB) error) error {
logging.Info("initialization started") logging.Info("initialization started")
err := ReadStmts(config.Type()) err := ReadStmts()
if err != nil { if err != nil {
return err return err
} }
@@ -97,58 +93,48 @@ func Init(ctx context.Context, db *database.DB, steps ...func(context.Context, *
return nil return nil
} }
func ReadStmts(typ string) (err error) { func ReadStmts() (err error) {
createUserStmt, err = readStmt(typ, "01_user") createUserStmt, err = readStmt("01_user")
if err != nil { if err != nil {
return err return err
} }
databaseStmt, err = readStmt(typ, "02_database") databaseStmt, err = readStmt("02_database")
if err != nil { if err != nil {
return err return err
} }
grantStmt, err = readStmt(typ, "03_grant_user") grantStmt, err = readStmt("03_grant_user")
if err != nil { if err != nil {
return err return err
} }
createEventstoreStmt, err = readStmt(typ, "04_eventstore") createEventstoreStmt, err = readStmt("04_eventstore")
if err != nil { if err != nil {
return err return err
} }
createProjectionsStmt, err = readStmt(typ, "05_projections") createProjectionsStmt, err = readStmt("05_projections")
if err != nil { if err != nil {
return err return err
} }
createSystemStmt, err = readStmt(typ, "06_system") createSystemStmt, err = readStmt("06_system")
if err != nil { if err != nil {
return err return err
} }
createEncryptionKeysStmt, err = readStmt(typ, "07_encryption_keys_table") createEncryptionKeysStmt, err = readStmt("07_encryption_keys_table")
if err != nil { if err != nil {
return err return err
} }
createEventsStmt, err = readStmt(typ, "08_events_table") createEventsStmt, err = readStmt("08_events_table")
if err != nil { if err != nil {
return err return err
} }
createSystemSequenceStmt, err = readStmt(typ, "09_system_sequence") createUniqueConstraints, err = readStmt("10_unique_constraints_table")
if err != nil {
return err
}
createUniqueConstraints, err = readStmt(typ, "10_unique_constraints_table")
if err != nil {
return err
}
settingsStmt, err = readStmt(typ, "11_settings")
if err != nil { if err != nil {
return err return err
} }
@@ -156,7 +142,7 @@ func ReadStmts(typ string) (err error) {
return nil return nil
} }
func readStmt(typ, step string) (string, error) { func readStmt(step string) (string, error) {
stmt, err := stmts.ReadFile("sql/" + typ + "/" + step + ".sql") stmt, err := stmts.ReadFile("sql/" + step + ".sql")
return string(stmt), err return string(stmt), err
} }

View File

@@ -1,2 +1,2 @@
-- replace %[1]s with the name of the user
-CREATE USER IF NOT EXISTS "%[1]s"
+CREATE USER "%[1]s"

View File

@@ -1,2 +1,2 @@
-- replace %[1]s with the name of the database
-CREATE DATABASE IF NOT EXISTS "%[1]s";
+CREATE DATABASE "%[1]s"

View File

@@ -11,6 +11,5 @@ The sql-files in this folder initialize the ZITADEL database and user. These obj
- 05_projections.sql: creates the schema needed to read the data
- 06_system.sql: creates the schema needed for ZITADEL itself
- 07_encryption_keys_table.sql: creates the table for encryption keys (for event data)
-- files 08_enable_hash_sharded_indexes.sql and 09_events_table.sql must run in the same session
-- 08_enable_hash_sharded_indexes.sql enables the [hash sharded index](https://www.cockroachlabs.com/docs/stable/hash-sharded-indexes.html) feature for this session
-- 09_events_table.sql creates the table for eventsourcing
+- 08_events_table.sql creates the table for eventsourcing
+- 10_unique_constraints_table.sql creates the table to check unique constraints for events

View File

@@ -1,4 +0,0 @@
-- replace the first %[1]s with the database
-- replace the second \%[2]s with the user
GRANT ALL ON DATABASE "%[1]s" TO "%[2]s";
GRANT SYSTEM VIEWACTIVITY TO "%[2]s";

View File

@@ -1,116 +0,0 @@
CREATE TABLE IF NOT EXISTS eventstore.events2 (
instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_type TEXT NOT NULL
, "sequence" BIGINT NOT NULL
, revision SMALLINT NOT NULL
, created_at TIMESTAMPTZ NOT NULL
, payload JSONB
, creator TEXT NOT NULL
, "owner" TEXT NOT NULL
, "position" DECIMAL NOT NULL
, in_tx_order INTEGER NOT NULL
, PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
, INDEX es_active_instances (created_at DESC) STORING ("position")
, INDEX es_wm (aggregate_id, instance_id, aggregate_type, event_type)
, INDEX es_projection (instance_id, aggregate_type, event_type, "position" DESC)
);
-- represents an event to be created.
CREATE TYPE IF NOT EXISTS eventstore.command AS (
instance_id TEXT
, aggregate_type TEXT
, aggregate_id TEXT
, command_type TEXT
, revision INT2
, payload JSONB
, creator TEXT
, owner TEXT
);
CREATE OR REPLACE FUNCTION eventstore.commands_to_events(commands eventstore.command[]) RETURNS SETOF eventstore.events2 VOLATILE AS $$
SELECT
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
, ("c").command_type AS event_type
, cs.sequence + ROW_NUMBER() OVER (PARTITION BY ("c").instance_id, ("c").aggregate_type, ("c").aggregate_id ORDER BY ("c").in_tx_order) AS sequence
, ("c").revision
, hlc_to_timestamp(cluster_logical_timestamp()) AS created_at
, ("c").payload
, ("c").creator
, cs.owner
, cluster_logical_timestamp() AS position
, ("c").in_tx_order
FROM (
SELECT
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
, ("c").command_type
, ("c").revision
, ("c").payload
, ("c").creator
, ("c").owner
, ROW_NUMBER() OVER () AS in_tx_order
FROM
UNNEST(commands) AS "c"
) AS "c"
JOIN (
SELECT
cmds.instance_id
, cmds.aggregate_type
, cmds.aggregate_id
, CASE WHEN (e.owner IS NOT NULL OR e.owner <> '') THEN e.owner ELSE command_owners.owner END AS owner
, COALESCE(MAX(e.sequence), 0) AS sequence
FROM (
SELECT DISTINCT
("cmds").instance_id
, ("cmds").aggregate_type
, ("cmds").aggregate_id
, ("cmds").owner
FROM UNNEST(commands) AS "cmds"
) AS cmds
LEFT JOIN eventstore.events2 AS e
ON cmds.instance_id = e.instance_id
AND cmds.aggregate_type = e.aggregate_type
AND cmds.aggregate_id = e.aggregate_id
JOIN (
SELECT
DISTINCT ON (
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
)
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
, ("c").owner
FROM
UNNEST(commands) AS "c"
) AS command_owners ON
cmds.instance_id = command_owners.instance_id
AND cmds.aggregate_type = command_owners.aggregate_type
AND cmds.aggregate_id = command_owners.aggregate_id
GROUP BY
cmds.instance_id
, cmds.aggregate_type
, cmds.aggregate_id
, 4 -- owner
) AS cs
ON ("c").instance_id = cs.instance_id
AND ("c").aggregate_type = cs.aggregate_type
AND ("c").aggregate_id = cs.aggregate_id
ORDER BY
in_tx_order
$$ LANGUAGE SQL;
CREATE OR REPLACE FUNCTION eventstore.push(commands eventstore.command[]) RETURNS SETOF eventstore.events2 AS $$
INSERT INTO eventstore.events2
SELECT * FROM eventstore.commands_to_events(commands)
RETURNING *
$$ LANGUAGE SQL;

View File

@@ -1 +0,0 @@
CREATE SEQUENCE IF NOT EXISTS eventstore.system_seq

View File

@@ -1,6 +0,0 @@
CREATE TABLE IF NOT EXISTS eventstore.unique_constraints (
instance_id TEXT,
unique_type TEXT,
unique_field TEXT,
PRIMARY KEY (instance_id, unique_type, unique_field)
)

View File

@@ -1,4 +0,0 @@
-- replace the first %[1]q with the database in double quotes
-- replace the second \%[2]q with the user in double quotes$
-- For more information see technical advisory 10009 (https://zitadel.com/docs/support/advisory/a10009)
ALTER ROLE %[2]q IN DATABASE %[1]q SET enable_durable_locking_for_serializable = on;

View File

@@ -1 +0,0 @@
CREATE USER "%[1]s"

View File

@@ -1 +0,0 @@
CREATE DATABASE "%[1]s"

View File

@@ -1,3 +0,0 @@
CREATE SCHEMA IF NOT EXISTS eventstore;
GRANT ALL ON ALL TABLES IN SCHEMA eventstore TO "%[1]s";

View File

@@ -1,3 +0,0 @@
CREATE SCHEMA IF NOT EXISTS projections;
GRANT ALL ON ALL TABLES IN SCHEMA projections TO "%[1]s";

View File

@@ -1,3 +0,0 @@
CREATE SCHEMA IF NOT EXISTS system;
GRANT ALL ON ALL TABLES IN SCHEMA system TO "%[1]s";

View File

@@ -1,6 +0,0 @@
CREATE TABLE IF NOT EXISTS system.encryption_keys (
id TEXT NOT NULL
, key TEXT NOT NULL
, PRIMARY KEY (id)
);

View File

@@ -1 +0,0 @@
CREATE SEQUENCE IF NOT EXISTS eventstore.system_seq;

View File

@@ -19,7 +19,7 @@ func newDatabase() *cobra.Command {
	Long: `Sets up the ZITADEL database.
Prerequisites:
-- cockroachDB or postgreSQL
+- postgreSQL
The user provided by flags needs privileges to
- create the database if it does not exist

View File

@@ -8,7 +8,7 @@ import (
) )
func Test_verifyDB(t *testing.T) { func Test_verifyDB(t *testing.T) {
err := ReadStmts("cockroach") //TODO: check all dialects err := ReadStmts()
if err != nil { if err != nil {
t.Errorf("unable to read stmts: %v", err) t.Errorf("unable to read stmts: %v", err)
t.FailNow() t.FailNow()
@@ -27,7 +27,7 @@ func Test_verifyDB(t *testing.T) {
name: "doesn't exists, create fails", name: "doesn't exists, create fails",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE IF NOT EXISTS \"zitadel\"", sql.ErrTxDone), expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", sql.ErrTxDone),
), ),
database: "zitadel", database: "zitadel",
}, },
@@ -37,7 +37,7 @@ func Test_verifyDB(t *testing.T) {
name: "doesn't exists, create successful", name: "doesn't exists, create successful",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE IF NOT EXISTS \"zitadel\"", nil), expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", nil),
), ),
database: "zitadel", database: "zitadel",
}, },
@@ -47,7 +47,7 @@ func Test_verifyDB(t *testing.T) {
name: "already exists", name: "already exists",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE IF NOT EXISTS \"zitadel\"", nil), expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", nil),
), ),
database: "zitadel", database: "zitadel",
}, },

View File

@@ -19,7 +19,7 @@ func newGrant() *cobra.Command {
	Long: `Sets ALL grant to the database user.
Prerequisites:
-- cockroachDB or postgreSQL
+- postgreSQL
`,
	Run: func(cmd *cobra.Command, args []string) {
		config := MustNewConfig(viper.GetViper())

View File

@@ -1,45 +0,0 @@
package initialise
import (
"context"
_ "embed"
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
func newSettings() *cobra.Command {
return &cobra.Command{
Use: "settings",
Short: "Ensures proper settings on the database",
Long: `Ensures proper settings on the database.
Prerequisites:
- cockroachDB or postgreSQL
Cockroach
- Sets enable_durable_locking_for_serializable to on for the zitadel user and database
`,
Run: func(cmd *cobra.Command, args []string) {
config := MustNewConfig(viper.GetViper())
err := initialise(cmd.Context(), config.Database, VerifySettings(config.Database.DatabaseName(), config.Database.Username()))
logging.OnError(err).Fatal("unable to set settings")
},
}
}
func VerifySettings(databaseName, username string) func(context.Context, *database.DB) error {
return func(ctx context.Context, db *database.DB) error {
if db.Type() == "postgres" {
return nil
}
logging.WithFields("user", username, "database", databaseName).Info("verify settings")
return exec(ctx, db, fmt.Sprintf(settingsStmt, databaseName, username), nil)
}
}

View File

@@ -19,7 +19,7 @@ func newUser() *cobra.Command {
	Long: `Sets up the ZITADEL database user.
Prerequisites:
-- cockroachDB or postgreSQL
+- postgreSQL
The user provided by flags needs privileges to
- create the database if it does not exist

View File

@@ -8,7 +8,7 @@ import (
) )
func Test_verifyUser(t *testing.T) { func Test_verifyUser(t *testing.T) {
err := ReadStmts("cockroach") //TODO: check all dialects err := ReadStmts()
if err != nil { if err != nil {
t.Errorf("unable to read stmts: %v", err) t.Errorf("unable to read stmts: %v", err)
t.FailNow() t.FailNow()
@@ -28,7 +28,7 @@ func Test_verifyUser(t *testing.T) {
name: "doesn't exists, create fails", name: "doesn't exists, create fails",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel-user with the name of the user\nCREATE USER IF NOT EXISTS \"zitadel-user\"", sql.ErrTxDone), expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\"", sql.ErrTxDone),
), ),
username: "zitadel-user", username: "zitadel-user",
password: "", password: "",
@@ -39,7 +39,7 @@ func Test_verifyUser(t *testing.T) {
name: "correct without password", name: "correct without password",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel-user with the name of the user\nCREATE USER IF NOT EXISTS \"zitadel-user\"", nil), expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\"", nil),
), ),
username: "zitadel-user", username: "zitadel-user",
password: "", password: "",
@@ -50,7 +50,7 @@ func Test_verifyUser(t *testing.T) {
name: "correct with password", name: "correct with password",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel-user with the name of the user\nCREATE USER IF NOT EXISTS \"zitadel-user\" WITH PASSWORD 'password'", nil), expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\" WITH PASSWORD 'password'", nil),
), ),
username: "zitadel-user", username: "zitadel-user",
password: "password", password: "password",
@@ -61,7 +61,7 @@ func Test_verifyUser(t *testing.T) {
name: "already exists", name: "already exists",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectExec("-- replace zitadel-user with the name of the user\nCREATE USER IF NOT EXISTS \"zitadel-user\" WITH PASSWORD 'password'", nil), expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\" WITH PASSWORD 'password'", nil),
), ),
username: "zitadel-user", username: "zitadel-user",
password: "", password: "",

View File

@@ -21,7 +21,7 @@ func newZitadel() *cobra.Command {
Long: `initialize ZITADEL internals. Long: `initialize ZITADEL internals.
Prerequisites: Prerequisites:
- cockroachDB or postgreSQL with user and database - postgreSQL with user and database
`, `,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
config := MustNewConfig(viper.GetViper()) config := MustNewConfig(viper.GetViper())
@@ -32,7 +32,7 @@ Prerequisites:
} }
func VerifyZitadel(ctx context.Context, db *database.DB, config database.Config) error { func VerifyZitadel(ctx context.Context, db *database.DB, config database.Config) error {
err := ReadStmts(config.Type()) err := ReadStmts()
if err != nil { if err != nil {
return err return err
} }
@@ -68,11 +68,6 @@ func VerifyZitadel(ctx context.Context, db *database.DB, config database.Config)
return err return err
} }
logging.WithFields().Info("verify system sequence")
if err := exec(ctx, conn, createSystemSequenceStmt, nil); err != nil {
return err
}
logging.WithFields().Info("verify unique constraints") logging.WithFields().Info("verify unique constraints")
if err := exec(ctx, conn, createUniqueConstraints, nil); err != nil { if err := exec(ctx, conn, createUniqueConstraints, nil); err != nil {
return err return err

View File

@@ -9,7 +9,7 @@ import (
)
func Test_verifyEvents(t *testing.T) {
-	err := ReadStmts("cockroach") //TODO: check all dialects
+	err := ReadStmts()
	if err != nil {
		t.Errorf("unable to read stmts: %v", err)
		t.FailNow()

View File

@@ -40,7 +40,7 @@ func newKey() *cobra.Command {
	Long: `create new encryption key(s) (encrypted by the provided master key)
provide key(s) by YAML file and/or by argument
Requirements:
-- cockroachdb`,
+- postgreSQL`,
	Example: `new -f keys.yaml
new key1=somekey key2=anotherkey
new -f keys.yaml key2=anotherkey`,

View File

@@ -71,7 +71,7 @@ func mustNewConfig(v *viper.Viper, config any) {
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
			mapstructure.StringToSliceHookFunc(","),
-			database.DecodeHook,
+			database.DecodeHook(true),
			actions.HTTPConfigDecodeHook,
			hook.EnumHookFunc(internal_authz.MemberTypeString),
			mapstructure.TextUnmarshallerHookFunc(),

View File

@@ -5,8 +5,6 @@ Source:
Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
EventPushConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
@@ -39,41 +37,20 @@ Source:
Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
Destination: Destination:
cockroach:
Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
MaxOpenConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
EventPushConnRatio: 0.01 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
User:
Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
SSL:
Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
# Postgres is used as soon as a value is set
# The values describe the possible fields to set values
postgres: postgres:
Host: # ZITADEL_DATABASE_POSTGRES_HOST Host: localhost # ZITADEL_DATABASE_POSTGRES_HOST
Port: # ZITADEL_DATABASE_POSTGRES_PORT Port: 5432 # ZITADEL_DATABASE_POSTGRES_PORT
Database: # ZITADEL_DATABASE_POSTGRES_DATABASE Database: zitadel # ZITADEL_DATABASE_POSTGRES_DATABASE
MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS MaxOpenConns: 5 # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS MaxIdleConns: 2 # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME MaxConnLifetime: 30m # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME MaxConnIdleTime: 5m # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS Options: "" # ZITADEL_DATABASE_POSTGRES_OPTIONS
User: User:
Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME Username: zitadel # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
SSL: SSL:
Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE Mode: disable # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY

View File

@@ -56,15 +56,15 @@ func copyEventstore(ctx context.Context, config *Migration) {
}
func positionQuery(db *db.DB) string {
-	switch db.Type() {
-	case "postgres":
+	// switch db.Type() {
+	// case "postgres":
	return "SELECT EXTRACT(EPOCH FROM clock_timestamp())"
-	case "cockroach":
-		return "SELECT cluster_logical_timestamp()"
-	default:
-		logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
-		return ""
-	}
+	// case "cockroach":
+	// 	return "SELECT cluster_logical_timestamp()"
+	// default:
+	// 	logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
+	// 	return ""
+	// }
}
func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {

View File

@@ -117,7 +117,7 @@ func projections(
	staticStorage, err := config.AssetStorage.NewStorage(client.DB)
	logging.OnError(err).Fatal("unable create static storage")
-	config.Eventstore.Querier = old_es.NewCRDB(client)
+	config.Eventstore.Querier = old_es.NewPostgres(client)
	config.Eventstore.Pusher = new_es.NewEventstore(client)
	es := eventstore.NewEventstore(config.Eventstore)
	esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{

View File

@@ -3,7 +3,7 @@ package setup
import ( import (
"context" "context"
"database/sql" "database/sql"
"embed" _ "embed"
"strings" "strings"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
@@ -12,31 +12,20 @@ import (
var ( var (
//go:embed 07/logstore.sql //go:embed 07/logstore.sql
createLogstoreSchema07 string createLogstoreSchema07 string
//go:embed 07/cockroach/access.sql //go:embed 07/access.sql
//go:embed 07/postgres/access.sql createAccessLogsTable07 string
createAccessLogsTable07 embed.FS //go:embed 07/execution.sql
//go:embed 07/cockroach/execution.sql createExecutionLogsTable07 string
//go:embed 07/postgres/execution.sql
createExecutionLogsTable07 embed.FS
) )
type LogstoreTables struct { type LogstoreTables struct {
dbClient *sql.DB dbClient *sql.DB
username string username string
dbType string
} }
func (mig *LogstoreTables) Execute(ctx context.Context, _ eventstore.Event) error { func (mig *LogstoreTables) Execute(ctx context.Context, _ eventstore.Event) error {
accessStmt, err := readStmt(createAccessLogsTable07, "07", mig.dbType, "access.sql") stmt := strings.ReplaceAll(createLogstoreSchema07, "%[1]s", mig.username) + createAccessLogsTable07 + createExecutionLogsTable07
if err != nil { _, err := mig.dbClient.ExecContext(ctx, stmt)
return err
}
executionStmt, err := readStmt(createExecutionLogsTable07, "07", mig.dbType, "execution.sql")
if err != nil {
return err
}
stmt := strings.ReplaceAll(createLogstoreSchema07, "%[1]s", mig.username) + accessStmt + executionStmt
_, err = mig.dbClient.ExecContext(ctx, stmt)
return err return err
} }

View File

@@ -1,14 +0,0 @@
CREATE TABLE IF NOT EXISTS logstore.access (
log_date TIMESTAMPTZ NOT NULL
, protocol INT NOT NULL
, request_url TEXT NOT NULL
, response_status INT NOT NULL
, request_headers JSONB
, response_headers JSONB
, instance_id TEXT NOT NULL
, project_id TEXT NOT NULL
, requested_domain TEXT
, requested_host TEXT
, INDEX protocol_date_desc (instance_id, protocol, log_date DESC) STORING (request_url, response_status, request_headers)
);

View File

@@ -1,11 +0,0 @@
CREATE TABLE IF NOT EXISTS logstore.execution (
log_date TIMESTAMPTZ NOT NULL
, took INTERVAL
, message TEXT NOT NULL
, loglevel INT NOT NULL
, instance_id TEXT NOT NULL
, action_id TEXT NOT NULL
, metadata JSONB
, INDEX log_date_desc (instance_id, log_date DESC) STORING (took)
);

View File

@@ -2,16 +2,15 @@ package setup
import ( import (
"context" "context"
"embed" _ "embed"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
) )
var ( var (
//go:embed 08/cockroach/08.sql //go:embed 08/08.sql
//go:embed 08/postgres/08.sql tokenIndexes08 string
tokenIndexes08 embed.FS
) )
type AuthTokenIndexes struct { type AuthTokenIndexes struct {
@@ -19,11 +18,7 @@ type AuthTokenIndexes struct {
} }
func (mig *AuthTokenIndexes) Execute(ctx context.Context, _ eventstore.Event) error { func (mig *AuthTokenIndexes) Execute(ctx context.Context, _ eventstore.Event) error {
stmt, err := readStmt(tokenIndexes08, "08", mig.dbClient.Type(), "08.sql") _, err := mig.dbClient.ExecContext(ctx, tokenIndexes08)
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, stmt)
return err return err
} }

View File

@@ -1,5 +0,0 @@
CREATE INDEX IF NOT EXISTS inst_refresh_tkn_idx ON auth.tokens(instance_id, refresh_token_id);
CREATE INDEX IF NOT EXISTS inst_app_tkn_idx ON auth.tokens(instance_id, application_id);
CREATE INDEX IF NOT EXISTS inst_ro_tkn_idx ON auth.tokens(instance_id, resource_owner);
DROP INDEX IF EXISTS auth.tokens@user_user_agent_idx;
CREATE INDEX IF NOT EXISTS inst_usr_agnt_tkn_idx ON auth.tokens(instance_id, user_id, user_agent_id);

View File

@@ -3,7 +3,7 @@ package setup
import ( import (
"context" "context"
"database/sql" "database/sql"
"embed" _ "embed"
"time" "time"
"github.com/cockroachdb/cockroach-go/v2/crdb" "github.com/cockroachdb/cockroach-go/v2/crdb"
@@ -18,9 +18,8 @@ var (
correctCreationDate10CreateTable string correctCreationDate10CreateTable string
//go:embed 10/10_fill_table.sql //go:embed 10/10_fill_table.sql
correctCreationDate10FillTable string correctCreationDate10FillTable string
//go:embed 10/cockroach/10_update.sql //go:embed 10/10_update.sql
//go:embed 10/postgres/10_update.sql correctCreationDate10Update string
correctCreationDate10Update embed.FS
//go:embed 10/10_count_wrong_events.sql //go:embed 10/10_count_wrong_events.sql
correctCreationDate10CountWrongEvents string correctCreationDate10CountWrongEvents string
//go:embed 10/10_empty_table.sql //go:embed 10/10_empty_table.sql
@@ -40,11 +39,6 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context, _ eventstore.Event)
logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration") logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration")
var affected int64 var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error { err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
if mig.dbClient.Type() == "cockroach" {
if _, err := tx.Exec("SET experimental_enable_temp_tables=on"); err != nil {
return err
}
}
_, err := tx.ExecContext(ctx, correctCreationDate10CreateTable) _, err := tx.ExecContext(ctx, correctCreationDate10CreateTable)
if err != nil { if err != nil {
return err return err
@@ -66,11 +60,7 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context, _ eventstore.Event)
return err return err
} }
updateStmt, err := readStmt(correctCreationDate10Update, "10", mig.dbClient.Type(), "10_update.sql") _, err = tx.ExecContext(ctx, correctCreationDate10Update)
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, updateStmt)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1 +0,0 @@
UPDATE eventstore.events e SET (creation_date, "position") = (we.next_cd, we.next_cd::DECIMAL) FROM wrong_events we WHERE e.event_sequence = we.event_sequence AND e.instance_id = we.instance_id;

View File

@@ -15,8 +15,7 @@ import (
) )
var ( var (
//go:embed 14/cockroach/*.sql //go:embed 14/*.sql
//go:embed 14/postgres/*.sql
newEventsTable embed.FS newEventsTable embed.FS
) )
@@ -40,7 +39,7 @@ func (mig *NewEventsTable) Execute(ctx context.Context, _ eventstore.Event) erro
return err return err
} }
statements, err := readStatements(newEventsTable, "14", mig.dbClient.Type()) statements, err := readStatements(newEventsTable, "14")
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,33 +0,0 @@
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order,
PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
creation_date::DECIMAL,
event_sequence
FROM eventstore.events_old;

View File

@@ -1,7 +0,0 @@
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN revision SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN created_at SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN creator SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "owner" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "position" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN in_tx_order SET NOT NULL;

View File

@@ -1,3 +0,0 @@
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC) STORING ("position");
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

View File

@@ -1 +0,0 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@@ -11,8 +11,7 @@ import (
) )
var ( var (
//go:embed 15/cockroach/*.sql //go:embed 15/*.sql
//go:embed 15/postgres/*.sql
currentProjectionState embed.FS currentProjectionState embed.FS
) )
@@ -21,7 +20,7 @@ type CurrentProjectionState struct {
} }
func (mig *CurrentProjectionState) Execute(ctx context.Context, _ eventstore.Event) error { func (mig *CurrentProjectionState) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(currentProjectionState, "15", mig.dbClient.Type()) statements, err := readStatements(currentProjectionState, "15")
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,29 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,28 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,28 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,16 +0,0 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,15 +0,0 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@@ -3,17 +3,14 @@ package setup
import ( import (
"context" "context"
_ "embed" _ "embed"
"fmt"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
) )
var ( var (
//go:embed 34/cockroach/34_cache_schema.sql //go:embed 34/34_cache_schema.sql
addCacheSchemaCockroach string addCacheSchema string
//go:embed 34/postgres/34_cache_schema.sql
addCacheSchemaPostgres string
) )
type AddCacheSchema struct { type AddCacheSchema struct {
@@ -21,14 +18,7 @@ type AddCacheSchema struct {
} }
func (mig *AddCacheSchema) Execute(ctx context.Context, _ eventstore.Event) (err error) { func (mig *AddCacheSchema) Execute(ctx context.Context, _ eventstore.Event) (err error) {
switch mig.dbClient.Type() { _, err = mig.dbClient.ExecContext(ctx, addCacheSchema)
case "cockroach":
_, err = mig.dbClient.ExecContext(ctx, addCacheSchemaCockroach)
case "postgres":
_, err = mig.dbClient.ExecContext(ctx, addCacheSchemaPostgres)
default:
err = fmt.Errorf("add cache schema: unsupported db type %q", mig.dbClient.Type())
}
return err return err
} }

View File

@@ -1,27 +0,0 @@
create schema if not exists cache;
create table if not exists cache.objects (
cache_name varchar not null,
id uuid not null default gen_random_uuid(),
created_at timestamptz not null default now(),
last_used_at timestamptz not null default now(),
payload jsonb not null,
primary key(cache_name, id)
);
create table if not exists cache.string_keys(
cache_name varchar not null check (cache_name <> ''),
index_id integer not null check (index_id > 0),
index_key varchar not null check (index_key <> ''),
object_id uuid not null,
primary key (cache_name, index_id, index_key),
constraint fk_object
foreign key(cache_name, object_id)
references cache.objects(cache_name, id)
on delete cascade
);
create index if not exists string_keys_object_id_idx
on cache.string_keys (cache_name, object_id); -- for delete cascade

View File

@@ -21,7 +21,7 @@ type AddPositionToIndexEsWm struct {
}
func (mig *AddPositionToIndexEsWm) Execute(ctx context.Context, _ eventstore.Event) error {
-	statements, err := readStatements(addPositionToEsWmIndex, "35", "")
+	statements, err := readStatements(addPositionToEsWmIndex, "35")
	if err != nil {
		return err
	}

View File

@@ -24,8 +24,7 @@ const (
)
var (
-	//go:embed 40/cockroach/*.sql
-	//go:embed 40/postgres/*.sql
+	//go:embed 40/*.sql
	initPushFunc embed.FS
)
@@ -112,5 +111,5 @@ func (mig *InitPushFunc) inTxOrderType(ctx context.Context) (typeName string, er
}
func (mig *InitPushFunc) filePath(fileName string) string {
-	return path.Join("40", mig.dbClient.Type(), fileName)
+	return path.Join("40", fileName)
}

View File

@@ -1,10 +0,0 @@
CREATE TYPE IF NOT EXISTS eventstore.command AS (
instance_id TEXT
, aggregate_type TEXT
, aggregate_id TEXT
, command_type TEXT
, revision INT2
, payload JSONB
, creator TEXT
, owner TEXT
);
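
`CREATE TYPE IF NOT EXISTS` is accepted by CockroachDB, but stock PostgreSQL has no `IF NOT EXISTS` form for `CREATE TYPE`, which is one reason this file needed a separate Postgres twin in the first place. A common Postgres idiom for the same guard is a `DO` block that swallows `duplicate_object`; the sketch below shows that generic idiom wrapped in Go (this is not necessarily what the retained Postgres file does, only the column list is copied from the dropped file above):

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // assumed Postgres driver
)

// createCommandType guards CREATE TYPE with an exception handler, since
// PostgreSQL (unlike CockroachDB) does not accept CREATE TYPE IF NOT EXISTS.
const createCommandType = `
DO $$ BEGIN
    CREATE TYPE eventstore.command AS (
        instance_id TEXT
        , aggregate_type TEXT
        , aggregate_id TEXT
        , command_type TEXT
        , revision INT2
        , payload JSONB
        , creator TEXT
        , owner TEXT
    );
EXCEPTION
    WHEN duplicate_object THEN NULL;
END $$;`

func main() {
	db, err := sql.Open("pgx", "postgres://zitadel:zitadel@localhost:5432/zitadel?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Safe to re-run: a second execution hits duplicate_object and is ignored.
	if _, err := db.ExecContext(context.Background(), createCommandType); err != nil {
		log.Fatal(err)
	}
}
```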

View File

@@ -1,137 +0,0 @@
CREATE OR REPLACE FUNCTION eventstore.latest_aggregate_state(
instance_id TEXT
, aggregate_type TEXT
, aggregate_id TEXT
, sequence OUT BIGINT
, owner OUT TEXT
)
LANGUAGE 'plpgsql'
AS $$
BEGIN
SELECT
COALESCE(e.sequence, 0) AS sequence
, e.owner
INTO
sequence
, owner
FROM
eventstore.events2 e
WHERE
e.instance_id = $1
AND e.aggregate_type = $2
AND e.aggregate_id = $3
ORDER BY
e.sequence DESC
LIMIT 1;
RETURN;
END;
$$;
CREATE OR REPLACE FUNCTION eventstore.commands_to_events2(commands eventstore.command[])
RETURNS eventstore.events2[]
LANGUAGE 'plpgsql'
AS $$
DECLARE
current_sequence BIGINT;
current_owner TEXT;
instance_id TEXT;
aggregate_type TEXT;
aggregate_id TEXT;
_events eventstore.events2[];
_aggregates CURSOR FOR
select
DISTINCT ("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
FROM
UNNEST(commands) AS c;
BEGIN
OPEN _aggregates;
LOOP
FETCH NEXT IN _aggregates INTO instance_id, aggregate_type, aggregate_id;
-- crdb does not support EXIT WHEN NOT FOUND
EXIT WHEN instance_id IS NULL;
SELECT
*
INTO
current_sequence
, current_owner
FROM eventstore.latest_aggregate_state(
instance_id
, aggregate_type
, aggregate_id
);
-- RETURN QUERY is not supported by crdb: https://github.com/cockroachdb/cockroach/issues/105240
SELECT
ARRAY_CAT(_events, ARRAY_AGG(e))
INTO
_events
FROM (
SELECT
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
, ("c").command_type -- AS event_type
, COALESCE(current_sequence, 0) + ROW_NUMBER() OVER () -- AS sequence
, ("c").revision
, NOW() -- AS created_at
, ("c").payload
, ("c").creator
, COALESCE(current_owner, ("c").owner) -- AS owner
, cluster_logical_timestamp() -- AS position
, ordinality::{{ .InTxOrderType }} -- AS in_tx_order
FROM
UNNEST(commands) WITH ORDINALITY AS c
WHERE
("c").instance_id = instance_id
AND ("c").aggregate_type = aggregate_type
AND ("c").aggregate_id = aggregate_id
) AS e;
END LOOP;
CLOSE _aggregates;
RETURN _events;
END;
$$;
CREATE OR REPLACE FUNCTION eventstore.push(commands eventstore.command[]) RETURNS SETOF eventstore.events2 AS $$
INSERT INTO eventstore.events2
SELECT
("e").instance_id
, ("e").aggregate_type
, ("e").aggregate_id
, ("e").event_type
, ("e").sequence
, ("e").revision
, ("e").created_at
, ("e").payload
, ("e").creator
, ("e").owner
, ("e")."position"
, ("e").in_tx_order
FROM
UNNEST(eventstore.commands_to_events2(commands)) e
ORDER BY
in_tx_order
RETURNING *
$$ LANGUAGE SQL;
/*
select (c).* from UNNEST(eventstore.commands_to_events2(
ARRAY[
ROW('', 'system', 'SYSTEM', 'ct1', 1, '{"key": "value"}', 'c1', 'SYSTEM')
, ROW('', 'system', 'SYSTEM', 'ct2', 1, '{"key": "value"}', 'c1', 'SYSTEM')
, ROW('289525561255060732', 'org', '289575074711790844', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('289525561255060732', 'user', '289575075164906748', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('289525561255060732', 'oidc_session', 'V2_289575178579535100', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('', 'system', 'SYSTEM', 'ct3', 1, '{"key": "value"}', 'c1', 'SYSTEM')
]::eventstore.command[]
) )c;
*/
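
The dropped functions above are the CockroachDB flavour of the push pipeline: the explicit cursor loop, the `EXIT WHEN ... IS NULL` workaround for the missing `EXIT WHEN NOT FOUND`, and `cluster_logical_timestamp()` for the `position` column are all crdb-specific, which is presumably why this file lived under the `40/cockroach` embed removed earlier. The commented usage at the end can still be exercised against the retained Postgres functions, assuming they keep the same `eventstore.commands_to_events2` signature; a sketch of doing so from Go:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // assumed Postgres driver
)

func main() {
	ctx := context.Background()
	db, err := sql.Open("pgx", "postgres://zitadel:zitadel@localhost:5432/zitadel?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mirrors the commented example above: turn a batch of commands into
	// events without pushing them, to inspect sequence and owner resolution.
	// Command types, payloads, and ids are placeholders.
	rows, err := db.QueryContext(ctx, `
SELECT (c).aggregate_type, (c).aggregate_id, (c).sequence
FROM UNNEST(eventstore.commands_to_events2(
    ARRAY[
        ROW('', 'system', 'SYSTEM', 'ct1', 1, '{"key": "value"}', 'c1', 'SYSTEM')
      , ROW('', 'system', 'SYSTEM', 'ct2', 1, '{"key": "value"}', 'c1', 'SYSTEM')
    ]::eventstore.command[]
)) AS c`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var aggregateType, aggregateID string
		var sequence int64
		if err := rows.Scan(&aggregateType, &aggregateID, &sequence); err != nil {
			log.Fatal(err)
		}
		log.Printf("%s/%s -> sequence %d", aggregateType, aggregateID, sequence)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```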

View File

@@ -1,5 +0,0 @@
SELECT data_type
FROM information_schema.columns
WHERE table_schema = 'eventstore'
AND table_name = 'events2'
AND column_name = 'in_tx_order';
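
This one-off query fed the `{{ .InTxOrderType }}` placeholder used in the push-function template above, presumably because the type of `eventstore.events2.in_tx_order` can differ between deployments, so the function body has to be rendered per installation. A sketch of that detect-and-render step follows; the template fragment and struct are illustrative, not the full migration, and the rendered SQL is only printed, not executed:

```go
package main

import (
	"bytes"
	"context"
	"database/sql"
	"log"
	"text/template"

	_ "github.com/jackc/pgx/v5/stdlib" // assumed Postgres driver
)

// Illustrative fragment of a templated statement; the real files under the
// "40" step are larger but use the same {{ .InTxOrderType }} placeholder.
const pushTemplate = `
SELECT ordinality::{{ .InTxOrderType }} AS in_tx_order
FROM UNNEST(ARRAY['a','b']) WITH ORDINALITY;`

func main() {
	ctx := context.Background()
	db, err := sql.Open("pgx", "postgres://zitadel:zitadel@localhost:5432/zitadel?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ask the catalog which type in_tx_order actually has in this deployment.
	var inTxOrderType string
	err = db.QueryRowContext(ctx, `
SELECT data_type
FROM information_schema.columns
WHERE table_schema = 'eventstore'
  AND table_name = 'events2'
  AND column_name = 'in_tx_order'`).Scan(&inTxOrderType)
	if err != nil {
		log.Fatal(err)
	}

	// Render the statement with the detected type before it would be executed.
	tmpl := template.Must(template.New("push").Parse(pushTemplate))
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, struct{ InTxOrderType string }{inTxOrderType}); err != nil {
		log.Fatal(err)
	}
	log.Println(rendered.String())
}
```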

View File

@@ -12,8 +12,7 @@ import (
 )

 var (
-	//go:embed 43/cockroach/*.sql
-	//go:embed 43/postgres/*.sql
+	//go:embed 43/*.sql
 	createFieldsDomainIndex embed.FS
 )

@@ -22,7 +21,7 @@ type CreateFieldsDomainIndex struct {
 }

 func (mig *CreateFieldsDomainIndex) Execute(ctx context.Context, _ eventstore.Event) error {
-	statements, err := readStatements(createFieldsDomainIndex, "43", mig.dbClient.Type())
+	statements, err := readStatements(createFieldsDomainIndex, "43")
 	if err != nil {
 		return err
 	}

View File

@@ -1,3 +0,0 @@
CREATE INDEX CONCURRENTLY IF NOT EXISTS fields_instance_domains_idx
ON eventstore.fields (object_id)
WHERE object_type = 'instance_domain' AND field_name = 'domain';

View File

@@ -21,7 +21,7 @@ type ReplaceCurrentSequencesIndex struct {
 }

 func (mig *ReplaceCurrentSequencesIndex) Execute(ctx context.Context, _ eventstore.Event) error {
-	statements, err := readStatements(replaceCurrentSequencesIndex, "44", "")
+	statements, err := readStatements(replaceCurrentSequencesIndex, "44")
 	if err != nil {
 		return err
 	}

View File

@@ -21,7 +21,7 @@ var (
 )

 func (mig *InitPermissionFunctions) Execute(ctx context.Context, _ eventstore.Event) error {
-	statements, err := readStatements(permissionFunctions, "46", "")
+	statements, err := readStatements(permissionFunctions, "46")
 	if err != nil {
 		return err
 	}

View File

@@ -21,7 +21,7 @@ var (
 )

 func (mig *InitPermittedOrgsFunction) Execute(ctx context.Context, _ eventstore.Event) error {
-	statements, err := readStatements(permittedOrgsFunction, "49", "")
+	statements, err := readStatements(permittedOrgsFunction, "49")
 	if err != nil {
 		return err
 	}

View File

@@ -35,7 +35,7 @@ func Cleanup(config *Config) {
 	logging.OnError(err).Fatal("unable to connect to database")

 	config.Eventstore.Pusher = new_es.NewEventstore(dbClient)
-	config.Eventstore.Querier = old_es.NewCRDB(dbClient)
+	config.Eventstore.Querier = old_es.NewPostgres(dbClient)
 	es := eventstore.NewEventstore(config.Eventstore)

 	step, err := migration.LastStuckStep(ctx, es)
View File

@@ -69,7 +69,7 @@ func MustNewConfig(v *viper.Viper) *Config {
 		hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
 		hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
 		hooks.MapHTTPHeaderStringDecode,
-		database.DecodeHook,
+		database.DecodeHook(false),
 		actions.HTTPConfigDecodeHook,
 		hook.EnumHookFunc(internal_authz.MemberTypeString),
 		hook.Base64ToBytesHookFunc(),
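
`database.DecodeHook` becomes a factory that takes a boolean rather than a plain hook value; what the flag controls is not visible in this hunk. Below is a speculative sketch of such a parameterized mapstructure decode hook, in the spirit of the PR's "fail commands if cockroach is configured" behaviour. All type names, the flag's meaning, and the `mitchellh/mapstructure` import are assumptions (Zitadel may use a fork):

```go
package main

import (
	"fmt"
	"log"
	"reflect"
	"strings"

	"github.com/mitchellh/mapstructure" // assumed; viper decodes config via mapstructure hooks
)

// Config and DatabaseConfig are hypothetical stand-ins for the decoded
// configuration types; they only illustrate the hook's target.
type Config struct {
	Database DatabaseConfig
}

type DatabaseConfig struct {
	Dialect string
}

// DecodeHook sketches a parameterized hook factory. The boolean is an
// assumption about what database.DecodeHook(false) might control, e.g.
// whether a cockroach dialect in the config is still tolerated.
func DecodeHook(allowCockroach bool) mapstructure.DecodeHookFuncType {
	return func(from, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(DatabaseConfig{}) {
			return data, nil
		}
		dialect := strings.ToLower(data.(string))
		if dialect == "cockroach" && !allowCockroach {
			return nil, fmt.Errorf("cockroachdb is no longer supported, please migrate to postgres: https://zitadel.com/docs/self-hosting/manage/cli/mirror")
		}
		return DatabaseConfig{Dialect: dialect}, nil
	}
}

func main() {
	input := map[string]interface{}{"database": "cockroach"}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: DecodeHook(false),
		Result:     &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Decoding fails with a pointer to the migration docs, mirroring the
	// "fail commands if cockroach is configured" behaviour described in the PR.
	if err := dec.Decode(input); err != nil {
		log.Println("config rejected:", err)
	}
}
```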

Some files were not shown because too many files have changed in this diff.