feat(cmd): mirror (#7004)

# Which Problems Are Solved

Adds the possibility to mirror an existing database to a new one.

For that, a new command was added: `zitadel mirror`, including subcommands for a more fine-grained mirror of the data.

Subcommands:

* `zitadel mirror eventstore`: copies only events and their unique constraints
* `zitadel mirror system`: mirrors the data of the `system` schema
* `zitadel mirror projections`: runs all projections
* `zitadel mirror auth`: copies auth requests
* `zitadel mirror verify`: counts the rows in the source and destination database and prints the diff

The command requires one of the following flags:
* `--system`: copies all instances of the system
* `--instance <instance-id>` or `--instance <comma-separated list of instance ids>`: copies only the defined instances

The command is safe to execute multiple times by adding the `--replace` flag. This replaces currently existing data except for the `events` table.
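
For illustration, here is a minimal sketch of how such an instance selection can be turned into a SQL filter. The identifiers are illustrative; the actual implementation is the `instanceClause()` helper in `cmd/mirror/mirror.go` further down.

```go
package main

import (
	"fmt"
	"strings"
)

// instanceFilter builds the WHERE clause used to scope the copy:
// either all instances (--system) or an explicit id list (--instance).
func instanceFilter(system bool, instanceIDs []string) string {
	if system {
		return "WHERE instance_id <> ''"
	}
	quoted := make([]string, len(instanceIDs))
	for i, id := range instanceIDs {
		quoted[i] = "'" + id + "'"
	}
	// COPY statements do not accept bind parameters,
	// so the ids are rendered directly into the statement.
	return "WHERE instance_id IN (" + strings.Join(quoted, ", ") + ")"
}

func main() {
	fmt.Println(instanceFilter(true, nil))
	fmt.Println(instanceFilter(false, []string{"230183594935", "230183594936"}))
}
```

Because the ids end up inlined in the `COPY` statements, the same `--system`/`--instance` selection should be used on every execution of the command.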

# Additional Changes

A `--for-mirror` flag was added to `zitadel setup` to prepare the new database. The flag skips the creation of the first instance and the initial run of projections.

It is now possible to skip the creation of the first instance during
setup by setting `FirstInstance.Skip` to true in the steps
configuration.
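
The skip boils down to an early return in the step's `Execute` method. A simplified sketch follows; the real step in `cmd/setup` also carries encryption keys and the default instance configuration (see the `FirstInstance` diff further down).

```go
package main

import (
	"context"
	"fmt"
)

// FirstInstance is a reduced stand-in for the setup step; the real struct in
// cmd/setup also holds encryption key configs and the instance defaults.
type FirstInstance struct {
	Skip bool
}

func (mig *FirstInstance) Execute(ctx context.Context) error {
	if mig.Skip {
		// Nothing to create: a database prepared with `zitadel setup --for-mirror`
		// only needs the schema; the data arrives later via `zitadel mirror`.
		return nil
	}
	// ... regular first-instance creation would run here ...
	return nil
}

func main() {
	step := &FirstInstance{Skip: true}
	fmt.Println(step.Execute(context.Background())) // <nil>, step skipped
}
```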

# Additional info

It is currently not possible to merge multiple databases. See
https://github.com/zitadel/zitadel/issues/7964 for more details.

It is currently not possible to use files. See
https://github.com/zitadel/zitadel/issues/7966 for more information.

closes https://github.com/zitadel/zitadel/issues/7586
closes https://github.com/zitadel/zitadel/issues/7486

### Definition of Ready

- [x] I am happy with the code
- [x] Short description of the feature/issue is added in the PR description
- [x] PR is linked to the corresponding user story
- [x] Acceptance criteria are met
- [x] All open todos and follow ups are defined in a new ticket and
justified
- [x] Deviations from the acceptance criteria and design are agreed with
the PO and documented.
- [x] No debug or dead code
- [x] My code has no repetitions
- [x] Critical parts are tested automatically
- [ ] Where possible E2E tests are implemented
- [x] Documentation/examples are up-to-date
- [x] All non-functional requirements are met
- [x] Functionality of the acceptance criteria is checked manually on
the dev system.

---------

Co-authored-by: Livio Spring <livio.a@gmail.com>
Silvan authored on 2024-05-30 11:35:30 +02:00, committed by GitHub
parent d254828d47
commit 2243306ef6
66 changed files with 2150 additions and 129 deletions


@ -40,7 +40,7 @@ func New() *cobra.Command {
Long: `Sets up the minimum requirements to start ZITADEL.
Prerequisites:
- cockroachDB
- database (PostgreSql or cockroachdb)
The user provided by flags needs privileges to
- create the database if it does not exist

cmd/mirror/auth.go (new file, 91 lines)

@ -0,0 +1,91 @@
package mirror
import (
"context"
_ "embed"
"io"
"time"
"github.com/jackc/pgx/v5/stdlib"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
)
func authCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "auth",
Short: "mirrors the auth requests table from one database to another",
Long: `mirrors the auth requests table from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Only auth requests are mirrored`,
Run: func(cmd *cobra.Command, args []string) {
config := mustNewMigrationConfig(viper.GetViper())
copyAuth(cmd.Context(), config)
},
}
cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete auth requests of defined instances before copy")
return cmd
}
func copyAuth(ctx context.Context, config *Migration) {
sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
logging.OnError(err).Fatal("unable to connect to source database")
defer sourceClient.Close()
destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
logging.OnError(err).Fatal("unable to connect to destination database")
defer destClient.Close()
copyAuthRequests(ctx, sourceClient, destClient)
}
func copyAuthRequests(ctx context.Context, source, dest *database.DB) {
start := time.Now()
sourceConn, err := source.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire connection")
defer sourceConn.Close()
r, w := io.Pipe()
errs := make(chan error, 1)
go func() {
err = sourceConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+") TO STDOUT")
w.Close()
return err
})
errs <- err
}()
destConn, err := dest.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire connection")
defer destConn.Close()
var affected int64
err = destConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
if shouldReplace {
_, err := conn.Exec(ctx, "DELETE FROM auth.auth_requests "+instanceClause())
if err != nil {
return err
}
}
tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN")
affected = tag.RowsAffected()
return err
})
logging.OnError(err).Fatal("unable to copy auth requests to destination")
logging.OnError(<-errs).Fatal("unable to copy auth requests from source")
logging.WithFields("took", time.Since(start), "count", affected).Info("auth requests migrated")
}
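
The copy above streams rows directly from the source to the destination through an `io.Pipe`, so no intermediate dump file is written. A reduced, self-contained sketch of that pattern with plain pgx connections (connection strings and the table are placeholders):

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Placeholder connection strings.
	source, err := pgx.Connect(ctx, "postgres://user@source:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	defer source.Close(ctx)

	dest, err := pgx.Connect(ctx, "postgres://user@dest:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	defer dest.Close(ctx)

	// COPY TO on the source writes into the pipe, COPY FROM on the
	// destination reads from it, so rows stream between the databases.
	r, w := io.Pipe()
	errs := make(chan error, 1)
	go func() {
		_, err := source.PgConn().CopyTo(ctx, w, "COPY auth.auth_requests TO STDOUT")
		w.CloseWithError(err)
		errs <- err
	}()

	tag, err := dest.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN")
	if err != nil {
		log.Fatal(err)
	}
	if err := <-errs; err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d rows", tag.RowsAffected())
}
```

The same streaming pattern is reused for assets, encryption keys, unique constraints and events in the other subcommands below.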

cmd/mirror/config.go (new file, 80 lines)

@ -0,0 +1,80 @@
package mirror
import (
_ "embed"
"time"
"github.com/mitchellh/mapstructure"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/cmd/hooks"
"github.com/zitadel/zitadel/internal/actions"
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/id"
)
type Migration struct {
Source database.Config
Destination database.Config
EventBulkSize uint32
Log *logging.Config
Machine *id.Config
}
var (
//go:embed defaults.yaml
defaultConfig []byte
)
func mustNewMigrationConfig(v *viper.Viper) *Migration {
config := new(Migration)
mustNewConfig(v, config)
err := config.Log.SetLogger()
logging.OnError(err).Fatal("unable to set logger")
id.Configure(config.Machine)
return config
}
func mustNewProjectionsConfig(v *viper.Viper) *ProjectionsConfig {
config := new(ProjectionsConfig)
mustNewConfig(v, config)
err := config.Log.SetLogger()
logging.OnError(err).Fatal("unable to set logger")
id.Configure(config.Machine)
return config
}
func mustNewConfig(v *viper.Viper, config any) {
err := v.Unmarshal(config,
viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
hooks.SliceTypeStringDecode[*domain.CustomMessageText],
hooks.SliceTypeStringDecode[*command.SetQuota],
hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
hooks.MapTypeStringDecode[domain.Feature, any],
hooks.MapHTTPHeaderStringDecode,
hook.Base64ToBytesHookFunc(),
hook.TagToLanguageHookFunc(),
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToTimeHookFunc(time.RFC3339),
mapstructure.StringToSliceHookFunc(","),
database.DecodeHook,
actions.HTTPConfigDecodeHook,
hook.EnumHookFunc(internal_authz.MemberTypeString),
)),
)
logging.OnError(err).Fatal("unable to read default config")
}
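
To illustrate what `mustNewConfig` does, here is a minimal sketch of decoding a YAML snippet into a reduced config struct via viper and mapstructure hooks. The struct and values are illustrative and only two of the many registered hooks are shown.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

// miniConfig is a reduced stand-in for the Migration config: just enough
// fields to show how string values from YAML are decoded via hooks.
type miniConfig struct {
	EventBulkSize uint32
	MaxDuration   time.Duration
}

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBufferString("EventBulkSize: 10000\nMaxDuration: 30m\n")); err != nil {
		log.Fatal(err)
	}

	var cfg miniConfig
	err := v.Unmarshal(&cfg, viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
		// the real command registers many more hooks (base64, enums, ...)
		mapstructure.StringToTimeDurationHookFunc(),
		mapstructure.StringToSliceHookFunc(","),
	)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {EventBulkSize:10000 MaxDuration:30m0s}
}
```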

cmd/mirror/defaults.yaml (new file, 114 lines)

@ -0,0 +1,114 @@
Source:
cockroach:
Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
EventPushConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
User:
Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
SSL:
Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
# Postgres is used as soon as a value is set
# The values describe the possible fields to set values
postgres:
Host: # ZITADEL_DATABASE_POSTGRES_HOST
Port: # ZITADEL_DATABASE_POSTGRES_PORT
Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
User:
Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
SSL:
Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
Destination:
cockroach:
Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
MaxOpenConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
EventPushConnRatio: 0.01 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
User:
Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
SSL:
Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
# Postgres is used as soon as a value is set
# The values describe the possible fields to set values
postgres:
Host: # ZITADEL_DATABASE_POSTGRES_HOST
Port: # ZITADEL_DATABASE_POSTGRES_PORT
Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
User:
Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
SSL:
Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
EventBulkSize: 10000
Projections:
# The maximum duration a transaction remains open
# before it stops left folding additional events
# and updates the table.
TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
# turn off scheduler during operation
RequeueEvery: 0s
ConcurrentInstances: 7
EventBulkLimit: 1000
Customizations:
notifications:
MaxFailureCount: 1
Eventstore:
MaxRetries: 3
Auth:
Spooler:
TransactionDuration: 0s #ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
BulkLimit: 1000 #ZITADEL_AUTH_SPOOLER_BULKLIMIT
Admin:
Spooler:
TransactionDuration: 0s #ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
BulkLimit: 10 #ZITADEL_AUTH_SPOOLER_BULKLIMIT
FirstInstance:
# We only need to create an empty zitadel database so this step must be skipped
Skip: true
Log:
Level: info

cmd/mirror/event.go (new file, 96 lines)

@ -0,0 +1,96 @@
package mirror
import (
"context"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/v2/projection"
"github.com/zitadel/zitadel/internal/v2/readmodel"
"github.com/zitadel/zitadel/internal/v2/system"
mirror_event "github.com/zitadel/zitadel/internal/v2/system/mirror"
)
func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore.EventStore, source string) (*readmodel.LastSuccessfulMirror, error) {
lastSuccess := readmodel.NewLastSuccessfulMirror(source)
if shouldIgnorePrevious {
return lastSuccess, nil
}
_, err := destinationES.Query(
ctx,
eventstore.NewQuery(
system.AggregateInstance,
lastSuccess,
eventstore.SetFilters(lastSuccess.Filter()),
),
)
if err != nil {
return nil, err
}
return lastSuccess, nil
}
func writeMigrationStart(ctx context.Context, sourceES *eventstore.EventStore, id string, destination string) (_ float64, err error) {
var cmd *eventstore.Command
if len(instanceIDs) > 0 {
cmd, err = mirror_event.NewStartedInstancesCommand(destination, instanceIDs)
if err != nil {
return 0, err
}
} else {
cmd = mirror_event.NewStartedSystemCommand(destination)
}
var position projection.HighestPosition
err = sourceES.Push(
ctx,
eventstore.NewPushIntent(
system.AggregateInstance,
eventstore.AppendAggregate(
system.AggregateOwner,
system.AggregateType,
id,
eventstore.CurrentSequenceMatches(0),
eventstore.AppendCommands(cmd),
),
eventstore.PushReducer(&position),
),
)
if err != nil {
return 0, err
}
return position.Position, nil
}
func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position float64) error {
return destinationES.Push(
ctx,
eventstore.NewPushIntent(
system.AggregateInstance,
eventstore.AppendAggregate(
system.AggregateOwner,
system.AggregateType,
id,
eventstore.CurrentSequenceMatches(0),
eventstore.AppendCommands(mirror_event.NewSucceededCommand(source, position)),
),
),
)
}
func writeMigrationFailed(ctx context.Context, destinationES *eventstore.EventStore, id, source string, err error) error {
return destinationES.Push(
ctx,
eventstore.NewPushIntent(
system.AggregateInstance,
eventstore.AppendAggregate(
system.AggregateOwner,
system.AggregateType,
id,
eventstore.CurrentSequenceMatches(0),
eventstore.AppendCommands(mirror_event.NewFailedCommand(source, err)),
),
),
)
}

cmd/mirror/event_store.go (new file, 250 lines)

@ -0,0 +1,250 @@
package mirror
import (
"context"
"database/sql"
_ "embed"
"errors"
"io"
"time"
"github.com/jackc/pgx/v5/stdlib"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
db "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
"github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/v2/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
"github.com/zitadel/zitadel/internal/zerrors"
)
var shouldIgnorePrevious bool
func eventstoreCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "eventstore",
Short: "mirrors the eventstore of an instance from one database to another",
Long: `mirrors the eventstore of an instance from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Migrate only copies events2 and unique constraints`,
Run: func(cmd *cobra.Command, args []string) {
config := mustNewMigrationConfig(viper.GetViper())
copyEventstore(cmd.Context(), config)
},
}
cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete unique constraints of defined instances before copy")
cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")
return cmd
}
func copyEventstore(ctx context.Context, config *Migration) {
sourceClient, err := db.Connect(config.Source, false, dialect.DBPurposeQuery)
logging.OnError(err).Fatal("unable to connect to source database")
defer sourceClient.Close()
destClient, err := db.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
logging.OnError(err).Fatal("unable to connect to destination database")
defer destClient.Close()
copyEvents(ctx, sourceClient, destClient, config.EventBulkSize)
copyUniqueConstraints(ctx, sourceClient, destClient)
}
func positionQuery(db *db.DB) string {
switch db.Type() {
case "postgres":
return "SELECT EXTRACT(EPOCH FROM clock_timestamp())"
case "cockroach":
return "SELECT cluster_logical_timestamp()"
default:
logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
return ""
}
}
func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
start := time.Now()
reader, writer := io.Pipe()
migrationID, err := id.SonyFlakeGenerator().Next()
logging.OnError(err).Fatal("unable to generate migration id")
sourceConn, err := source.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire source connection")
destConn, err := dest.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire dest connection")
sourceES := eventstore.NewEventstoreFromOne(postgres.New(source, &postgres.Config{
MaxRetries: 3,
}))
destinationES := eventstore.NewEventstoreFromOne(postgres.New(dest, &postgres.Config{
MaxRetries: 3,
}))
previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName())
logging.OnError(err).Fatal("unable to query latest successful migration")
maxPosition, err := writeMigrationStart(ctx, sourceES, migrationID, dest.DatabaseName())
logging.OnError(err).Fatal("unable to write migration started event")
logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration")
nextPos := make(chan bool, 1)
pos := make(chan float64, 1)
errs := make(chan error, 3)
go func() {
err := sourceConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
nextPos <- true
var i uint32
for position := range pos {
var stmt database.Statement
stmt.WriteString("COPY (SELECT instance_id, aggregate_type, aggregate_id, event_type, sequence, revision, created_at, regexp_replace(payload::TEXT, '\\\\u0000', '', 'g')::JSON payload, creator, owner, ")
stmt.WriteArg(position)
stmt.WriteString(" position, row_number() OVER (PARTITION BY instance_id ORDER BY position, in_tx_order) AS in_tx_order FROM eventstore.events2 ")
stmt.WriteString(instanceClause())
stmt.WriteString(" AND ")
database.NewNumberAtMost(maxPosition).Write(&stmt, "position")
stmt.WriteString(" AND ")
database.NewNumberGreater(previousMigration.Position).Write(&stmt, "position")
stmt.WriteString(" ORDER BY instance_id, position, in_tx_order")
stmt.WriteString(" LIMIT ")
stmt.WriteArg(bulkSize)
stmt.WriteString(" OFFSET ")
stmt.WriteArg(bulkSize * i)
stmt.WriteString(") TO STDOUT")
// COPY does not allow args, so we replace the args in the statement
tag, err := conn.PgConn().CopyTo(ctx, writer, stmt.Debug())
if err != nil {
return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i)
}
if tag.RowsAffected() < int64(bulkSize) {
return nil
}
nextPos <- true
i++
}
return nil
})
writer.Close()
close(nextPos)
errs <- err
}()
// generate the next position for each bulk of events
go func() {
defer close(pos)
for range nextPos {
var position float64
err := dest.QueryRowContext(
ctx,
func(row *sql.Row) error {
return row.Scan(&position)
},
positionQuery(dest),
)
if err != nil {
errs <- zerrors.ThrowUnknown(err, "MIGRA-kMyPH", "unable to query next position")
return
}
pos <- position
}
}()
var eventCount int64
errs <- destConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN")
eventCount = tag.RowsAffected()
if err != nil {
return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination")
}
return nil
})
close(errs)
writeCopyEventsDone(ctx, destinationES, migrationID, source.DatabaseName(), maxPosition, errs)
logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated")
}
func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position float64, errs <-chan error) {
joinedErrs := make([]error, 0, len(errs))
for err := range errs {
joinedErrs = append(joinedErrs, err)
}
err := errors.Join(joinedErrs...)
if err != nil {
logging.WithError(err).Error("unable to mirror events")
err := writeMigrationFailed(ctx, es, id, source, err)
logging.OnError(err).Fatal("unable to write failed event")
return
}
err = writeMigrationSucceeded(ctx, es, id, source, position)
logging.OnError(err).Fatal("unable to write succeeded event")
}
func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) {
start := time.Now()
reader, writer := io.Pipe()
errs := make(chan error, 1)
sourceConn, err := source.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire source connection")
go func() {
err := sourceConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
var stmt database.Statement
stmt.WriteString("COPY (SELECT instance_id, unique_type, unique_field FROM eventstore.unique_constraints ")
stmt.WriteString(instanceClause())
stmt.WriteString(") TO stdout")
_, err := conn.PgConn().CopyTo(ctx, writer, stmt.String())
writer.Close()
return err
})
errs <- err
}()
destConn, err := dest.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire dest connection")
var eventCount int64
err = destConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
if shouldReplace {
var stmt database.Statement
stmt.WriteString("DELETE FROM eventstore.unique_constraints ")
stmt.WriteString(instanceClause())
_, err := conn.Exec(ctx, stmt.String())
if err != nil {
return err
}
}
tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.unique_constraints FROM stdin")
eventCount = tag.RowsAffected()
return err
})
logging.OnError(err).Fatal("unable to copy unique constraints to destination")
logging.OnError(<-errs).Fatal("unable to copy unique constraints from source")
logging.WithFields("took", time.Since(start), "count", eventCount).Info("unique constraints migrated")
}
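
To make the bulk loop in `copyEvents` easier to follow, here is a simplified sketch of how each iteration's `COPY` statement is bounded: events are read `EventBulkSize` rows at a time, ordered by instance and position, and limited to the window between the last successful migration and the position recorded when the migration started. The column list is abbreviated; the real statement is built with `database.Statement` above.

```go
package main

import "fmt"

// bulkCopyStatement sketches the query issued for bulk number i: only events
// between the previous migration position (exclusive) and the position
// captured at migration start (inclusive) are copied, bulkSize rows at a time.
func bulkCopyStatement(prevPosition, maxPosition float64, bulkSize, i uint32) string {
	return fmt.Sprintf(
		"COPY (SELECT * FROM eventstore.events2 WHERE position <= %f AND position > %f "+
			"ORDER BY instance_id, position, in_tx_order LIMIT %d OFFSET %d) TO STDOUT",
		maxPosition, prevPosition, bulkSize, bulkSize*i,
	)
}

func main() {
	for i := uint32(0); i < 3; i++ {
		fmt.Println(bulkCopyStatement(0, 42.5, 10000, i))
	}
}
```

Each bulk is additionally stamped with a fresh `position` value queried from the destination database (see `positionQuery` above), so the copied events receive positions that are consistent with the destination.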

cmd/mirror/mirror.go (new file, 93 lines)

@ -0,0 +1,93 @@
package mirror
import (
"bytes"
_ "embed"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/cmd/key"
)
var (
instanceIDs []string
isSystem bool
shouldReplace bool
)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "mirror",
Short: "mirrors all data of ZITADEL from one database to another",
Long: `mirrors all data of ZITADEL from one database to another
ZITADEL needs to be initialized and set up with --for-mirror
The command does mirror all data needed and recomputes the projections.
For more details call the help functions of the sub commands.
Order of execution:
1. mirror system tables
2. mirror auth tables
3. mirror event store tables
4. recompute projections
5. verify`,
Run: func(cmd *cobra.Command, args []string) {
config := mustNewMigrationConfig(viper.GetViper())
projectionConfig := mustNewProjectionsConfig(viper.GetViper())
masterKey, err := key.MasterKey(cmd)
logging.OnError(err).Fatal("unable to read master key")
copySystem(cmd.Context(), config)
copyAuth(cmd.Context(), config)
copyEventstore(cmd.Context(), config)
projections(cmd.Context(), projectionConfig, masterKey)
verifyMigration(cmd.Context(), config)
},
}
mirrorFlags(cmd)
cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")
cmd.Flags().BoolVar(&shouldReplace, "replace", false, `replaces all data of the following tables for the provided instances or all if the "--system"-flag is set:
* system.assets
* auth.auth_requests
* eventstore.unique_constraints
The flag should be provided if you want to execute the mirror command multiple times so that the static data are also mirrored to prevent inconsistent states.`)
migrateProjectionsFlags(cmd)
err := viper.MergeConfig(bytes.NewBuffer(defaultConfig))
logging.OnError(err).Fatal("unable to read default config")
cmd.AddCommand(
eventstoreCmd(),
systemCmd(),
projectionsCmd(),
authCmd(),
verifyCmd(),
)
return cmd
}
func mirrorFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
cmd.MarkFlagsOneRequired("system", "instance")
cmd.MarkFlagsMutuallyExclusive("system", "instance")
}
func instanceClause() string {
if isSystem {
return "WHERE instance_id <> ''"
}
for i := range instanceIDs {
instanceIDs[i] = "'" + instanceIDs[i] + "'"
}
// COPY does not allow parameters so we need to set them directly
return "WHERE instance_id IN (" + strings.Join(instanceIDs, ", ") + ")"
}

cmd/mirror/projections.go (new file, 316 lines)

@ -0,0 +1,316 @@
package mirror
import (
"context"
"database/sql"
"net/http"
"sync"
"time"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/cmd/encryption"
"github.com/zitadel/zitadel/cmd/key"
"github.com/zitadel/zitadel/cmd/tls"
admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/ui/login"
auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
auth_handler "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/handler"
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/i18n"
"github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/notification"
"github.com/zitadel/zitadel/internal/notification/handlers"
"github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/query/projection"
static_config "github.com/zitadel/zitadel/internal/static/config"
es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
"github.com/zitadel/zitadel/internal/webauthn"
)
func projectionsCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "projections",
Short: "calls the projections synchronously",
Run: func(cmd *cobra.Command, args []string) {
config := mustNewProjectionsConfig(viper.GetViper())
masterKey, err := key.MasterKey(cmd)
logging.OnError(err).Fatal("unable to read master key")
projections(cmd.Context(), config, masterKey)
},
}
migrateProjectionsFlags(cmd)
return cmd
}
type ProjectionsConfig struct {
Destination database.Config
Projections projection.Config
EncryptionKeys *encryption.EncryptionKeyConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser
Eventstore *eventstore.Config
Admin admin_es.Config
Auth auth_es.Config
Log *logging.Config
Machine *id.Config
ExternalPort uint16
ExternalDomain string
ExternalSecure bool
InternalAuthZ internal_authz.Config
SystemDefaults systemdefaults.SystemDefaults
Telemetry *handlers.TelemetryPusherConfig
Login login.Config
OIDC oidc.Config
WebAuthNName string
DefaultInstance command.InstanceSetup
AssetStorage static_config.AssetStorageConfig
}
func migrateProjectionsFlags(cmd *cobra.Command) {
key.AddMasterKeyFlag(cmd)
tls.AddTLSModeFlag(cmd)
}
func projections(
ctx context.Context,
config *ProjectionsConfig,
masterKey string,
) {
start := time.Now()
client, err := database.Connect(config.Destination, false, dialect.DBPurposeQuery)
logging.OnError(err).Fatal("unable to connect to database")
keyStorage, err := crypto_db.NewKeyStorage(client, masterKey)
logging.OnError(err).Fatal("cannot start key storage")
keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage)
logging.OnError(err).Fatal("unable to read encryption keys")
staticStorage, err := config.AssetStorage.NewStorage(client.DB)
logging.OnError(err).Fatal("unable create static storage")
config.Eventstore.Querier = old_es.NewCRDB(client)
esPusherDBClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
logging.OnError(err).Fatal("unable to connect eventstore push client")
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
es := eventstore.NewEventstore(config.Eventstore)
esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{
MaxRetries: config.Eventstore.MaxRetries,
}))
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
queries, err := query.StartQueries(
ctx,
es,
esV4.Querier,
client,
client,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
keys.OTP,
keys.OIDC,
keys.SAML,
config.InternalAuthZ.RolePermissionMappings,
sessionTokenVerifier,
func(q *query.Queries) domain.PermissionCheck {
return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
},
0,
config.SystemAPIUsers,
false,
)
logging.OnError(err).Fatal("unable to start queries")
authZRepo, err := authz.Start(queries, es, client, keys.OIDC, config.ExternalSecure)
logging.OnError(err).Fatal("unable to start authz repo")
webAuthNConfig := &webauthn.Config{
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
commands, err := command.StartCommands(
es,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
staticStorage,
webAuthNConfig,
config.ExternalDomain,
config.ExternalSecure,
config.ExternalPort,
keys.IDPConfig,
keys.OTP,
keys.SMTP,
keys.SMS,
keys.User,
keys.DomainVerification,
keys.OIDC,
keys.SAML,
&http.Client{},
func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
},
sessionTokenVerifier,
config.OIDC.DefaultAccessTokenLifetime,
config.OIDC.DefaultRefreshTokenExpiration,
config.OIDC.DefaultRefreshTokenIdleExpiration,
config.DefaultInstance.SecretGenerators,
)
logging.OnError(err).Fatal("unable to start commands")
err = projection.Create(ctx, client, es, config.Projections, keys.OIDC, keys.SAML, config.SystemAPIUsers)
logging.OnError(err).Fatal("unable to start projections")
i18n.MustLoadSupportedLanguagesFromDir()
notification.Register(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
config.ExternalPort,
config.ExternalSecure,
commands,
queries,
es,
config.Login.DefaultOTPEmailURLV2,
config.SystemDefaults.Notifications.FileSystemPath,
keys.User,
keys.SMTP,
keys.SMS,
)
config.Auth.Spooler.Client = client
config.Auth.Spooler.Eventstore = es
authView, err := auth_view.StartView(config.Auth.Spooler.Client, keys.OIDC, queries, config.Auth.Spooler.Eventstore)
logging.OnError(err).Fatal("unable to start auth view")
auth_handler.Register(ctx, config.Auth.Spooler, authView, queries)
config.Admin.Spooler.Client = client
config.Admin.Spooler.Eventstore = es
adminView, err := admin_view.StartView(config.Admin.Spooler.Client)
logging.OnError(err).Fatal("unable to start admin view")
admin_handler.Register(ctx, config.Admin.Spooler, adminView, staticStorage)
instances := make(chan string, config.Projections.ConcurrentInstances)
failedInstances := make(chan string)
wg := sync.WaitGroup{}
wg.Add(int(config.Projections.ConcurrentInstances))
go func() {
for instance := range failedInstances {
logging.WithFields("instance", instance).Error("projection failed")
}
}()
for i := 0; i < int(config.Projections.ConcurrentInstances); i++ {
go execProjections(ctx, instances, failedInstances, &wg)
}
for _, instance := range queryInstanceIDs(ctx, client) {
instances <- instance
}
close(instances)
wg.Wait()
close(failedInstances)
logging.WithFields("took", time.Since(start)).Info("projections executed")
}
func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
for instance := range instances {
logging.WithFields("instance", instance).Info("start projections")
ctx = internal_authz.WithInstanceID(ctx, instance)
err := projection.ProjectInstance(ctx)
if err != nil {
logging.WithFields("instance", instance).OnError(err).Info("trigger failed")
failedInstances <- instance
continue
}
err = admin_handler.ProjectInstance(ctx)
if err != nil {
logging.WithFields("instance", instance).OnError(err).Info("trigger admin handler failed")
failedInstances <- instance
continue
}
err = auth_handler.ProjectInstance(ctx)
if err != nil {
logging.WithFields("instance", instance).OnError(err).Info("trigger auth handler failed")
failedInstances <- instance
continue
}
err = notification.ProjectInstance(ctx)
if err != nil {
logging.WithFields("instance", instance).OnError(err).Info("trigger notification failed")
failedInstances <- instance
continue
}
logging.WithFields("instance", instance).Info("projections done")
}
wg.Done()
}
// returns the instance configured by flag
// or all instances which are not removed
func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
if len(instanceIDs) > 0 {
return instanceIDs
}
instances := []string{}
err := source.QueryContext(
ctx,
func(r *sql.Rows) error {
for r.Next() {
var instance string
if err := r.Scan(&instance); err != nil {
return err
}
instances = append(instances, instance)
}
return r.Err()
},
"SELECT DISTINCT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.added' AND instance_id NOT IN (SELECT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.removed')",
)
logging.OnError(err).Fatal("unable to query instances")
return instances
}
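
The projection run fans out per instance over a fixed number of workers (`ConcurrentInstances`). A self-contained sketch of that fan-out, where the instance ids are placeholders and `projectOne` stands in for the projection, admin, auth and notification handlers invoked in the real command:

```go
package main

import (
	"fmt"
	"sync"
)

// projectOne stands in for the per-instance projection run
// (projection, admin, auth and notification handlers in the real command).
func projectOne(instanceID string) error {
	fmt.Println("projections done for", instanceID)
	return nil
}

func main() {
	instanceIDs := []string{"inst-1", "inst-2", "inst-3"} // placeholders
	concurrent := 2

	instances := make(chan string, concurrent)
	failed := make(chan string)

	var wg sync.WaitGroup
	wg.Add(concurrent)
	for i := 0; i < concurrent; i++ {
		go func() {
			defer wg.Done()
			for instance := range instances {
				if err := projectOne(instance); err != nil {
					failed <- instance
				}
			}
		}()
	}

	// Close the failure channel once all workers are done.
	go func() {
		wg.Wait()
		close(failed)
	}()

	// Feed the work queue.
	go func() {
		for _, id := range instanceIDs {
			instances <- id
		}
		close(instances)
	}()

	for instance := range failed {
		fmt.Println("projection failed for", instance)
	}
}
```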

cmd/mirror/system.go (new file, 139 lines)

@ -0,0 +1,139 @@
package mirror
import (
"context"
_ "embed"
"io"
"time"
"github.com/jackc/pgx/v5/stdlib"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
)
func systemCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "system",
Short: "mirrors the system tables of ZITADEL from one database to another",
Long: `mirrors the system tables of ZITADEL from one database to another
ZITADEL needs to be initialized
Only keys and assets are mirrored`,
Run: func(cmd *cobra.Command, args []string) {
config := mustNewMigrationConfig(viper.GetViper())
copySystem(cmd.Context(), config)
},
}
cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete ALL keys and assets of defined instances before copy")
return cmd
}
func copySystem(ctx context.Context, config *Migration) {
sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
logging.OnError(err).Fatal("unable to connect to source database")
defer sourceClient.Close()
destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
logging.OnError(err).Fatal("unable to connect to destination database")
defer destClient.Close()
copyAssets(ctx, sourceClient, destClient)
copyEncryptionKeys(ctx, sourceClient, destClient)
}
func copyAssets(ctx context.Context, source, dest *database.DB) {
start := time.Now()
sourceConn, err := source.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire source connection")
defer sourceConn.Close()
r, w := io.Pipe()
errs := make(chan error, 1)
go func() {
err = sourceConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
// ignore hash column because it's computed
_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets "+instanceClause()+") TO stdout")
w.Close()
return err
})
errs <- err
}()
destConn, err := dest.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire dest connection")
defer destConn.Close()
var eventCount int64
err = destConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
if shouldReplace {
_, err := conn.Exec(ctx, "DELETE FROM system.assets "+instanceClause())
if err != nil {
return err
}
}
tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin")
eventCount = tag.RowsAffected()
return err
})
logging.OnError(err).Fatal("unable to copy assets to destination")
logging.OnError(<-errs).Fatal("unable to copy assets from source")
logging.WithFields("took", time.Since(start), "count", eventCount).Info("assets migrated")
}
func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
start := time.Now()
sourceConn, err := source.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire source connection")
defer sourceConn.Close()
r, w := io.Pipe()
errs := make(chan error, 1)
go func() {
err = sourceConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
// ignore hash column because it's computed
_, err := conn.PgConn().CopyTo(ctx, w, "COPY system.encryption_keys TO stdout")
w.Close()
return err
})
errs <- err
}()
destConn, err := dest.Conn(ctx)
logging.OnError(err).Fatal("unable to acquire dest connection")
defer destConn.Close()
var eventCount int64
err = destConn.Raw(func(driverConn interface{}) error {
conn := driverConn.(*stdlib.Conn).Conn()
if shouldReplace {
_, err := conn.Exec(ctx, "TRUNCATE system.encryption_keys")
if err != nil {
return err
}
}
tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin")
eventCount = tag.RowsAffected()
return err
})
logging.OnError(err).Fatal("unable to copy encryption keys to destination")
logging.OnError(<-errs).Fatal("unable to copy encryption keys from source")
logging.WithFields("took", time.Since(start), "count", eventCount).Info("encryption keys migrated")
}

cmd/mirror/verify.go (new file, 111 lines)

@ -0,0 +1,111 @@
package mirror
import (
"context"
"database/sql"
_ "embed"
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
)
func verifyCmd() *cobra.Command {
return &cobra.Command{
Use: "verify",
Short: "counts if source and dest have the same amount of entries",
Run: func(cmd *cobra.Command, args []string) {
config := mustNewMigrationConfig(viper.GetViper())
verifyMigration(cmd.Context(), config)
},
}
}
var schemas = []string{
"adminapi",
"auth",
"eventstore",
"projections",
"system",
}
func verifyMigration(ctx context.Context, config *Migration) {
sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
logging.OnError(err).Fatal("unable to connect to source database")
defer sourceClient.Close()
destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
logging.OnError(err).Fatal("unable to connect to destination database")
defer destClient.Close()
for _, schema := range schemas {
for _, table := range append(getTables(ctx, destClient, schema), getViews(ctx, destClient, schema)...) {
sourceCount := countEntries(ctx, sourceClient, table)
destCount := countEntries(ctx, destClient, table)
entry := logging.WithFields("table", table, "dest", destCount, "source", sourceCount)
if sourceCount == destCount {
entry.Debug("equal count")
continue
}
entry.WithField("diff", destCount-sourceCount).Info("unequal count")
}
}
}
func getTables(ctx context.Context, dest *database.DB, schema string) (tables []string) {
err := dest.QueryContext(
ctx,
func(r *sql.Rows) error {
for r.Next() {
var table string
if err := r.Scan(&table); err != nil {
return err
}
tables = append(tables, table)
}
return r.Err()
},
"SELECT CONCAT(schemaname, '.', tablename) FROM pg_tables WHERE schemaname = $1",
schema,
)
logging.WithFields("schema", schema).OnError(err).Fatal("unable to query tables")
return tables
}
func getViews(ctx context.Context, dest *database.DB, schema string) (tables []string) {
err := dest.QueryContext(
ctx,
func(r *sql.Rows) error {
for r.Next() {
var table string
if err := r.Scan(&table); err != nil {
return err
}
tables = append(tables, table)
}
return r.Err()
},
"SELECT CONCAT(schemaname, '.', viewname) FROM pg_views WHERE schemaname = $1",
schema,
)
logging.WithFields("schema", schema).OnError(err).Fatal("unable to query views")
return tables
}
func countEntries(ctx context.Context, client *database.DB, table string) (count int) {
err := client.QueryRowContext(
ctx,
func(r *sql.Row) error {
return r.Scan(&count)
},
fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause()),
)
logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count")
return count
}


@ -26,6 +26,8 @@ type FirstInstance struct {
PatPath string
Features *command.InstanceFeatures
Skip bool
instanceSetup command.InstanceSetup
userEncryptionKey *crypto.KeyConfig
smtpEncryptionKey *crypto.KeyConfig
@ -42,6 +44,9 @@ type FirstInstance struct {
}
func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error {
if mig.Skip {
return nil
}
keyStorage, err := mig.verifyEncryptionKeys(ctx)
if err != nil {
return err


@ -28,6 +28,7 @@ import (
)
type Config struct {
ForMirror bool
Database database.Config
SystemDefaults systemdefaults.SystemDefaults
InternalAuthZ internal_authz.Config


@ -34,6 +34,8 @@ import (
notify_handler "github.com/zitadel/zitadel/internal/notification"
"github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/query/projection"
es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
"github.com/zitadel/zitadel/internal/webauthn"
)
@ -57,13 +59,16 @@ Requirements:
err = BindInitProjections(cmd)
logging.OnError(err).Fatal("unable to bind \"init-projections\" flag")
err = bindForMirror(cmd)
logging.OnError(err).Fatal("unable to bind \"for-mirror\" flag")
config := MustNewConfig(viper.GetViper())
steps := MustNewSteps(viper.New())
masterKey, err := key.MasterKey(cmd)
logging.OnError(err).Panic("No master key provided")
Setup(config, steps, masterKey)
Setup(cmd.Context(), config, steps, masterKey)
},
}
@ -77,6 +82,7 @@ Requirements:
func Flags(cmd *cobra.Command) {
cmd.PersistentFlags().StringArrayVar(&stepFiles, "steps", nil, "paths to step files to overwrite default steps")
cmd.Flags().Bool("init-projections", viper.GetBool("InitProjections"), "beta feature: initializes projections after they are created, allows smooth start as projections are up to date")
cmd.Flags().Bool("for-mirror", viper.GetBool("ForMirror"), "use this flag if you want to mirror your existing data")
key.AddMasterKeyFlag(cmd)
tls.AddTLSModeFlag(cmd)
}
@ -85,8 +91,11 @@ func BindInitProjections(cmd *cobra.Command) error {
return viper.BindPFlag("InitProjections.Enabled", cmd.Flags().Lookup("init-projections"))
}
func Setup(config *Config, steps *Steps, masterKey string) {
ctx := context.Background()
func bindForMirror(cmd *cobra.Command) error {
return viper.BindPFlag("ForMirror", cmd.Flags().Lookup("for-mirror"))
}
func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) {
logging.Info("setup started")
i18n.MustLoadSupportedLanguagesFromDir()
@ -102,10 +111,14 @@ func Setup(config *Config, steps *Steps, masterKey string) {
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
eventstoreClient := eventstore.NewEventstore(config.Eventstore)
logging.OnError(err).Fatal("unable to start eventstore")
eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{
MaxRetries: config.Eventstore.MaxRetries,
}))
steps.s1ProjectionTable = &ProjectionTable{dbClient: queryDBClient.DB}
steps.s2AssetsTable = &AssetTable{dbClient: queryDBClient.DB}
steps.FirstInstance.Skip = config.ForMirror || steps.FirstInstance.Skip
steps.FirstInstance.instanceSetup = config.DefaultInstance
steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User
steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP
@ -197,10 +210,11 @@ func Setup(config *Config, steps *Steps, masterKey string) {
}
// projection initialization must be done last, since the steps above might add required columns to the projections
if config.InitProjections.Enabled {
if !config.ForMirror && config.InitProjections.Enabled {
initProjections(
ctx,
eventstoreClient,
eventstoreV4,
queryDBClient,
projectionDBClient,
masterKey,
@ -222,6 +236,7 @@ func readStmt(fs embed.FS, folder, typ, filename string) (string, error) {
func initProjections(
ctx context.Context,
eventstoreClient *eventstore.Eventstore,
eventstoreV4 *es_v4.EventStore,
queryDBClient,
projectionDBClient *database.DB,
masterKey string,
@ -278,6 +293,7 @@ func initProjections(
queries, err := query.StartQueries(
ctx,
eventstoreClient,
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Projections,


@ -1,5 +1,7 @@
# By using the FirstInstance section, you can overwrite the DefaultInstance configuration for the first instance created by zitadel setup.
FirstInstance:
# If set to true zitadel is setup without initial data
Skip: false
# The machine key from the section FirstInstance.Org.Machine.MachineKey is written to the MachineKeyPath.
MachineKeyPath: # ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH
# The personal access token from the section FirstInstance.Org.Machine.Pat is written to the PatPath.


@ -78,6 +78,8 @@ import (
"github.com/zitadel/zitadel/internal/notification"
"github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/static"
es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
"github.com/zitadel/zitadel/internal/webauthn"
"github.com/zitadel/zitadel/openapi"
)
@ -153,12 +155,16 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
config.Eventstore.Querier = old_es.NewCRDB(queryDBClient)
eventstoreClient := eventstore.NewEventstore(config.Eventstore)
eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{
MaxRetries: config.Eventstore.MaxRetries,
}))
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
queries, err := query.StartQueries(
ctx,
eventstoreClient,
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Projections,


@ -36,7 +36,7 @@ Requirements:
setupConfig := setup.MustNewConfig(viper.GetViper())
setupSteps := setup.MustNewSteps(viper.New())
setup.Setup(setupConfig, setupSteps, masterKey)
setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)
startConfig := MustNewConfig(viper.GetViper())


@ -34,7 +34,7 @@ Requirements:
setupConfig := setup.MustNewConfig(viper.GetViper())
setupSteps := setup.MustNewSteps(viper.New())
setup.Setup(setupConfig, setupSteps, masterKey)
setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)
startConfig := MustNewConfig(viper.GetViper())


@ -15,6 +15,7 @@ import (
"github.com/zitadel/zitadel/cmd/build"
"github.com/zitadel/zitadel/cmd/initialise"
"github.com/zitadel/zitadel/cmd/key"
"github.com/zitadel/zitadel/cmd/mirror"
"github.com/zitadel/zitadel/cmd/ready"
"github.com/zitadel/zitadel/cmd/setup"
"github.com/zitadel/zitadel/cmd/start"
@ -55,6 +56,7 @@ func New(out io.Writer, in io.Reader, args []string, server chan<- *start.Server
start.New(server),
start.NewStartFromInit(server),
start.NewStartFromSetup(server),
mirror.New(),
key.New(),
ready.New(),
)


@ -281,7 +281,7 @@ ZITADEL hosts everything under a single domain: `{instance}.zitadel.cloud` or yo
The domain is used as the OIDC issuer and as the base url for the gRPC and REST APIs, the Login and Console UI, which you'll find under `{your_domain}/ui/console/`.
Are you self-hosting and having troubles with *Instance not found* errors? [Check out this page](https://zitadel.com/docs/self-hosting/manage/custom-domain).
Are you self-hosting and having troubles with *Instance not found* errors? [Check out this page](/docs/self-hosting/manage/custom-domain).
## API path prefixes


@ -102,7 +102,7 @@ composer require drenso/symfony-oidc-bundle
First, we need to create a User class for the database, so we can persist user info between requests. In this case you don't need password authentication.
Email addresses are not unique for ZITADEL users. There can be multiple user accounts with the same email address.
See [User Constraints](https://zitadel.com/docs/concepts/structure/users#constraints) for more details.
See [User Constraints](/docs/concepts/structure/users#constraints) for more details.
We will use the User Info `sub` claim as unique "display" name for the user. `sub` equals the unique User ID from ZITADEL.
This creates a User Repository and Entity that implements the `UserInterface`:


@ -9,9 +9,9 @@ This documentation section guides you through the process of integrating ZITADEL
## Overview
The NestJS API includes a single secured route that prints "Hello World!" when authenticated. The API expects an authorization header with a valid JWT, serving as a bearer token to authenticate the user when calling the API. The API will validate the access token on the [introspect endpoint](https://zitadel.com/docs/apis/openidoauth/endpoints#introspection_endpoint) and receive the user from ZITADEL.
The NestJS API includes a single secured route that prints "Hello World!" when authenticated. The API expects an authorization header with a valid JWT, serving as a bearer token to authenticate the user when calling the API. The API will validate the access token on the [introspect endpoint](/docs/apis/openidoauth/endpoints#introspection_endpoint) and receive the user from ZITADEL.
The API application utilizes [JWT with Private Key](https://zitadel.com/docs/apis/openidoauth/authn-methods#jwt-with-private-key) for authentication against ZITADEL and accessing the introspection endpoint. Make sure to create an API Application within Zitadel and download the JSON. In this instance, we use this service account, so make sure to provide the secrets in the example application via environmental variables.
The API application utilizes [JWT with Private Key](/docs/apis/openidoauth/authn-methods#jwt-with-private-key) for authentication against ZITADEL and accessing the introspection endpoint. Make sure to create an API Application within Zitadel and download the JSON. In this instance, we use this service account, so make sure to provide the secrets in the example application via environmental variables.
## Overview
@ -25,7 +25,7 @@ Make sure you have Node.js and npm installed on your machine.
### ZITADEL Configuration for the API
1. Create a ZITADEL instance and a project by following the steps [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance).
1. Create a ZITADEL instance and a project by following the steps [here](/docs/guides/start/quickstart#2-create-your-first-instance).
2. Set up an API application within your project:
- Create a new application of type "API" with authentication method "Private Key".


@ -145,7 +145,7 @@ python manage.py runserver
### Call the API
To call the API you need an access token, which is then verified by ZITADEL.
Please follow [this guide here](https://zitadel.com/docs/guides/integrate/private-key-jwt#get-an-access-token), ignoring the first step as we already have the `.json`-key-file from the serviceaccount.
Please follow [this guide here](/docs/guides/integrate/token-introspection/private-key-jwt#get-an-access-token), ignoring the first step as we already have the `.json`-key-file from the serviceaccount.
Optionally set the token as an environment variable:
```


@ -12,11 +12,11 @@ This example shows you how to secure a Python3 Flask API with both authenticatio
The Python API will have public, private, and private-scoped routes and check if a user is authenticated and authorized to access the routes.
The private routes expect an authorization header with a valid access token in the request. The access token is used as a bearer token to authenticate the user when calling the API.
The API will validate the access token on the [introspect endpoint](https://zitadel.com/docs/apis/openidoauth/endpoints#introspection_endpoint) and will receive the user's roles from ZITADEL.
The API will validate the access token on the [introspect endpoint](/docs/apis/openidoauth/endpoints#introspection_endpoint) and will receive the user's roles from ZITADEL.
The API application uses [Client Secret Basic](https://zitadel.com/docs/apis/openidoauth/authn-methods#client-secret-basic) to authenticate against ZITADEL and access the introspection endpoint.
The API application uses [Client Secret Basic](/docs/apis/openidoauth/authn-methods#client-secret-basic) to authenticate against ZITADEL and access the introspection endpoint.
You can use any valid access_token from a user or service account to send requests to the example API.
In this example we will use a service account with a [personal access token](https://zitadel.com/docs/guides/integrate/service-users/personal-access-token) which can be used directly to access the example API.
In this example we will use a service account with a [personal access token](/docs/guides/integrate/service-users/personal-access-token) which can be used directly to access the example API.
## Running the example
@ -31,9 +31,9 @@ In order to run the example you need to have `python3` and `pip3` installed.
You need to setup a couple of things in ZITADEL.
1. If you don't have an instance yet, please go ahead and create an instance as explained [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance). Also, create a new project by following the steps [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance).
1. If you don't have an instance yet, please go ahead and create an instance as explained [here](/docs/guides/start/quickstart#2-create-your-first-instance). Also, create a new project by following the steps [here](/docs/guides/start/quickstart#2-create-your-first-instance).
2. You must create an API application in your project. Follow [this guide](https://zitadel.com/docs/guides/manage/console/applications) to create a new application of type "API" with authentication method "Basic". Save both the ClientID and ClientSecret after you create the application.
2. You must create an API application in your project. Follow [this guide](/docs/guides/manage/console/applications) to create a new application of type "API" with authentication method "Basic". Save both the ClientID and ClientSecret after you create the application.
### Create the API
@ -179,7 +179,7 @@ class ZitadelIntrospectTokenValidator(IntrospectTokenValidator):
res = self.introspect_token(*args, **kwargs)
return res
```
3. Create a new file named ".env" in the directory. Copy the configuration in the [".env.example"](https://github.com/zitadel/example-api-python3-flask/blob/main/.env.example) file to the newly created .env file. Set the values with your Instance Domain/Issuer URL, Client ID, and Client Secret from the previous steps. Obtain your Issuer URL by following [these steps](https://zitadel.com/docs/guides/start/quickstart#referred1).
3. Create a new file named ".env" in the directory. Copy the configuration in the [".env.example"](https://github.com/zitadel/example-api-python3-flask/blob/main/.env.example) file to the newly created .env file. Set the values with your Instance Domain/Issuer URL, Client ID, and Client Secret from the previous steps. Obtain your Issuer URL by following [these steps](/docs/guides/start/quickstart#referred1).
```python
ZITADEL_DOMAIN = "https://your-domain-abcdef.zitadel.cloud"
@ -191,9 +191,9 @@ CLIENT_SECRET = "NVAp70IqiGmJldbS...."
![Create a service user](/img/python-flask/3.png)
1. Create a service user and a Personal Access Token (PAT) for that user by following [this guide](https://zitadel.com/docs/guides/integrate/service-users/personal-access-token#create-a-service-user-with-a-pat).
2. To enable authorization, follow [this guide](https://zitadel.com/docs/guides/manage/console/roles) to create a role `read:messages` on your project.
3. Next, create an authorization for the service user you created by adding the role `read:messages` to the user. Follow this [guide](https://zitadel.com/docs/guides/manage/console/roles#authorizations) for more information on creating an authorization.
1. Create a service user and a Personal Access Token (PAT) for that user by following [this guide](/docs/guides/integrate/service-users/personal-access-token#create-a-service-user-with-a-pat).
2. To enable authorization, follow [this guide](/docs/guides/manage/console/roles) to create a role `read:messages` on your project.
3. Next, create an authorization for the service user you created by adding the role `read:messages` to the user. Follow this [guide](/docs/guides/manage/console/roles#authorizations) for more information on creating an authorization.
### Run the API

View File

@ -131,9 +131,9 @@ In the guides below, some of which utilize the Generic OIDC or SAML templates fo
If ZITADEL doesn't offer a specific template for your Identity Provider (IdP) and your IdP is fully compliant with OpenID Connect (OIDC), you have the option to use the generic OIDC provider configuration.
For those utilizing a SAML Service Provider, the SAML Service Provider option is available. You can learn how to set up a SAML Service Provider with our [MockSAML example](https://zitadel.com/docs/guides/integrate/identity-providers/mocksaml).
For those utilizing a SAML Service Provider, the SAML Service Provider option is available. You can learn how to set up a SAML Service Provider with our [MockSAML example](/docs/guides/integrate/identity-providers/mocksaml).
Should you wish to transition from a generic OIDC provider to Entra ID (formerly Azure Active Directory) or Google, consider following this [guide](https://zitadel.com/docs/guides/integrate/identity-providers/migrate).
Should you wish to transition from a generic OIDC provider to Entra ID (formerly Azure Active Directory) or Google, consider following this [guide](/docs/guides/integrate/identity-providers/migrate).
@ -176,6 +176,6 @@ Deciding whether to configure an external Identity Provider (IdP) at the organiz
## References
- [Identity brokering in ZITADEL](https://zitadel.com/docs/concepts/features/identity-brokering)
- [The ZITADEL API reference for managing external IdPs](https://zitadel.com/docs/category/apis/resources/admin/identity-providers)
- [Handle external logins in a custom login UI](https://zitadel.com/docs/guides/integrate/login-ui/external-login)
- [Identity brokering in ZITADEL](/docs/concepts/features/identity-brokering)
- [The ZITADEL API reference for managing external IdPs](/docs/category/apis/resources/admin/identity-providers)
- [Handle external logins in a custom login UI](/docs/guides/integrate/login-ui/external-login)

View File

@ -1,5 +1,5 @@
When your user is done using your application and clicks on the logout button, you have to send a request to the terminate session endpoint.
[Terminate Session Documentation](https://zitadel.com/docs/apis/resources/session_service/session-service-delete-session)
[Terminate Session Documentation](/docs/apis/resources/session_service/session-service-delete-session)
Sessions can be terminated by either:
- the authenticated user

View File

@ -3,7 +3,7 @@ If you want to build your own select account/account picker, you have to cache t
We recommend storing a list of the session Ids with the corresponding session token in a cookie.
The list of session IDs can be sent in the “search sessions” request to get a detailed list of sessions for the account selection.
[Search Sessions Documentation](https://zitadel.com/docs/apis/resources/session_service/session-service-list-sessions)
[Search Sessions Documentation](/docs/apis/resources/session_service/session-service-list-sessions)
### Request

View File

@ -115,7 +115,7 @@ We do have a guide series on how to build your own login ui, which also includes
- Passkeys
- External Login Providers
You can find all the guides here: [Build your own login UI](https://zitadel.com/docs/guides/integrate/login-ui)
You can find all the guides here: [Build your own login UI](/docs/guides/integrate/login-ui)
The create user request also allows you to add metadata (key, value) to the user.
This gives you the possibility to collect additional data from your users during the registration process and store it directly to the user in ZITADEL.

View File

@ -5,7 +5,7 @@ sidebar_label: Basic Authentication
import IntrospectionResponse from './_introspection-response.mdx';
This is a guide on how to secure your API using [Basic Authentication](https://zitadel.com/docs/apis/openidoauth/authn-methods#client-secret-basic).
This is a guide on how to secure your API using [Basic Authentication](/docs/apis/openidoauth/authn-methods#client-secret-basic).
## Register the API in ZITADEL

View File

@ -7,7 +7,7 @@ ZITADEL leverages the power of eventsourcing, meaning every action and change wi
To provide you with greater flexibility and access to these events, ZITADEL has introduced an Event API.
This API allows you to easily retrieve and utilize the events generated within the system, enabling you to integrate them into your own system and respond to specific events as they occur.
You need to give a user the [manager role](https://zitadel.com/docs/guides/manage/console/managers) IAM_OWNER_VIEWER or IAM_OWNER to access the Event API.
You need to give a user the [manager role](/docs/guides/manage/console/managers) IAM_OWNER_VIEWER or IAM_OWNER to access the Event API.
If you like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview).
## Request Events

View File

@ -19,7 +19,7 @@ You can subscribe and unsubscribe to notifications and newsletters:
- Security: Receive notifications related to security issues
:::info Technical Advisories
If you want to stay up to date on our technical advisories, we recommend [subscribing here to the mailing list](https://zitadel.com/docs/support/technical_advisory#subscribe-to-our-mailing-list).
If you want to stay up to date on our technical advisories, we recommend [subscribing here to the mailing list](/docs/support/technical_advisory#subscribe-to-our-mailing-list).
Technical advisories are notices that report major issues with ZITADEL Self-Hosted or the ZITADEL Cloud platform that could potentially impact security or stability in production environments.
:::

View File

@ -5,7 +5,7 @@ sidebar_label: From Keycloak
## Migrating from Keycloak to ZITADEL
This guide will use [Docker installation](https://www.docker.com/) to run Keycloak and ZITADEL. However, both Keycloak and ZITADEL offer different installation methods. As a result, this guide won't include any required production tuning or security hardening for either system. However, it's advised you follow [recommended guidelines](https://zitadel.com/docs/guides/manage/self-hosted/production) before putting those systems into production. You can skip setting up Keycloak and ZITADEL if you already have running instances.
This guide will use [Docker installation](https://www.docker.com/) to run Keycloak and ZITADEL. However, both Keycloak and ZITADEL offer different installation methods. As a result, this guide won't include any required production tuning or security hardening for either system. However, it's advised you follow [recommended guidelines](/docs/self-hosting/manage/production) before putting those systems into production. You can skip setting up Keycloak and ZITADEL if you already have running instances.
## Set up Keycloak
### Run Keycloak
@ -77,7 +77,7 @@ docker cp <keycloak container ID>:/tmp/my-realm-users-0.json .
## Set up ZITADEL
After creating a sample application that connects to Keycloak, you need to set up ZITADEL in order to migrate the application and users from Keycloak to ZITADEL. For this, ZITADEL offers a [Docker Compose](https://zitadel.com/docs/self-hosting/deploy/compose) installation guide. Follow the instructions under the [Docker compose](https://zitadel.com/docs/self-hosting/deploy/compose#docker-compose) section to run a ZITADEL instance locally.
After creating a sample application that connects to Keycloak, you need to set up ZITADEL in order to migrate the application and users from Keycloak to ZITADEL. For this, ZITADEL offers a [Docker Compose](/docs/self-hosting/deploy/compose) installation guide. Follow the instructions under the [Docker compose](/docs/self-hosting/deploy/compose#docker-compose) section to run a ZITADEL instance locally.
Next, the application will be available at [http://localhost:8080/ui/console/](http://localhost:8080/ui/console/).
@ -91,13 +91,13 @@ Now you can access the console with the following default credentials:
## Import Keycloak users into ZITADEL
As explained in this [ZITADEL user migration guide](https://zitadel.com/docs/guides/migrate/users), you can import users individually or in bulk. Since we are looking at importing a single user from Keycloak, migrating that individual user to ZITADEL can be done with the [ImportHumanUser](https://zitadel.com/docs/apis/resources/mgmt/management-service-import-human-user) endpoint.
As explained in this [ZITADEL user migration guide](/docs/guides/migrate/users), you can import users individually or in bulk. Since we are looking at importing a single user from Keycloak, migrating that individual user to ZITADEL can be done with the [ImportHumanUser](/docs/apis/resources/mgmt/management-service-import-human-user) endpoint.
> With this endpoint, an email will only be sent to the user if the email is marked as not verified or if there's no password set.
### Create a service user to consume ZITADEL API
But first of all, in order to use this ZITADEL API, you need to create a [service user](https://zitadel.com/docs/guides/integrate/service-users/authenticate-service-users#exercise-create-a-service-user).
But first of all, in order to use this ZITADEL API, you need to create a [service user](/docs/guides/integrate/service-users/authenticate-service-users#exercise-create-a-service-user).
Go to the **Users** menu and select the **Service Users** tab. And click the **+ New** button.
@ -167,7 +167,7 @@ if your Keycloak Realm has a single user, your `my-realm-users-0.json` file, int
}
```
Now, you need to transform the JSON to the ZITADEL data format by adhering to the ZITADEL API [specification](https://zitadel.com/docs/apis/resources/mgmt/management-service-import-human-user) to import a user. The minimal format would be as shown below:
Now, you need to transform the JSON to the ZITADEL data format by adhering to the ZITADEL API [specification](/docs/apis/resources/mgmt/management-service-import-human-user) to import a user. The minimal format would be as shown below:
```js
{

View File

@ -42,7 +42,7 @@ Please also consult our [guide](/docs/guides/manage/user/reg-create-user) on how
## Bulk import
For bulk import use the [import endpoint](https://zitadel.com/docs/apis/resources/admin/admin-service-import-data) on the admin API:
For bulk import use the [import endpoint](/docs/apis/resources/admin/admin-service-import-data) on the admin API:
```json
{
@ -191,7 +191,7 @@ Currently it is not possible to migrate passkeys directly from another system.
## Users linked to an external IDP
A users `sub` is bound to the external [IDP's Client ID](https://zitadel.com/docs/guides/manage/console/default-settings#identity-providers).
A users `sub` is bound to the external [IDP's Client ID](/docs/guides/manage/console/default-settings#identity-providers).
This means that the IDP Client ID configured in ZITADEL must be the same ID as in the legacy system.
Users should be imported with their `externalUserId`.
@ -211,7 +211,7 @@ _snippet from [bulk-import](#bulk-import) example:_
}
```
You can use an Action with [post-creation flow](https://zitadel.com/docs/apis/actions/external-authentication#post-creation) to pull information such as roles from the old system and apply them to the user in ZITADEL.
You can use an Action with [post-creation flow](/docs/apis/actions/external-authentication#post-creation) to pull information such as roles from the old system and apply them to the user in ZITADEL.
## Metadata
@ -220,7 +220,7 @@ Use metadata to store additional attributes of the users, such as organizational
:::info
Metadata must be added to users after the users were created. Currently metadata can't be added during user creation.
[API reference: User Metadata](https://zitadel.com/docs/category/apis/resources/mgmt/user-metadata)
[API reference: User Metadata](/docs/category/apis/resources/mgmt/user-metadata)
:::
Request metadata from the userinfo endpoint by passing the required [reserved scope](/docs/apis/openidoauth/scopes#reserved-scopes) in your auth request.
@ -232,5 +232,5 @@ You can assign roles from owned or granted projects to a user.
:::info
Authorizations must be added to users after the users were created. Currently metadata can't be added during user creation.
[API reference: User Authorization / Grants](https://zitadel.com/docs/category/apis/resources/auth/user-authorizations-grants)
[API reference: User Authorization / Grants](/docs/category/apis/resources/auth/user-authorizations-grants)
:::

View File

@ -353,7 +353,7 @@ The provided config extends the `UserManagerSettings` of the `oidc-client-ts` li
- redirect_uri (the URL to redirect to after the authorization flow is complete)
- post_logout_redirect_uri (the URL to redirect to after the user logs out)
- scope (the permissions requested from the user)
- project_resource_id (To add a ZITADEL project scope. `urn:zitadel:iam:org:project:id:[projectId]:aud` and `urn:zitadel:iam:org:projects:roles` [scopes](https://zitadel.com/docs/apis/openidoauth/scopes#reserved-scopes).)
- project_resource_id (To add a ZITADEL project scope. `urn:zitadel:iam:org:project:id:[projectId]:aud` and `urn:zitadel:iam:org:projects:roles` [scopes](/docs/apis/openidoauth/scopes#reserved-scopes).)
- prompt ([the OIDC prompt parameter](/apis/openidoauth/endpoints#additional-parameters))
2. Create a folder named components in the src directory. Create two files named Login.js and Callback.js.
@ -412,4 +412,4 @@ And this brings us to the end of this quick start guide!
This tutorial covered how to configure ZITADEL and how to use React to build an app that communicates with ZITADEL to access secured resources.
We hope you enjoyed the tutorial and encourage you to check out the ZITADEL [documentation](https://zitadel.com/docs) for more information on how to use the ZITADEL platform to its full potential. Thanks for joining us!
We hope you enjoyed the tutorial and encourage you to check out the ZITADEL [documentation](/docs) for more information on how to use the ZITADEL platform to its full potential. Thanks for joining us!

View File

@ -34,7 +34,7 @@ To ensure the logo is used as intended, we provide specific examples below and r
- Use in architecture diagrams without implying affiliation or partnership
- Editorial and informational purposes such as blog posts or news articles
- Linking back to our [website](https://zitadel.com), official [repositories](https://github.com/zitadel), or [documentation](https://zitadel.com/docs)
- Linking back to our [website](https://zitadel.com), official [repositories](https://github.com/zitadel), or [documentation](/docs)
- Indicating that the software is available for use or installation without implying any affiliation or endorsement
### Not acceptable

View File

@ -57,7 +57,7 @@ We will not publish this information by default to protect your privacy.
### What not to report
- Disclosure of known public files or directories, e.g. robots.txt, files under .well-known, or files that are included in our public repositories (eg, go.mod)
- DoS of users when [Lockout Policy is enabled](https://zitadel.com/docs/guides/manage/console/default-settings#lockout)
- DoS of users when [Lockout Policy is enabled](/docs/guides/manage/console/default-settings#lockout)
- Suggestions on Certificate Authority Authorization (CAA) rules
- Suggestions on DMARC/DKIM/SPF settings
- Suggestions on DNSSEC settings

View File

@ -1,5 +1,5 @@
`ID=QUERY-n0wng Message=Instance not found`
If you're self hosting with a custom domain, you need to instruct ZITADEL to use the `ExternalDomain`.
You can find further instructions in our guide about [custom domains](https://zitadel.com/docs/self-hosting/manage/custom-domain).
We also provide a guide on how to [configure](https://zitadel.com/docs/self-hosting/manage/configure) ZITADEL with variables from files or environment variables.
You can find further instructions in our guide about [custom domains](/docs/self-hosting/manage/custom-domain).
We also provide a guide on how to [configure](/docs/self-hosting/manage/configure) ZITADEL with variables from files or environment variables.

View File

@ -0,0 +1,232 @@
---
title: Mirror data to another database
sidebar_label: Mirror command
---
The `mirror` command allows you to run database-to-database migrations, copying data from one database to another.
The data can be mirrored to multiple destination databases without the copies influencing each other.
## Use cases
Migrate from CockroachDB to PostgreSQL or vice versa.
Replicate data to a secondary environment for testing.
## Prerequisites
You need an existing source database, most probably the database ZITADEL currently serves traffic from.
To mirror the data, the destination database needs to be initialized and set up without an instance.
### Start the destination database
Use one of the following guides to start the database:
* [Linux](/docs/self-hosting/deploy/linux#run-postgresql)
* [MacOS](/docs/self-hosting/deploy/macos#install-postgresql)
Or use the following commands for [Docker Compose](/docs/self-hosting/deploy/compose)
```bash
# Download the docker compose example configuration.
wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose.yaml
# Run the database and application containers.
docker compose up db --detach
```
## Example
The following commands set up the destination database as described above. See [configuration](#configuration) for more details about the configuration options.
```bash
zitadel init --config /path/to/your/new/config.yaml
zitadel setup --for-mirror --config /path/to/your/new/config.yaml # make sure to set --tlsMode and the masterkey the same as in your current deployment
zitadel mirror --system --config /path/to/your/mirror/config.yaml # make sure to set --tlsMode and the masterkey the same as in your current deployment
```
## Usage
The general syntax for the mirror command is:
```bash
zitadel mirror [flags]
Flags:
-h, --help help for mirror
--config stringArray path to config file to overwrite system defaults
--ignore-previous ignores previous migrations of the events table. This flag should be used if you manually dropped previously mirrored events.
--replace replaces all data of the following tables for the provided instances or all if the `--system`-flag is set:
* system.assets
* auth.auth_requests
* eventstore.unique_constraints
The flag should be provided if you want to execute the mirror command multiple times, so that the static data are mirrored again and inconsistent states are prevented.
--instance strings id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.
--system migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.
# For the flags below use the same configuration you also use in the current deployment
--masterkey string masterkey as argument for en/decryption keys
-m, --masterkeyFile string path to the masterkey for en/decryption keys
--masterkeyFromEnv read masterkey for en/decryption keys from environment variable (ZITADEL_MASTERKEY)
--tlsMode externalSecure start ZITADEL with (enabled), without (disabled) TLS or external component e.g. reverse proxy (external) terminating TLS, this flag will overwrite externalSecure and `tls.enabled` in configs files
```
## Configuration
```yaml
# The source database the data are copied from. Use either cockroach or postgres, by default cockroach is used
Source:
cockroach:
Host: localhost # ZITADEL_SOURCE_COCKROACH_HOST
Port: 26257 # ZITADEL_SOURCE_COCKROACH_PORT
Database: zitadel # ZITADEL_SOURCE_COCKROACH_DATABASE
MaxOpenConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXIDLECONNS
EventPushConnRatio: 0.33 # ZITADEL_SOURCE_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.33 # ZITADEL_SOURCE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
MaxConnLifetime: 30m # ZITADEL_SOURCE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_SOURCE_COCKROACH_MAXCONNIDLETIME
Options: "" # ZITADEL_SOURCE_COCKROACH_OPTIONS
User:
Username: zitadel # ZITADEL_SOURCE_COCKROACH_USER_USERNAME
Password: "" # ZITADEL_SOURCE_COCKROACH_USER_PASSWORD
SSL:
Mode: disable # ZITADEL_SOURCE_COCKROACH_USER_SSL_MODE
RootCert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_ROOTCERT
Cert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_CERT
Key: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_KEY
# Postgres is used as soon as a value is set
# The fields below show the possible configuration values
postgres:
Host: # ZITADEL_SOURCE_POSTGRES_HOST
Port: # ZITADEL_SOURCE_POSTGRES_PORT
Database: # ZITADEL_SOURCE_POSTGRES_DATABASE
MaxOpenConns: # ZITADEL_SOURCE_POSTGRES_MAXOPENCONNS
MaxIdleConns: # ZITADEL_SOURCE_POSTGRES_MAXIDLECONNS
MaxConnLifetime: # ZITADEL_SOURCE_POSTGRES_MAXCONNLIFETIME
MaxConnIdleTime: # ZITADEL_SOURCE_POSTGRES_MAXCONNIDLETIME
Options: # ZITADEL_SOURCE_POSTGRES_OPTIONS
User:
Username: # ZITADEL_SOURCE_POSTGRES_USER_USERNAME
Password: # ZITADEL_SOURCE_POSTGRES_USER_PASSWORD
SSL:
Mode: # ZITADEL_SOURCE_POSTGRES_USER_SSL_MODE
RootCert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_ROOTCERT
Cert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_CERT
Key: # ZITADEL_SOURCE_POSTGRES_USER_SSL_KEY
# The destination database the data are copied to. Use either cockroach or postgres, by default cockroach is used
Destination:
cockroach:
Host: localhost # ZITADEL_DESTINATION_COCKROACH_HOST
Port: 26257 # ZITADEL_DESTINATION_COCKROACH_PORT
Database: zitadel # ZITADEL_DESTINATION_COCKROACH_DATABASE
MaxOpenConns: 0 # ZITADEL_DESTINATION_COCKROACH_MAXOPENCONNS
MaxIdleConns: 0 # ZITADEL_DESTINATION_COCKROACH_MAXIDLECONNS
MaxConnLifetime: 30m # ZITADEL_DESTINATION_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DESTINATION_COCKROACH_MAXCONNIDLETIME
EventPushConnRatio: 0.01 # ZITADEL_DESTINATION_COCKROACH_EVENTPUSHCONNRATIO
ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DESTINATION_COCKROACH_PROJECTIONSPOOLERCONNRATIO
Options: "" # ZITADEL_DESTINATION_COCKROACH_OPTIONS
User:
Username: zitadel # ZITADEL_DESTINATION_COCKROACH_USER_USERNAME
Password: "" # ZITADEL_DESTINATION_COCKROACH_USER_PASSWORD
SSL:
Mode: disable # ZITADEL_DESTINATION_COCKROACH_USER_SSL_MODE
RootCert: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_ROOTCERT
Cert: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_CERT
Key: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_KEY
# Postgres is used as soon as a value is set
# The fields below show the possible configuration values
postgres:
Host: # ZITADEL_DESTINATION_POSTGRES_HOST
Port: # ZITADEL_DESTINATION_POSTGRES_PORT
Database: # ZITADEL_DESTINATION_POSTGRES_DATABASE
MaxOpenConns: # ZITADEL_DESTINATION_POSTGRES_MAXOPENCONNS
MaxIdleConns: # ZITADEL_DESTINATION_POSTGRES_MAXIDLECONNS
MaxConnLifetime: # ZITADEL_DESTINATION_POSTGRES_MAXCONNLIFETIME
MaxConnIdleTime: # ZITADEL_DESTINATION_POSTGRES_MAXCONNIDLETIME
Options: # ZITADEL_DESTINATION_POSTGRES_OPTIONS
User:
Username: # ZITADEL_DESTINATION_POSTGRES_USER_USERNAME
Password: # ZITADEL_DESTINATION_POSTGRES_USER_PASSWORD
SSL:
Mode: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_MODE
RootCert: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_ROOTCERT
Cert: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_CERT
Key: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_KEY
# As CockroachDB first copies the data into memory, this parameter is used to iterate through the events table and fetch only the given number of events per iteration
EventBulkSize: 10000 # ZITADEL_EVENTBULKSIZE
Projections:
# Defines how many projections are allowed to run in parallel
ConcurrentInstances: 7 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES
# Limits the amount of events projected by each iteration
EventBulkLimit: 1000 # ZITADEL_PROJECTIONS_EVENTBULKLIMIT
Auth:
Spooler:
# Limits the amount of events projected by each iteration
BulkLimit: 1000 #ZITADEL_AUTH_SPOOLER_BULKLIMIT
Admin:
Spooler:
# Limits the amount of events projected by each iteration
BulkLimit: 10 #ZITADEL_ADMIN_SPOOLER_BULKLIMIT
Log:
Level: info
```
## Sub commands
The provided sub commands allow a more fine-grained mirror of the data.
The following commands are safe to execute multiple times by adding the `--replace`-flag, which replaces the data in the destination database that cannot be derived from the events.
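For illustration, a minimal sketch that mirrors the events first and then runs the projections, assuming the flags shown in the [usage](#usage) section are also accepted by the sub commands:
```bash
# Mirror events and unique constraints, then compute the projections in the destination.
zitadel mirror eventstore --system --config /path/to/your/mirror/config.yaml
zitadel mirror projections --system --config /path/to/your/mirror/config.yaml
```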
### `zitadel mirror auth`
Copies the auth requests to the destination database.
### `zitadel mirror eventstore`
Copies the events since the last migration and unique constraints to the destination database.
### `zitadel mirror projections`
Executes all projections in the destination database.
It is a no-op if the projections are already up-to-date.
### `zitadel mirror system`
Copies encryption keys and assets to the destination database.
### `zitadel mirror verify`
Prints the number of rows in the source and destination databases and the difference between them. Positive numbers indicate more rows in the destination table than in the source, negative numbers the opposite.
The following tables will likely have an unequal count:
* **projections.current_states**: If your deployment was upgraded several times, the number of entries in the destination will be lower
* **projections.locks**: If your deployment was upgraded several times, the number of entries in the destination will be lower
* **projections.keys4\***: Only keys that have not expired are inserted, so the number of entries in the destination will be lower
* **projections.failed_events**: The number of entries in the destination should be lower than or equal to the source
* **auth.users2**: Was replaced by auth.users3, so the number of entries in the destination will be 0
* **auth.users3**: Is the replacement of auth.users2, so the number of entries in the destination will be equal or higher
## Limitations
It is not possible to use files as source or destination. See [this GitHub issue](https://github.com/zitadel/zitadel/issues/7966) for details.
Currently, the encryption keys of the source database must be copied to the destination database. See [this GitHub issue](https://github.com/zitadel/zitadel/issues/7964) for details.
It is not possible to change the domain of the ZITADEL deployment.
Once you have mirrored an instance using the `--instance` flag, you must make sure you don't mirror other preexisting instances. For example, you cannot mirror a few instances and then pass the `--system` flag; you have to pass all remaining instances explicitly once you have used the `--instance` flag.

View File

@ -0,0 +1,34 @@
---
title: ZITADEL Command Line Interface
sidebar_label: Overview
---
This documentation is your guide to interacting with ZITADEL through the command line interface (CLI). The ZITADEL CLI lets you manage various aspects of your ZITADEL system directly from your terminal.
This introductory section provides a brief overview of what the ZITADEL CLI offers and who can benefit from using it.
Let's dive in!
## Download the CLI
Download the CLI for [Linux](/docs/self-hosting/deploy/linux#install-zitadel) or [MacOS](/docs/self-hosting/deploy/macos#install-zitadel).
## Quick start
The easiest way to start ZITADEL is by following the [docker compose example](/docs/self-hosting/deploy/compose) which executes the commands for you.
## Initialize the database
The `zitadel init`-command sets up the ZITADEL database. The executed statements require a user with the `ADMIN` privilege. See [init phase](/docs/self-hosting/manage/updating_scaling#the-init-phase) for more information.
## Setup ZITADEL
The `zitadel setup`-command further sets up the database created using `zitadel init`. This command only requires the user created in the previous step. See [setup phase](/docs/self-hosting/manage/updating_scaling#the-setup-phase) for more information.
## Start ZITADEL
The `zitadel start`-command runs the ZITADEL server. See [runtime phase](/docs/self-hosting/manage/updating_scaling#the-runtime-phase) for more information.
The `zitadel start-from-setup`-command first executes [the setup phase](#setup-zitadel) and afterwards runs the ZITADEL server.
The `zitadel start-from-init`-command first executes [the init phase](#initialize-the-database), afterwards [the setup phase](#setup-zitadel) and lastly runs the ZITADEL server.
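As a minimal sketch of the lifecycle described above (paths are placeholders, and the masterkey flags should match your deployment):
```bash
# Initialize the database, set it up and start the server.
zitadel init --config /path/to/your/config.yaml
zitadel setup --config /path/to/your/config.yaml --masterkeyFile /path/to/your/masterkey
zitadel start --config /path/to/your/config.yaml --masterkeyFile /path/to/your/masterkey
```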

View File

@ -25,7 +25,7 @@ To apply best practices to your production setup we created a step by step check
- [ ] Secure database connections from outside your network and/or use an internal subnet for database connectivity
- [ ] High Availability for critical infrastructure components (depending on your setup)
- [ ] Loadbalancer
- [ ] [Reverse Proxies](https://zitadel.com/docs/self-hosting/manage/reverseproxy/reverse_proxy)
- [ ] [Reverse Proxies](/docs/self-hosting/manage/reverseproxy/reverse_proxy)
- [ ] Web Application Firewall
#### Networking
@ -41,7 +41,7 @@ To apply best practices to your production setup we created a step by step check
- [ ] Add [Custom Branding](/docs/guides/manage/customize/branding) if required
- [ ] Configure a valid [SMS Service](/docs/guides/manage/console/default-settings#sms) such as Twilio if needed
- [ ] Configure your privacy policy, terms of service and a help Link if needed
- [ ] Keep your [masterkey](https://zitadel.com/docs/self-hosting/manage/configure) in a secure storage
- [ ] Keep your [masterkey](/docs/self-hosting/manage/configure) in a secure storage
- [ ] Declare and apply zitadel configuration using the zitadel terraform [provider](https://github.com/zitadel/terraform-provider-zitadel)
### Security

View File

@ -21,7 +21,7 @@ If users are redirected to the Login-UI without any organizational context, they
:::note
If the registration (and also authentication) needs to occur on a specified organization, apps can already
specify this by providing [an organization scope](https://zitadel.com/docs/apis/openidoauth/scopes#reserved-scopes).
specify this by providing [an organization scope](/docs/apis/openidoauth/scopes#reserved-scopes).
:::
## Statement
@ -37,7 +37,7 @@ There's no action needed on your side currently as existing instances are not af
Once this update has been released and deployed, newly created instances will always use the default organization and its settings as default context for the login.
Already existing instances will still use the instance settings by default and can switch to the new default by ["Activating the 'LoginDefaultOrg' feature"](https://zitadel.com/docs/apis/resources/admin/admin-service-activate-feature-login-default-org) through the Admin API.
Already existing instances will still use the instance settings by default and can switch to the new default by ["Activating the 'LoginDefaultOrg' feature"](/docs/apis/resources/admin/admin-service-activate-feature-login-default-org) through the Admin API.
**This change is irreversible!**
:::note

View File

@ -74,7 +74,7 @@ During this phase, support is limited as we focus on testing and bug fixing.
### General available
Generally available features are available to everyone and have the appropriate test coverage to be used for critical tasks.
The software will be backwards-compatible with previous versions, for exceptions we will publish a [technical advisory](https://zitadel.com/docs/support/technical_advisory).
The software will be backwards-compatible with previous versions, for exceptions we will publish a [technical advisory](/docs/support/technical_advisory).
Features in General Availability are not marked explicitly.
## Release types

View File

@ -881,6 +881,18 @@ module.exports = {
"self-hosting/manage/database/database",
"self-hosting/manage/updating_scaling",
"self-hosting/manage/usage_control",
{
type: "category",
label: "Command Line Interface",
collapsed: false,
link: {
type: "doc",
id: "self-hosting/manage/cli/overview"
},
items: [
"self-hosting/manage/cli/mirror"
],
},
],
},
],

View File

@ -41,14 +41,24 @@ func Register(ctx context.Context, config Config, view *view.View, static static
))
}
func Projections() []*handler2.Handler {
return projections
}
func Start(ctx context.Context) {
for _, projection := range projections {
projection.Start(ctx)
}
}
func Projections() []*handler2.Handler {
return projections
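// ProjectInstance triggers every registered projection once for the instance(s) in the given context and returns the first error encountered.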
func ProjectInstance(ctx context.Context) error {
for _, projection := range projections {
_, err := projection.Trigger(ctx)
if err != nil {
return err
}
}
return nil
}
func (config Config) overwrite(viewModel string) handler2.Config {

View File

@ -63,6 +63,16 @@ func Projections() []*handler2.Handler {
return projections
}
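// ProjectInstance triggers every registered projection once for the instance(s) in the given context and returns the first error encountered.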
func ProjectInstance(ctx context.Context) error {
for _, projection := range projections {
_, err := projection.Trigger(ctx)
if err != nil {
return err
}
}
return nil
}
func (config Config) overwrite(viewModel string) handler2.Config {
c := handler2.Config{
Client: config.Client,

View File

@ -171,7 +171,7 @@ func BytesToPrivateKey(priv []byte) (*rsa.PrivateKey, error) {
var ErrEmpty = errors.New("cannot decode, empty data")
func BytesToPublicKey(pub []byte) (*rsa.PublicKey, error) {
if pub == nil {
if len(pub) == 0 {
return nil, ErrEmpty
}
block, _ := pem.Decode(pub)

View File

@ -14,7 +14,7 @@ import (
)
func init() {
config := &Config{}
config := new(Config)
dialect.Register(config, config, true)
}
@ -49,11 +49,12 @@ func (c *Config) MatchName(name string) bool {
return false
}
func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) {
func (_ *Config) Decode(configs []interface{}) (dialect.Connector, error) {
connector := new(Config)
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: c,
Result: connector,
})
if err != nil {
return nil, err
@ -65,7 +66,7 @@ func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) {
}
}
return c, nil
return connector, nil
}
func (c *Config) Connect(useAdmin bool, pusherRatio, spoolerRatio float64, purpose dialect.DBPurpose) (*sql.DB, error) {

View File

@ -75,7 +75,7 @@ func (db *DB) QueryRow(scan func(*sql.Row) error, query string, args ...any) (er
func (db *DB) QueryRowContext(ctx context.Context, scan func(row *sql.Row) error, query string, args ...any) (err error) {
ctx, spanBeginTx := tracing.NewNamedSpan(ctx, "db.BeginTx")
tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true, Isolation: sql.LevelReadCommitted})
spanBeginTx.EndWithError(err)
if err != nil {
return err

View File

@ -14,7 +14,7 @@ import (
)
func init() {
config := &Config{}
config := new(Config)
dialect.Register(config, config, false)
}
@ -50,11 +50,12 @@ func (c *Config) MatchName(name string) bool {
return false
}
func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) {
func (_ *Config) Decode(configs []interface{}) (dialect.Connector, error) {
connector := new(Config)
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: c,
Result: connector,
})
if err != nil {
return nil, err
@ -66,7 +67,7 @@ func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) {
}
}
return c, nil
return connector, nil
}
func (c *Config) Connect(useAdmin bool, pusherRatio, spoolerRatio float64, purpose dialect.DBPurpose) (*sql.DB, error) {

View File

@ -259,9 +259,6 @@ func (h *Handler) triggerInstances(ctx context.Context, instances []string, trig
for ; err != nil; _, err = h.Trigger(instanceCtx, triggerOpts...) {
time.Sleep(h.retryFailedAfter)
h.log().WithField("instance", instance).OnError(err).Debug("trigger failed")
if err == nil {
break
}
}
}
}

View File

@ -44,6 +44,16 @@ func Start(ctx context.Context) {
}
}
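// ProjectInstance triggers every registered projection once for the instance(s) in the given context and returns the first error encountered.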
func ProjectInstance(ctx context.Context) error {
for _, projection := range projections {
_, err := projection.Trigger(ctx)
if err != nil {
return err
}
}
return nil
}
func Projections() []*handler.Handler {
return projections
}

View File

@ -181,6 +181,16 @@ func Start(ctx context.Context) {
}
}
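// ProjectInstance triggers every registered projection once for the instance(s) in the given context and returns the first error encountered.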
func ProjectInstance(ctx context.Context) error {
for _, projection := range projections {
_, err := projection.Trigger(ctx)
if err != nil {
return err
}
}
return nil
}
func ApplyCustomConfig(customConfig CustomConfig) handler.Config {
return applyCustomConfig(projectionConfig, customConfig)
}

View File

@ -20,7 +20,6 @@ import (
"github.com/zitadel/zitadel/internal/query/projection"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
)
type Queries struct {
@ -46,6 +45,7 @@ type Queries struct {
func StartQueries(
ctx context.Context,
es *eventstore.Eventstore,
esV4 es_v4.Querier,
querySqlClient, projectionSqlClient *database.DB,
projections projection.Config,
defaults sd.SystemDefaults,
@ -59,7 +59,7 @@ func StartQueries(
) (repo *Queries, err error) {
repo = &Queries{
eventstore: es,
eventStoreV4: postgres.New(querySqlClient),
eventStoreV4: esV4,
client: querySqlClient,
DefaultLanguage: language.Und,
LoginTranslationFileContents: make(map[string][]byte),

View File

@ -29,14 +29,14 @@ func intentToCommands(intent *intent) (commands []*command, err error) {
}
func marshalPayload(payload any) ([]byte, error) {
if reflect.ValueOf(payload).IsZero() {
if payload == nil || reflect.ValueOf(payload).IsZero() {
return nil, nil
}
return json.Marshal(payload)
}
type command struct {
eventstore.Command
*eventstore.Command
intent *intent

View File

@ -3,7 +3,9 @@ package postgres
import (
"context"
"database/sql"
"fmt"
"github.com/cockroachdb/cockroach-go/v2/crdb"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
@ -28,40 +30,54 @@ func (s *Storage) Push(ctx context.Context, intent *eventstore.PushIntent) (err
}()
}
// allows smaller wait times on query side for instances which are not actively writing
if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil {
return err
}
var retryCount uint32
return crdb.Execute(func() (err error) {
defer func() {
if err == nil {
return
}
if retryCount < s.config.MaxRetries {
retryCount++
return
}
logging.WithFields("retry_count", retryCount).WithError(err).Debug("max retry count reached")
err = zerrors.ThrowInternal(err, "POSTG-VJfJz", "Errors.Internal")
}()
// allows smaller wait times on query side for instances which are not actively writing
if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil {
return err
}
intents, err := lockAggregates(ctx, tx, intent)
if err != nil {
return err
}
if !checkSequences(intents) {
return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched")
}
commands := make([]*command, 0, len(intents))
for _, intent := range intents {
additionalCommands, err := intentToCommands(intent)
intents, err := lockAggregates(ctx, tx, intent)
if err != nil {
return err
}
commands = append(commands, additionalCommands...)
}
err = uniqueConstraints(ctx, tx, commands)
if err != nil {
return err
}
if !checkSequences(intents) {
return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched")
}
return push(ctx, tx, intent, commands)
commands := make([]*command, 0, len(intents))
for _, intent := range intents {
additionalCommands, err := intentToCommands(intent)
if err != nil {
return err
}
commands = append(commands, additionalCommands...)
}
err = uniqueConstraints(ctx, tx, commands)
if err != nil {
return err
}
return push(ctx, tx, intent, commands)
})
}
// setAppName sets the application name for the current transaction
func setAppName(ctx context.Context, tx *sql.Tx, name string) error {
_, err := tx.ExecContext(ctx, "SET LOCAL application_name TO $1", name)
_, err := tx.ExecContext(ctx, fmt.Sprintf("SET LOCAL application_name TO '%s'", name))
if err != nil {
logging.WithFields("name", name).WithError(err).Debug("setting app name failed")
return zerrors.ThrowInternal(err, "POSTG-G3OmZ", "Errors.Internal")
@ -154,7 +170,8 @@ func push(ctx context.Context, tx *sql.Tx, reducer eventstore.Reducer, commands
cmd.sequence,
cmd.position.InPositionOrder,
)
stmt.WriteString(", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())")
stmt.WriteString(pushPositionStmt)
stmt.WriteString(`)`)
}
stmt.WriteString(` RETURNING created_at, "position"`)

View File

@ -36,7 +36,9 @@ func Test_uniqueConstraints(t *testing.T) {
name: "command without constraints",
args: args{
commands: []*command{
{},
{
Command: &eventstore.Command{},
},
},
expectations: []mock.Expectation{},
},
@ -53,7 +55,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id", "error"),
},
@ -81,7 +83,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddGlobalUniqueConstraint("test", "id", "error"),
},
@ -109,7 +111,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id", "error"),
eventstore.NewAddEventUniqueConstraint("test", "id2", "error"),
@ -143,7 +145,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id", "error"),
},
@ -156,7 +158,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id2", "error"),
},
@ -189,7 +191,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveInstanceUniqueConstraints(),
},
@ -217,7 +219,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveInstanceUniqueConstraints(),
},
@ -230,7 +232,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveInstanceUniqueConstraints(),
},
@ -263,7 +265,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveUniqueConstraint("test", "id"),
},
@ -291,7 +293,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveGlobalUniqueConstraint("test", "id"),
},
@ -319,7 +321,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveUniqueConstraint("test", "id"),
eventstore.NewRemoveUniqueConstraint("test", "id2"),
@ -353,7 +355,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveUniqueConstraint("test", "id"),
},
@ -366,7 +368,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewRemoveUniqueConstraint("test", "id2"),
},
@ -399,7 +401,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id", ""),
},
@ -433,7 +435,7 @@ func Test_uniqueConstraints(t *testing.T) {
eventstore.AppendAggregate("", "", ""),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
UniqueConstraints: []*eventstore.UniqueConstraint{
eventstore.NewAddEventUniqueConstraint("test", "id", "My.Error"),
},
@ -786,7 +788,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -841,7 +843,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -857,7 +859,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -926,7 +928,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -942,7 +944,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "type2", "id2"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1011,7 +1013,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1067,7 +1069,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1123,7 +1125,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1139,7 +1141,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1214,7 +1216,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1230,7 +1232,7 @@ func Test_push(t *testing.T) {
eventstore.AppendAggregate("owner", "testType", "testID"),
).Aggregates()[0],
},
Command: eventstore.Command{
Command: &eventstore.Command{
Action: eventstore.Action[any]{
Creator: "gigi",
Revision: 1,
@ -1286,6 +1288,7 @@ func Test_push(t *testing.T) {
},
},
}
initPushStmt("postgres")
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dbMock := mock.NewSQLMock(t, append([]mock.Expectation{mock.ExpectBegin(nil)}, tt.args.expectations...)...)

View File

@ -194,6 +194,7 @@ func writeAggregateFilters(stmt *database.Statement, filters []*eventstore.Aggre
func writeAggregateFilter(stmt *database.Statement, filter *eventstore.AggregateFilter) {
conditions := definedConditions([]*condition{
{column: "owner", condition: filter.Owners()},
{column: "aggregate_type", condition: filter.Type()},
{column: "aggregate_id", condition: filter.IDs()},
})

View File

@ -3,6 +3,8 @@ package postgres
import (
"context"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
@ -10,15 +12,35 @@ import (
var (
_ eventstore.Pusher = (*Storage)(nil)
_ eventstore.Querier = (*Storage)(nil)
pushPositionStmt string
)
type Storage struct {
client *database.DB
config *Config
}
func New(client *database.DB) *Storage {
type Config struct {
MaxRetries uint32
}
func New(client *database.DB, config *Config) *Storage {
initPushStmt(client.Type())
return &Storage{
client: client,
config: config,
}
}
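// initPushStmt selects the dialect specific SQL fragment used to fill the created_at and position columns when pushing events.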
func initPushStmt(typ string) {
switch typ {
case "cockroach":
pushPositionStmt = ", hlc_to_timestamp(cluster_logical_timestamp()), cluster_logical_timestamp()"
case "postgres":
pushPositionStmt = ", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())"
default:
logging.WithFields("database_type", typ).Panic("position statement for type not implemented")
}
}

View File

@ -87,7 +87,7 @@ type PushAggregate struct {
// owner of the aggregate
owner string
// Commands is an ordered list of changes on the aggregate
commands []Command
commands []*Command
// CurrentSequence checks the current state of the aggregate.
// The following types match the current sequence of the aggregate as described:
// * nil or [SequenceIgnore]: Not relevant to add the commands
@ -122,7 +122,7 @@ func (pa *PushAggregate) Owner() string {
return pa.owner
}
func (pa *PushAggregate) Commands() []Command {
func (pa *PushAggregate) Commands() []*Command {
return pa.commands
}
@ -165,7 +165,7 @@ func CurrentSequenceAtLeast(sequence uint32) PushAggregateOpt {
}
}
func AppendCommands(commands ...Command) PushAggregateOpt {
func AppendCommands(commands ...*Command) PushAggregateOpt {
return func(pa *PushAggregate) {
pa.commands = append(pa.commands, commands...)
}

View File

@ -255,6 +255,7 @@ func NewAggregateFilter(typ string, opts ...AggregateFilterOpt) *AggregateFilter
type AggregateFilter struct {
typ string
ids []string
owners *filter[[]string]
events []*EventFilter
}
@ -273,6 +274,13 @@ func (f *AggregateFilter) IDs() database.Condition {
return database.NewListContains(f.ids...)
}
func (f *AggregateFilter) Owners() database.Condition {
if f.owners == nil {
return nil
}
return f.owners.condition
}
func (f *AggregateFilter) Events() []*EventFilter {
return f.events
}
@ -298,6 +306,61 @@ func AggregateIDs(ids ...string) AggregateFilterOpt {
}
}
func AggregateOwnersEqual(owners ...string) AggregateFilterOpt {
return func(f *AggregateFilter) {
var cond database.Condition
switch len(owners) {
case 0:
return
case 1:
cond = database.NewTextEqual(owners[0])
default:
cond = database.NewListEquals(owners...)
}
f.owners = &filter[[]string]{
condition: cond,
value: &owners,
}
}
}
func AggregateOwnersContains(owners ...string) AggregateFilterOpt {
return func(f *AggregateFilter) {
var cond database.Condition
switch len(owners) {
case 0:
return
case 1:
cond = database.NewTextEqual(owners[0])
default:
cond = database.NewListContains(owners...)
}
f.owners = &filter[[]string]{
condition: cond,
value: &owners,
}
}
}
func AggregateOwnersNotContains(owners ...string) AggregateFilterOpt {
return func(f *AggregateFilter) {
var cond database.Condition
switch len(owners) {
case 0:
return
case 1:
cond = database.NewTextUnequal(owners[0])
default:
cond = database.NewListNotContains(owners...)
}
f.owners = &filter[[]string]{
condition: cond,
value: &owners,
}
}
}
func AppendEvent(opts ...EventFilterOpt) AggregateFilterOpt {
return AppendEvents(NewEventFilter(opts...))
}

View File

@ -0,0 +1,15 @@
package projection
import (
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
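// HighestPosition keeps the position of the last event passed to Reduce and can be used to read the most recent position from the eventstore.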
type HighestPosition eventstore.GlobalPosition
var _ eventstore.Reducer = (*HighestPosition)(nil)
// Reduce implements eventstore.Reducer.
func (h *HighestPosition) Reduce(events ...*eventstore.StorageEvent) error {
*h = HighestPosition(events[len(events)-1].Position)
return nil
}

View File

@ -0,0 +1,72 @@
package readmodel
import (
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/v2/system"
"github.com/zitadel/zitadel/internal/v2/system/mirror"
)
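// LastSuccessfulMirror reads the position up to which data was last mirrored successfully from the given source database.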
type LastSuccessfulMirror struct {
ID string
Position float64
source string
}
func NewLastSuccessfulMirror(source string) *LastSuccessfulMirror {
return &LastSuccessfulMirror{
source: source,
}
}
var _ eventstore.Reducer = (*LastSuccessfulMirror)(nil)
func (p *LastSuccessfulMirror) Filter() *eventstore.Filter {
return eventstore.NewFilter(
eventstore.AppendAggregateFilter(
system.AggregateType,
eventstore.AggregateOwnersEqual(system.AggregateOwner),
eventstore.AppendEvent(
eventstore.SetEventTypes(
mirror.SucceededType,
),
eventstore.EventCreatorsEqual(mirror.Creator),
),
),
eventstore.FilterPagination(
eventstore.Descending(),
),
)
}
// Reduce implements eventstore.Reducer.
func (h *LastSuccessfulMirror) Reduce(events ...*eventstore.StorageEvent) (err error) {
for _, event := range events {
if event.Type == mirror.SucceededType {
err = h.reduceSucceeded(event)
}
if err != nil {
return err
}
}
return nil
}
func (h *LastSuccessfulMirror) reduceSucceeded(event *eventstore.StorageEvent) error {
// if position is set we skip all older events
if h.Position > 0 {
return nil
}
succeededEvent, err := mirror.SucceededEventFromStorage(event)
if err != nil {
return err
}
if h.source != succeededEvent.Payload.Source {
return nil
}
h.Position = succeededEvent.Payload.Position
return nil
}

View File

@ -0,0 +1,8 @@
package system
const (
AggregateType = "system"
AggregateOwner = "SYSTEM"
AggregateInstance = ""
EventTypePrefix = AggregateType + "."
)

View File

@ -0,0 +1,8 @@
package mirror
import "github.com/zitadel/zitadel/internal/v2/system"
const (
Creator = "MIRROR"
eventTypePrefix = system.EventTypePrefix + "mirror."
)

View File

@ -0,0 +1,52 @@
package mirror
import (
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
type failedPayload struct {
Cause string `json:"cause"`
// Source is the name of the database data are mirrored from
Source string `json:"source"`
}
const FailedType = eventTypePrefix + "failed"
type FailedEvent eventstore.Event[failedPayload]
var _ eventstore.TypeChecker = (*FailedEvent)(nil)
func (e *FailedEvent) ActionType() string {
return FailedType
}
func FailedEventFromStorage(event *eventstore.StorageEvent) (e *FailedEvent, _ error) {
if event.Type != e.ActionType() {
return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-bwB9l", "Errors.Invalid.Event.Type")
}
payload, err := eventstore.UnmarshalPayload[failedPayload](event.Payload)
if err != nil {
return nil, err
}
return &FailedEvent{
StorageEvent: event,
Payload: payload,
}, nil
}
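// NewFailedCommand creates the system event command recorded when a mirror run fails, storing the cause of the failure.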
func NewFailedCommand(source string, cause error) *eventstore.Command {
return &eventstore.Command{
Action: eventstore.Action[any]{
Creator: Creator,
Type: FailedType,
Payload: failedPayload{
Cause: cause.Error(),
Source: source,
},
Revision: 1,
},
}
}

View File

@ -0,0 +1,68 @@
package mirror
import (
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
type startedPayload struct {
// Destination is the name of the database data are mirrored to
Destination string `json:"destination"`
// Either Instances or System needs to be set
Instances []string `json:"instances,omitempty"`
System bool `json:"system,omitempty"`
}
const StartedType = eventTypePrefix + "started"
type StartedEvent eventstore.Event[startedPayload]
var _ eventstore.TypeChecker = (*StartedEvent)(nil)
func (e *StartedEvent) ActionType() string {
return StartedType
}
func StartedEventFromStorage(event *eventstore.StorageEvent) (e *StartedEvent, _ error) {
if event.Type != e.ActionType() {
return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-bwB9l", "Errors.Invalid.Event.Type")
}
payload, err := eventstore.UnmarshalPayload[startedPayload](event.Payload)
if err != nil {
return nil, err
}
return &StartedEvent{
StorageEvent: event,
Payload: payload,
}, nil
}
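// NewStartedSystemCommand creates the system event command recorded when a mirror of the whole system is started.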
func NewStartedSystemCommand(destination string) *eventstore.Command {
return newStartedCommand(&startedPayload{
Destination: destination,
System: true,
})
}
func NewStartedInstancesCommand(destination string, instances []string) (*eventstore.Command, error) {
if len(instances) == 0 {
return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-8YkrE", "Errors.Mirror.NoInstances")
}
return newStartedCommand(&startedPayload{
Destination: destination,
Instances: instances,
}), nil
}
func newStartedCommand(payload *startedPayload) *eventstore.Command {
return &eventstore.Command{
Action: eventstore.Action[any]{
Creator: Creator,
Type: StartedType,
Revision: 1,
Payload: *payload,
},
}
}

View File

@ -0,0 +1,53 @@
package mirror
import (
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
type succeededPayload struct {
// Source is the name of the database data are mirrored from
Source string `json:"source"`
// Position up to which data will be mirrored
Position float64 `json:"position"`
}
const SucceededType = eventTypePrefix + "succeeded"
type SucceededEvent eventstore.Event[succeededPayload]
var _ eventstore.TypeChecker = (*SucceededEvent)(nil)
func (e *SucceededEvent) ActionType() string {
return SucceededType
}
func SucceededEventFromStorage(event *eventstore.StorageEvent) (e *SucceededEvent, _ error) {
if event.Type != e.ActionType() {
return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-xh5IW", "Errors.Invalid.Event.Type")
}
payload, err := eventstore.UnmarshalPayload[succeededPayload](event.Payload)
if err != nil {
return nil, err
}
return &SucceededEvent{
StorageEvent: event,
Payload: payload,
}, nil
}
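// NewSucceededCommand creates the system event command recorded after a successful mirror run, storing the position up to which data was mirrored.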
func NewSucceededCommand(source string, position float64) *eventstore.Command {
return &eventstore.Command{
Action: eventstore.Action[any]{
Creator: Creator,
Type: SucceededType,
Revision: 1,
Payload: succeededPayload{
Source: source,
Position: position,
},
},
}
}