mirror of https://github.com/zitadel/zitadel.git
chore: move the go code into a subfolder
95 apps/api/cmd/mirror/auth.go Normal file
@@ -0,0 +1,95 @@
package mirror

import (
    "context"
    _ "embed"
    "io"
    "strconv"
    "time"

    "github.com/jackc/pgx/v5/stdlib"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
)

func authCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "auth",
        Short: "mirrors the auth requests table from one database to another",
        Long: `mirrors the auth requests table from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Only auth requests are mirrored`,
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewMigrationConfig(viper.GetViper())
            copyAuth(cmd.Context(), config)
        },
    }

    cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete auth requests of defined instances before copy")

    return cmd
}

func copyAuth(ctx context.Context, config *Migration) {
    sourceClient, err := database.Connect(config.Source, false)
    logging.OnError(err).Fatal("unable to connect to source database")
    defer sourceClient.Close()

    destClient, err := database.Connect(config.Destination, false)
    logging.OnError(err).Fatal("unable to connect to destination database")
    defer destClient.Close()

    copyAuthRequests(ctx, sourceClient, destClient, config.MaxAuthRequestAge)
}

func copyAuthRequests(ctx context.Context, source, dest *database.DB, maxAuthRequestAge time.Duration) {
    start := time.Now()

    logging.Info("creating index on auth.auth_requests.change_date to speed up copy in source database")
    _, err := source.ExecContext(ctx, "CREATE INDEX CONCURRENTLY IF NOT EXISTS auth_requests_change_date ON auth.auth_requests (change_date)")
    logging.OnError(err).Fatal("unable to create index on auth.auth_requests.change_date")

    sourceConn, err := source.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire connection")
    defer sourceConn.Close()

    r, w := io.Pipe()
    errs := make(chan error, 1)

    go func() {
        err = sourceConn.Raw(func(driverConn any) error {
            conn := driverConn.(*stdlib.Conn).Conn()
            _, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+" AND change_date > NOW() - INTERVAL '"+strconv.FormatFloat(maxAuthRequestAge.Seconds(), 'f', -1, 64)+" seconds') TO STDOUT")
            w.Close()
            return err
        })
        errs <- err
    }()

    destConn, err := dest.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire connection")
    defer destConn.Close()

    var affected int64
    err = destConn.Raw(func(driverConn any) error {
        conn := driverConn.(*stdlib.Conn).Conn()

        if shouldReplace {
            _, err := conn.Exec(ctx, "DELETE FROM auth.auth_requests "+instanceClause())
            if err != nil {
                return err
            }
        }

        tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN")
        affected = tag.RowsAffected()

        return err
    })
    logging.OnError(err).Fatal("unable to copy auth requests to destination")
    logging.OnError(<-errs).Fatal("unable to copy auth requests from source")
    logging.WithFields("took", time.Since(start), "count", affected).Info("auth requests migrated")
}
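A goroutine feeds COPY ... TO STDOUT into an io.Pipe while the main goroutine drains the pipe with COPY ... FROM STDIN, so rows stream between the databases without being buffered in the mirror process. A minimal, self-contained sketch of the same pattern, assuming plain pgx connections and placeholder DSNs instead of ZITADEL's database wrapper:

package main

import (
    "context"
    "io"
    "log"

    "github.com/jackc/pgx/v5"
)

func main() {
    ctx := context.Background()

    src, err := pgx.Connect(ctx, "postgres://user:pass@source-host:5432/zitadel") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }
    dst, err := pgx.Connect(ctx, "postgres://user:pass@dest-host:5432/zitadel") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }

    r, w := io.Pipe()
    errs := make(chan error, 1)

    // producer: stream the table out of the source into the pipe
    go func() {
        _, err := src.PgConn().CopyTo(ctx, w, "COPY auth.auth_requests TO STDOUT")
        w.Close() // unblocks the reader; the COPY error travels via errs
        errs <- err
    }()

    // consumer: stream the same bytes into the destination
    if _, err := dst.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN"); err != nil {
        log.Fatal("copy to destination failed: ", err)
    }
    if err := <-errs; err != nil {
        log.Fatal("copy from source failed: ", err)
    }
}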
87 apps/api/cmd/mirror/config.go Normal file
@@ -0,0 +1,87 @@
package mirror

import (
    _ "embed"
    "time"

    "github.com/mitchellh/mapstructure"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/cmd/hooks"
    "github.com/zitadel/zitadel/internal/actions"
    internal_authz "github.com/zitadel/zitadel/internal/api/authz"
    "github.com/zitadel/zitadel/internal/command"
    "github.com/zitadel/zitadel/internal/config/hook"
    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/domain"
    "github.com/zitadel/zitadel/internal/id"
    metrics "github.com/zitadel/zitadel/internal/telemetry/metrics/config"
)

type Migration struct {
    Source      database.Config
    Destination database.Config

    EventBulkSize     uint32
    MaxAuthRequestAge time.Duration

    Log     *logging.Config
    Machine *id.Config
    Metrics metrics.Config
}

var (
    //go:embed defaults.yaml
    defaultConfig []byte
)

func mustNewMigrationConfig(v *viper.Viper) *Migration {
    config := new(Migration)
    mustNewConfig(v, config)

    err := config.Log.SetLogger()
    logging.OnError(err).Fatal("unable to set logger")

    err = config.Metrics.NewMeter()
    logging.OnError(err).Fatal("unable to set meter")

    id.Configure(config.Machine)

    return config
}

func mustNewProjectionsConfig(v *viper.Viper) *ProjectionsConfig {
    config := new(ProjectionsConfig)
    mustNewConfig(v, config)

    err := config.Log.SetLogger()
    logging.OnError(err).Fatal("unable to set logger")

    id.Configure(config.Machine)

    return config
}

func mustNewConfig(v *viper.Viper, config any) {
    err := v.Unmarshal(config,
        viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
            hooks.SliceTypeStringDecode[*domain.CustomMessageText],
            hooks.SliceTypeStringDecode[*command.SetQuota],
            hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
            hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
            hooks.MapTypeStringDecode[domain.Feature, any],
            hooks.MapHTTPHeaderStringDecode,
            hook.Base64ToBytesHookFunc(),
            hook.TagToLanguageHookFunc(),
            mapstructure.StringToTimeDurationHookFunc(),
            mapstructure.StringToTimeHookFunc(time.RFC3339),
            mapstructure.StringToSliceHookFunc(","),
            database.DecodeHook(true),
            actions.HTTPConfigDecodeHook,
            hook.EnumHookFunc(internal_authz.MemberTypeString),
            mapstructure.TextUnmarshallerHookFunc(),
        )),
    )
    logging.OnError(err).Fatal("unable to read default config")
}
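mustNewConfig leans on viper's decode-hook chain to turn plain YAML strings into richer Go types. A stripped-down sketch showing only the duration hook, with a hypothetical two-field struct standing in for Migration:

package main

import (
    "bytes"
    "fmt"
    "time"

    "github.com/mitchellh/mapstructure"
    "github.com/spf13/viper"
)

type config struct {
    EventBulkSize     uint32
    MaxAuthRequestAge time.Duration
}

func main() {
    v := viper.New()
    v.SetConfigType("yaml")
    // same shape as two keys of the embedded defaults.yaml
    if err := v.ReadConfig(bytes.NewBufferString("EventBulkSize: 10000\nMaxAuthRequestAge: 720h\n")); err != nil {
        panic(err)
    }

    var c config
    // the hook converts the string "720h" into a time.Duration while decoding,
    // just like the larger hook chain in mustNewConfig
    err := v.Unmarshal(&c, viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
        mapstructure.StringToTimeDurationHookFunc(),
    )))
    if err != nil {
        panic(err)
    }
    fmt.Println(c.EventBulkSize, c.MaxAuthRequestAge) // 10000 720h0m0s
}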
94 apps/api/cmd/mirror/defaults.yaml Normal file
@@ -0,0 +1,94 @@
Source:
  cockroach:
    Host: localhost # ZITADEL_SOURCE_COCKROACH_HOST
    Port: 26257 # ZITADEL_SOURCE_COCKROACH_PORT
    Database: zitadel # ZITADEL_SOURCE_COCKROACH_DATABASE
    MaxOpenConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXOPENCONNS
    MaxIdleConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXIDLECONNS
    MaxConnLifetime: 30m # ZITADEL_SOURCE_COCKROACH_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_SOURCE_COCKROACH_MAXCONNIDLETIME
    Options: "" # ZITADEL_SOURCE_COCKROACH_OPTIONS
    User:
      Username: zitadel # ZITADEL_SOURCE_COCKROACH_USER_USERNAME
      Password: "" # ZITADEL_SOURCE_COCKROACH_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_SOURCE_COCKROACH_USER_SSL_MODE
        RootCert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_CERT
        Key: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_KEY
  # Postgres is used as soon as a value is set
  # The keys below list the fields that can be set
  postgres:
    Host: # ZITADEL_SOURCE_POSTGRES_HOST
    Port: # ZITADEL_SOURCE_POSTGRES_PORT
    Database: # ZITADEL_SOURCE_POSTGRES_DATABASE
    MaxOpenConns: # ZITADEL_SOURCE_POSTGRES_MAXOPENCONNS
    MaxIdleConns: # ZITADEL_SOURCE_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: # ZITADEL_SOURCE_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: # ZITADEL_SOURCE_POSTGRES_MAXCONNIDLETIME
    Options: # ZITADEL_SOURCE_POSTGRES_OPTIONS
    User:
      Username: # ZITADEL_SOURCE_POSTGRES_USER_USERNAME
      Password: # ZITADEL_SOURCE_POSTGRES_USER_PASSWORD
      SSL:
        Mode: # ZITADEL_SOURCE_POSTGRES_USER_SSL_MODE
        RootCert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_ROOTCERT
        Cert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_CERT
        Key: # ZITADEL_SOURCE_POSTGRES_USER_SSL_KEY

Destination:
  postgres:
    Host: localhost # ZITADEL_DESTINATION_POSTGRES_HOST
    Port: 5432 # ZITADEL_DESTINATION_POSTGRES_PORT
    Database: zitadel # ZITADEL_DESTINATION_POSTGRES_DATABASE
    MaxOpenConns: 5 # ZITADEL_DESTINATION_POSTGRES_MAXOPENCONNS
    MaxIdleConns: 2 # ZITADEL_DESTINATION_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: 30m # ZITADEL_DESTINATION_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_DESTINATION_POSTGRES_MAXCONNIDLETIME
    Options: "" # ZITADEL_DESTINATION_POSTGRES_OPTIONS
    User:
      Username: zitadel # ZITADEL_DESTINATION_POSTGRES_USER_USERNAME
      Password: "" # ZITADEL_DESTINATION_POSTGRES_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_DESTINATION_POSTGRES_USER_SSL_MODE
        RootCert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_CERT
        Key: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_KEY

EventBulkSize: 10000 # ZITADEL_EVENTBULKSIZE
# The maximum time since an auth request was last updated; older requests are ignored.
# Default is 30 days
MaxAuthRequestAge: 720h # ZITADEL_MAXAUTHREQUESTAGE

Projections:
  # The maximum duration a transaction remains open
  # before it stops folding additional events
  # and updates the table.
  TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
  # turn off scheduler during operation
  RequeueEvery: 0s
  ConcurrentInstances: 7 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES
  EventBulkLimit: 1000 # ZITADEL_PROJECTIONS_EVENTBULKLIMIT
  Customizations:
    notifications:
      MaxFailureCount: 1

Eventstore:
  MaxRetries: 3 # ZITADEL_EVENTSTORE_MAXRETRIES

Auth:
  Spooler:
    TransactionDuration: 0s # ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 1000 # ZITADEL_AUTH_SPOOLER_BULKLIMIT

Admin:
  Spooler:
    TransactionDuration: 0s # ZITADEL_ADMIN_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 10 # ZITADEL_ADMIN_SPOOLER_BULKLIMIT

FirstInstance:
  # We only need to create an empty zitadel database, so this step must be skipped
  Skip: true

Log:
  Level: info
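Each key's comment names the environment variable that overrides it. The exact viper wiring lives outside this diff; assuming the usual prefix-plus-replacer setup, the mapping works roughly like this sketch:

package main

import (
    "fmt"
    "strings"

    "github.com/spf13/viper"
)

func main() {
    v := viper.New()
    v.SetEnvPrefix("ZITADEL")
    // turns the key path "Source.postgres.Host" into ZITADEL_SOURCE_POSTGRES_HOST
    v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
    v.AutomaticEnv()

    v.SetDefault("Source.postgres.Host", "localhost")
    // with ZITADEL_SOURCE_POSTGRES_HOST=db.example.com exported,
    // the environment value wins over the default
    fmt.Println(v.GetString("Source.postgres.Host"))
}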
64 apps/api/cmd/mirror/event.go Normal file
@@ -0,0 +1,64 @@
package mirror

import (
    "context"

    "github.com/shopspring/decimal"

    "github.com/zitadel/zitadel/internal/v2/eventstore"
    "github.com/zitadel/zitadel/internal/v2/readmodel"
    "github.com/zitadel/zitadel/internal/v2/system"
    mirror_event "github.com/zitadel/zitadel/internal/v2/system/mirror"
)

func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore.EventStore, source string) (*readmodel.LastSuccessfulMirror, error) {
    lastSuccess := readmodel.NewLastSuccessfulMirror(source)
    if shouldIgnorePrevious {
        return lastSuccess, nil
    }
    _, err := destinationES.Query(
        ctx,
        eventstore.NewQuery(
            system.AggregateInstance,
            lastSuccess,
            eventstore.SetFilters(lastSuccess.Filter()),
        ),
    )
    if err != nil {
        return nil, err
    }

    return lastSuccess, nil
}

func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position decimal.Decimal) error {
    return destinationES.Push(
        ctx,
        eventstore.NewPushIntent(
            system.AggregateInstance,
            eventstore.AppendAggregate(
                system.AggregateOwner,
                system.AggregateType,
                id,
                eventstore.CurrentSequenceMatches(0),
                eventstore.AppendCommands(mirror_event.NewSucceededCommand(source, position)),
            ),
        ),
    )
}

func writeMigrationFailed(ctx context.Context, destinationES *eventstore.EventStore, id, source string, err error) error {
    return destinationES.Push(
        ctx,
        eventstore.NewPushIntent(
            system.AggregateInstance,
            eventstore.AppendAggregate(
                system.AggregateOwner,
                system.AggregateType,
                id,
                eventstore.CurrentSequenceMatches(0),
                eventstore.AppendCommands(mirror_event.NewFailedCommand(source, err)),
            ),
        ),
    )
}
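These helpers give every source database a high-water mark in the destination eventstore: a succeeded event records the last mirrored position, and the LastSuccessfulMirror read model replays it on the next run so the copy can resume incrementally. A toy stdlib sketch of that bookkeeping (the ledger type and all names are invented for illustration):

package main

import "fmt"

// in-memory stand-in for the succeeded events the mirror command appends
type ledger struct{ lastPosition map[string]float64 }

func (l *ledger) lastSuccess(source string) float64      { return l.lastPosition[source] }
func (l *ledger) recordSuccess(source string, p float64) { l.lastPosition[source] = p }

func main() {
    l := &ledger{lastPosition: map[string]float64{}}

    // first run: nothing recorded yet, so the copy starts from position 0
    fmt.Println("run 1 copies positions >", l.lastSuccess("source-db"))
    l.recordSuccess("source-db", 100) // the writeMigrationSucceeded equivalent

    // second run resumes after the recorded high-water mark
    fmt.Println("run 2 copies positions >", l.lastSuccess("source-db"))
}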
264 apps/api/cmd/mirror/event_store.go Normal file
@@ -0,0 +1,264 @@
package mirror

import (
    "context"
    "database/sql"
    _ "embed"
    "errors"
    "io"
    "time"

    "github.com/jackc/pgx/v5/pgconn"
    "github.com/jackc/pgx/v5/stdlib"
    "github.com/shopspring/decimal"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    db "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/database/dialect"
    "github.com/zitadel/zitadel/internal/id"
    "github.com/zitadel/zitadel/internal/v2/database"
    "github.com/zitadel/zitadel/internal/v2/eventstore"
    "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
    "github.com/zitadel/zitadel/internal/zerrors"
)

var shouldIgnorePrevious bool

func eventstoreCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "eventstore",
        Short: "mirrors the eventstore of an instance from one database to another",
        Long: `mirrors the eventstore of an instance from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Migrate only copies events2 and unique constraints`,
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewMigrationConfig(viper.GetViper())
            copyEventstore(cmd.Context(), config)
        },
    }

    cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete unique constraints of defined instances before copy")
    cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")

    return cmd
}

func copyEventstore(ctx context.Context, config *Migration) {
    sourceClient, err := db.Connect(config.Source, false)
    logging.OnError(err).Fatal("unable to connect to source database")
    defer sourceClient.Close()

    destClient, err := db.Connect(config.Destination, false)
    logging.OnError(err).Fatal("unable to connect to destination database")
    defer destClient.Close()

    copyEvents(ctx, sourceClient, destClient, config.EventBulkSize)
    copyUniqueConstraints(ctx, sourceClient, destClient)
}

func positionQuery(db *db.DB) string {
    switch db.Type() {
    case dialect.DatabaseTypePostgres:
        return "SELECT EXTRACT(EPOCH FROM clock_timestamp())"
    case dialect.DatabaseTypeCockroach:
        return "SELECT cluster_logical_timestamp()"
    default:
        logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
        return ""
    }
}

func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
    logging.Info("starting to copy events")
    start := time.Now()
    reader, writer := io.Pipe()

    migrationID, err := id.SonyFlakeGenerator().Next()
    logging.OnError(err).Fatal("unable to generate migration id")

    sourceConn, err := source.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire source connection")

    destConn, err := dest.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire dest connection")

    destinationES := eventstore.NewEventstoreFromOne(postgres.New(dest, &postgres.Config{
        MaxRetries: 3,
    }))

    previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName())
    logging.OnError(err).Fatal("unable to query latest successful migration")

    var maxPosition decimal.Decimal
    err = source.QueryRowContext(ctx,
        func(row *sql.Row) error {
            return row.Scan(&maxPosition)
        },
        "SELECT MAX(position) FROM eventstore.events2 "+instanceClause(),
    )
    logging.OnError(err).Fatal("unable to query max position from source")

    logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration")

    nextPos := make(chan bool, 1)
    pos := make(chan decimal.Decimal, 1)
    errs := make(chan error, 3)

    go func() {
        err := sourceConn.Raw(func(driverConn interface{}) error {
            conn := driverConn.(*stdlib.Conn).Conn()
            nextPos <- true
            var i uint32
            for position := range pos {
                var stmt database.Statement
                stmt.WriteString("COPY (SELECT instance_id, aggregate_type, aggregate_id, event_type, sequence, revision, created_at, regexp_replace(payload::TEXT, '\\\\u0000', '', 'g')::JSON payload, creator, owner, ")
                stmt.WriteArg(position)
                stmt.WriteString(" position, row_number() OVER (PARTITION BY instance_id ORDER BY position, in_tx_order) AS in_tx_order FROM eventstore.events2 ")
                stmt.WriteString(instanceClause())
                stmt.WriteString(" AND ")
                database.NewNumberAtMost(maxPosition).Write(&stmt, "position")
                stmt.WriteString(" AND ")
                database.NewNumberGreater(previousMigration.Position).Write(&stmt, "position")
                stmt.WriteString(" ORDER BY instance_id, position, in_tx_order")
                stmt.WriteString(" LIMIT ")
                stmt.WriteArg(bulkSize)
                stmt.WriteString(" OFFSET ")
                stmt.WriteArg(bulkSize * i)
                stmt.WriteString(") TO STDOUT")

                // COPY does not allow args, so we replace them directly in the statement
                tag, err := conn.PgConn().CopyTo(ctx, writer, stmt.Debug())
                if err != nil {
                    return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i)
                }
                logging.WithFields("batch_count", i).Info("batch of events copied")

                if tag.RowsAffected() < int64(bulkSize) {
                    logging.WithFields("batch_count", i).Info("last batch of events copied")
                    return nil
                }

                nextPos <- true
                i++
            }
            return nil
        })
        writer.Close()
        close(nextPos)
        errs <- err
    }()

    // generate the next destination position for each batch
    go func() {
        defer close(pos)
        for range nextPos {
            var position decimal.Decimal
            err := dest.QueryRowContext(
                ctx,
                func(row *sql.Row) error {
                    return row.Scan(&position)
                },
                positionQuery(dest),
            )
            if err != nil {
                errs <- zerrors.ThrowUnknown(err, "MIGRA-kMyPH", "unable to query next position")
                return
            }
            pos <- position
        }
    }()

    var eventCount int64
    errs <- destConn.Raw(func(driverConn interface{}) error {
        conn := driverConn.(*stdlib.Conn).Conn()

        tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN")
        eventCount = tag.RowsAffected()
        if err != nil {
            pgErr := new(pgconn.PgError)
            errors.As(err, &pgErr)

            logging.WithError(err).WithField("pg_err_details", pgErr.Detail).Error("unable to copy events into destination")
            return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination")
        }

        return nil
    })

    close(errs)
    writeCopyEventsDone(ctx, destinationES, migrationID, source.DatabaseName(), maxPosition, errs)

    logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated")
}

func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position decimal.Decimal, errs <-chan error) {
    joinedErrs := make([]error, 0, len(errs))
    for err := range errs {
        joinedErrs = append(joinedErrs, err)
    }
    err := errors.Join(joinedErrs...)

    if err != nil {
        logging.WithError(err).Error("unable to mirror events")
        err := writeMigrationFailed(ctx, es, id, source, err)
        logging.OnError(err).Fatal("unable to write failed event")
        return
    }

    err = writeMigrationSucceeded(ctx, es, id, source, position)
    logging.OnError(err).Fatal("unable to write succeeded event")
}

func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) {
    logging.Info("starting to copy unique constraints")
    start := time.Now()
    reader, writer := io.Pipe()
    errs := make(chan error, 1)

    sourceConn, err := source.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire source connection")

    go func() {
        err := sourceConn.Raw(func(driverConn interface{}) error {
            conn := driverConn.(*stdlib.Conn).Conn()
            var stmt database.Statement
            stmt.WriteString("COPY (SELECT instance_id, unique_type, unique_field FROM eventstore.unique_constraints ")
            stmt.WriteString(instanceClause())
            stmt.WriteString(") TO stdout")

            _, err := conn.PgConn().CopyTo(ctx, writer, stmt.String())
            writer.Close()
            return err
        })
        errs <- err
    }()

    destConn, err := dest.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire dest connection")

    var eventCount int64
    err = destConn.Raw(func(driverConn interface{}) error {
        conn := driverConn.(*stdlib.Conn).Conn()

        if shouldReplace {
            var stmt database.Statement
            stmt.WriteString("DELETE FROM eventstore.unique_constraints ")
            stmt.WriteString(instanceClause())

            _, err := conn.Exec(ctx, stmt.String())
            if err != nil {
                return err
            }
        }

        tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.unique_constraints FROM stdin")
        eventCount = tag.RowsAffected()

        return err
    })
    logging.OnError(err).Fatal("unable to copy unique constraints to destination")
    logging.OnError(<-errs).Fatal("unable to copy unique constraints from source")
    logging.WithFields("took", time.Since(start), "count", eventCount).Info("unique constraints migrated")
}
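copyEvents pages through events2 in bulkSize chunks, and before each chunk asks the destination for a fresh position over the nextPos/pos channel pair; the loop ends once a batch comes back smaller than the bulk size. A self-contained toy version of that handshake (event counts and sizes invented):

package main

import "fmt"

func main() {
    nextPos := make(chan bool, 1)
    pos := make(chan int, 1)
    done := make(chan struct{})

    // stand-in for the goroutine running positionQuery(dest) once per batch
    go func() {
        defer close(pos)
        p := 0
        for range nextPos {
            p++
            pos <- p
        }
    }()

    // stand-in for the COPY loop in copyEvents
    go func() {
        defer close(done)
        const bulkSize = 3
        remaining := 7 // pretend 7 events wait in the source
        nextPos <- true
        for p := range pos {
            batch := min(remaining, bulkSize)
            remaining -= batch
            fmt.Printf("batch at position %d copied %d events\n", p, batch)
            if batch < bulkSize {
                break // mirrors tag.RowsAffected() < bulkSize
            }
            nextPos <- true
        }
        close(nextPos)
    }()

    <-done
}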
99 apps/api/cmd/mirror/mirror.go Normal file
@@ -0,0 +1,99 @@
package mirror

import (
    "bytes"
    _ "embed"
    "strings"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/cmd/key"
)

var (
    instanceIDs   []string
    isSystem      bool
    shouldReplace bool
)

func New(configFiles *[]string) *cobra.Command {
    cmd := &cobra.Command{
        Use:   "mirror",
        Short: "mirrors all data of ZITADEL from one database to another",
        Long: `mirrors all data of ZITADEL from one database to another
ZITADEL needs to be initialized and set up with --for-mirror

The command mirrors all data needed and recomputes the projections.
For more details call the help functions of the sub commands.

Order of execution:
1. mirror system tables
2. mirror auth tables
3. mirror event store tables
4. recompute projections
5. verify`,
        PersistentPreRun: func(cmd *cobra.Command, args []string) {
            err := viper.MergeConfig(bytes.NewBuffer(defaultConfig))
            logging.OnError(err).Fatal("unable to read default config")

            for _, file := range *configFiles {
                viper.SetConfigFile(file)
                err := viper.MergeInConfig()
                logging.WithFields("file", file).OnError(err).Warn("unable to read config file")
            }
        },
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewMigrationConfig(viper.GetViper())
            projectionConfig := mustNewProjectionsConfig(viper.GetViper())

            masterKey, err := key.MasterKey(cmd)
            logging.OnError(err).Fatal("unable to read master key")

            copySystem(cmd.Context(), config)
            copyAuth(cmd.Context(), config)
            copyEventstore(cmd.Context(), config)

            projections(cmd.Context(), projectionConfig, masterKey)
        },
    }

    mirrorFlags(cmd)
    cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")
    cmd.Flags().BoolVar(&shouldReplace, "replace", false, `replaces all data of the following tables for the provided instances or all if the "--system"-flag is set:
* system.assets
* auth.auth_requests
* eventstore.unique_constraints
Provide this flag if you execute the mirror command multiple times, so that the static data are mirrored again and inconsistent states are prevented.`)
    migrateProjectionsFlags(cmd)

    cmd.AddCommand(
        eventstoreCmd(),
        systemCmd(),
        projectionsCmd(),
        authCmd(),
        verifyCmd(),
    )

    return cmd
}

func mirrorFlags(cmd *cobra.Command) {
    cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
    cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
    cmd.MarkFlagsOneRequired("system", "instance")
    cmd.MarkFlagsMutuallyExclusive("system", "instance")
}

func instanceClause() string {
    if isSystem {
        return "WHERE instance_id <> ''"
    }
    for i := range instanceIDs {
        instanceIDs[i] = "'" + instanceIDs[i] + "'"
    }

    // COPY does not allow parameters so we need to set them directly
    return "WHERE instance_id IN (" + strings.Join(instanceIDs, ", ") + ")"
}
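New returns an ordinary cobra command, so mounting the whole mirror tree in a CLI is a single AddCommand call. A hypothetical wiring sketch; the import path is a guess based on the new apps/api folder, and the --config flag name is an assumption:

package main

import (
    "github.com/spf13/cobra"

    // assumed import path after the move described by this commit
    "github.com/zitadel/zitadel/apps/api/cmd/mirror"
)

func main() {
    var configFiles []string

    root := &cobra.Command{Use: "zitadel"}
    root.PersistentFlags().StringArrayVar(&configFiles, "config", nil, "path to config file(s)")
    root.AddCommand(mirror.New(&configFiles))

    _ = root.Execute()
}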
351 apps/api/cmd/mirror/projections.go Normal file
@@ -0,0 +1,351 @@
package mirror

import (
    "context"
    "database/sql"
    "fmt"
    "net/http"
    "sync"
    "time"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/cmd/encryption"
    "github.com/zitadel/zitadel/cmd/key"
    "github.com/zitadel/zitadel/cmd/tls"
    admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
    admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
    admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
    internal_authz "github.com/zitadel/zitadel/internal/api/authz"
    "github.com/zitadel/zitadel/internal/api/oidc"
    "github.com/zitadel/zitadel/internal/api/ui/login"
    auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
    auth_handler "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/handler"
    auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
    "github.com/zitadel/zitadel/internal/authz"
    authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
    "github.com/zitadel/zitadel/internal/cache/connector"
    "github.com/zitadel/zitadel/internal/command"
    "github.com/zitadel/zitadel/internal/config/systemdefaults"
    crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/domain"
    "github.com/zitadel/zitadel/internal/eventstore"
    old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
    new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
    "github.com/zitadel/zitadel/internal/i18n"
    "github.com/zitadel/zitadel/internal/id"
    "github.com/zitadel/zitadel/internal/notification"
    "github.com/zitadel/zitadel/internal/notification/handlers"
    "github.com/zitadel/zitadel/internal/query"
    "github.com/zitadel/zitadel/internal/query/projection"
    static_config "github.com/zitadel/zitadel/internal/static/config"
    es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
    es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
    "github.com/zitadel/zitadel/internal/webauthn"
)

func projectionsCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "projections",
        Short: "calls the projections synchronously",
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewProjectionsConfig(viper.GetViper())

            masterKey, err := key.MasterKey(cmd)
            logging.OnError(err).Fatal("unable to read master key")

            projections(cmd.Context(), config, masterKey)
        },
    }

    migrateProjectionsFlags(cmd)

    return cmd
}

type ProjectionsConfig struct {
    Destination    database.Config
    Projections    projection.Config
    Notifications  handlers.WorkerConfig
    EncryptionKeys *encryption.EncryptionKeyConfig
    SystemAPIUsers map[string]*internal_authz.SystemAPIUser
    Eventstore     *eventstore.Config
    Caches         *connector.CachesConfig

    Admin admin_es.Config
    Auth  auth_es.Config

    Log     *logging.Config
    Machine *id.Config

    ExternalPort    uint16
    ExternalDomain  string
    ExternalSecure  bool
    InternalAuthZ   internal_authz.Config
    SystemAuthZ     internal_authz.Config
    SystemDefaults  systemdefaults.SystemDefaults
    Telemetry       *handlers.TelemetryPusherConfig
    Login           login.Config
    OIDC            oidc.Config
    WebAuthNName    string
    DefaultInstance command.InstanceSetup
    AssetStorage    static_config.AssetStorageConfig
}

func migrateProjectionsFlags(cmd *cobra.Command) {
    key.AddMasterKeyFlag(cmd)
    tls.AddTLSModeFlag(cmd)
}

func projections(
    ctx context.Context,
    config *ProjectionsConfig,
    masterKey string,
) {
    logging.Info("starting to fill projections")
    start := time.Now()

    client, err := database.Connect(config.Destination, false)
    logging.OnError(err).Fatal("unable to connect to database")

    keyStorage, err := crypto_db.NewKeyStorage(client, masterKey)
    logging.OnError(err).Fatal("cannot start key storage")

    keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage)
    logging.OnError(err).Fatal("unable to read encryption keys")

    staticStorage, err := config.AssetStorage.NewStorage(client.DB)
    logging.OnError(err).Fatal("unable to create static storage")

    newEventstore := new_es.NewEventstore(client)
    config.Eventstore.Querier = old_es.NewPostgres(client)
    config.Eventstore.Pusher = newEventstore
    config.Eventstore.Searcher = newEventstore

    es := eventstore.NewEventstore(config.Eventstore)
    esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{
        MaxRetries: config.Eventstore.MaxRetries,
    }))

    sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

    cacheConnectors, err := connector.StartConnectors(config.Caches, client)
    logging.OnError(err).Fatal("unable to start caches")

    queries, err := query.StartQueries(
        ctx,
        es,
        esV4.Querier,
        client,
        client,
        cacheConnectors,
        config.Projections,
        config.SystemDefaults,
        keys.IDPConfig,
        keys.OTP,
        keys.OIDC,
        keys.SAML,
        keys.Target,
        config.InternalAuthZ.RolePermissionMappings,
        sessionTokenVerifier,
        func(q *query.Queries) domain.PermissionCheck {
            return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
                return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
            }
        },
        0,
        config.SystemAPIUsers,
        false,
    )
    logging.OnError(err).Fatal("unable to start queries")

    authZRepo, err := authz.Start(queries, es, client, keys.OIDC, config.ExternalSecure)
    logging.OnError(err).Fatal("unable to start authz repo")

    webAuthNConfig := &webauthn.Config{
        DisplayName:    config.WebAuthNName,
        ExternalSecure: config.ExternalSecure,
    }
    commands, err := command.StartCommands(ctx,
        es,
        cacheConnectors,
        config.SystemDefaults,
        config.InternalAuthZ.RolePermissionMappings,
        staticStorage,
        webAuthNConfig,
        config.ExternalDomain,
        config.ExternalSecure,
        config.ExternalPort,
        keys.IDPConfig,
        keys.OTP,
        keys.SMTP,
        keys.SMS,
        keys.User,
        keys.DomainVerification,
        keys.OIDC,
        keys.SAML,
        keys.Target,
        &http.Client{},
        func(ctx context.Context, permission, orgID, resourceID string) (err error) {
            return internal_authz.CheckPermission(ctx, authZRepo, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
        },
        sessionTokenVerifier,
        config.OIDC.DefaultAccessTokenLifetime,
        config.OIDC.DefaultRefreshTokenExpiration,
        config.OIDC.DefaultRefreshTokenIdleExpiration,
        config.DefaultInstance.SecretGenerators,
    )
    logging.OnError(err).Fatal("unable to start commands")

    err = projection.Create(ctx, client, es, config.Projections, keys.OIDC, keys.SAML, config.SystemAPIUsers)
    logging.OnError(err).Fatal("unable to start projections")

    i18n.MustLoadSupportedLanguagesFromDir()

    notification.Register(
        ctx,
        config.Projections.Customizations["notifications"],
        config.Projections.Customizations["notificationsquotas"],
        config.Projections.Customizations["backchannel"],
        config.Projections.Customizations["telemetry"],
        config.Notifications,
        *config.Telemetry,
        config.ExternalDomain,
        config.ExternalPort,
        config.ExternalSecure,
        commands,
        queries,
        es,
        config.Login.DefaultOTPEmailURLV2,
        config.SystemDefaults.Notifications.FileSystemPath,
        keys.User,
        keys.SMTP,
        keys.SMS,
        keys.OIDC,
        config.OIDC.DefaultBackChannelLogoutLifetime,
        nil,
    )

    config.Auth.Spooler.Client = client
    config.Auth.Spooler.Eventstore = es
    authView, err := auth_view.StartView(config.Auth.Spooler.Client, keys.OIDC, queries, config.Auth.Spooler.Eventstore)
    logging.OnError(err).Fatal("unable to start auth view")
    auth_handler.Register(ctx, config.Auth.Spooler, authView, queries)

    config.Admin.Spooler.Client = client
    config.Admin.Spooler.Eventstore = es
    adminView, err := admin_view.StartView(config.Admin.Spooler.Client)
    logging.OnError(err).Fatal("unable to start admin view")

    admin_handler.Register(ctx, config.Admin.Spooler, adminView, staticStorage)

    instances := make(chan string, config.Projections.ConcurrentInstances)
    failedInstances := make(chan string)
    wg := sync.WaitGroup{}
    wg.Add(int(config.Projections.ConcurrentInstances))

    go func() {
        for instance := range failedInstances {
            logging.WithFields("instance", instance).Error("projection failed")
        }
    }()

    for range int(config.Projections.ConcurrentInstances) {
        go execProjections(ctx, instances, failedInstances, &wg)
    }

    existingInstances := queryInstanceIDs(ctx, client)
    for i, instance := range existingInstances {
        instances <- instance
        logging.WithFields("id", instance, "index", fmt.Sprintf("%d/%d", i, len(existingInstances))).Info("instance queued for projection")
    }
    close(instances)
    wg.Wait()

    close(failedInstances)

    logging.WithFields("took", time.Since(start)).Info("projections executed")
}

func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
    for instance := range instances {
        logging.WithFields("instance", instance).Info("starting projections")
        ctx = internal_authz.WithInstanceID(ctx, instance)

        err := projection.ProjectInstance(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger failed")
            failedInstances <- instance
            continue
        }

        err = projection.ProjectInstanceFields(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
            failedInstances <- instance
            continue
        }

        err = admin_handler.ProjectInstance(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger admin handler failed")
            failedInstances <- instance
            continue
        }

        err = projection.ProjectInstanceFields(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
            failedInstances <- instance
            continue
        }

        err = auth_handler.ProjectInstance(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger auth handler failed")
            failedInstances <- instance
            continue
        }

        err = notification.ProjectInstance(ctx)
        if err != nil {
            logging.WithFields("instance", instance).WithError(err).Info("trigger notification failed")
            failedInstances <- instance
            continue
        }

        logging.WithFields("instance", instance).Info("projections done")
    }
    wg.Done()
}

// queryInstanceIDs returns the instance IDs configured by flag
// or all instances that are not removed
func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
    if len(instanceIDs) > 0 {
        return instanceIDs
    }

    instances := []string{}
    err := source.QueryContext(
        ctx,
        func(r *sql.Rows) error {
            for r.Next() {
                var instance string

                if err := r.Scan(&instance); err != nil {
                    return err
                }
                instances = append(instances, instance)
            }
            return r.Err()
        },
        "SELECT DISTINCT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.added' AND instance_id NOT IN (SELECT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.removed')",
    )
    logging.OnError(err).Fatal("unable to query instances")

    return instances
}
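projections fans instances out to ConcurrentInstances workers over a buffered channel and funnels failures to a second channel that a logger goroutine drains. The same worker-pool shape as a self-contained toy (instance ids and the failure condition are invented):

package main

import (
    "fmt"
    "sync"
)

func main() {
    const concurrent = 2
    instances := make(chan string, concurrent)
    failed := make(chan string)

    // drain failures, like the logging goroutine in projections()
    drained := make(chan struct{})
    go func() {
        defer close(drained)
        for instance := range failed {
            fmt.Println("projection failed for", instance)
        }
    }()

    var wg sync.WaitGroup
    wg.Add(concurrent)
    for range concurrent {
        go func() {
            defer wg.Done()
            for instance := range instances {
                if instance == "bad-instance" { // pretend this projection failed
                    failed <- instance
                    continue
                }
                fmt.Println("projected", instance)
            }
        }()
    }

    for _, id := range []string{"inst-1", "bad-instance", "inst-2"} {
        instances <- id
    }
    close(instances)
    wg.Wait()
    close(failed)
    <-drained // wait for the failure logger to finish
}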
140 apps/api/cmd/mirror/system.go Normal file
@@ -0,0 +1,140 @@
package mirror

import (
    "context"
    _ "embed"
    "io"
    "time"

    "github.com/jackc/pgx/v5/stdlib"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
)

func systemCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "system",
        Short: "mirrors the system tables of ZITADEL from one database to another",
        Long: `mirrors the system tables of ZITADEL from one database to another
ZITADEL needs to be initialized
Only keys and assets are mirrored`,
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewMigrationConfig(viper.GetViper())
            copySystem(cmd.Context(), config)
        },
    }

    cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete ALL keys and assets of defined instances before copy")

    return cmd
}

func copySystem(ctx context.Context, config *Migration) {
    sourceClient, err := database.Connect(config.Source, false)
    logging.OnError(err).Fatal("unable to connect to source database")
    defer sourceClient.Close()

    destClient, err := database.Connect(config.Destination, false)
    logging.OnError(err).Fatal("unable to connect to destination database")
    defer destClient.Close()

    copyAssets(ctx, sourceClient, destClient)
    copyEncryptionKeys(ctx, sourceClient, destClient)
}

func copyAssets(ctx context.Context, source, dest *database.DB) {
    logging.Info("starting to copy assets")
    start := time.Now()

    sourceConn, err := source.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire source connection")
    defer sourceConn.Close()

    r, w := io.Pipe()
    errs := make(chan error, 1)

    go func() {
        err = sourceConn.Raw(func(driverConn interface{}) error {
            conn := driverConn.(*stdlib.Conn).Conn()
            // ignore hash column because it's computed
            _, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets "+instanceClause()+") TO stdout")
            w.Close()
            return err
        })
        errs <- err
    }()

    destConn, err := dest.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire dest connection")
    defer destConn.Close()

    var assetCount int64
    err = destConn.Raw(func(driverConn interface{}) error {
        conn := driverConn.(*stdlib.Conn).Conn()

        if shouldReplace {
            _, err := conn.Exec(ctx, "DELETE FROM system.assets "+instanceClause())
            if err != nil {
                return err
            }
        }

        tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin")
        assetCount = tag.RowsAffected()

        return err
    })
    logging.OnError(err).Fatal("unable to copy assets to destination")
    logging.OnError(<-errs).Fatal("unable to copy assets from source")
    logging.WithFields("took", time.Since(start), "count", assetCount).Info("assets migrated")
}

func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
    logging.Info("starting to copy encryption keys")
    start := time.Now()

    sourceConn, err := source.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire source connection")
    defer sourceConn.Close()

    r, w := io.Pipe()
    errs := make(chan error, 1)

    go func() {
        err = sourceConn.Raw(func(driverConn interface{}) error {
            conn := driverConn.(*stdlib.Conn).Conn()
            _, err := conn.PgConn().CopyTo(ctx, w, "COPY system.encryption_keys TO stdout")
            w.Close()
            return err
        })
        errs <- err
    }()

    destConn, err := dest.Conn(ctx)
    logging.OnError(err).Fatal("unable to acquire dest connection")
    defer destConn.Close()

    var keyCount int64
    err = destConn.Raw(func(driverConn interface{}) error {
        conn := driverConn.(*stdlib.Conn).Conn()

        if shouldReplace {
            _, err := conn.Exec(ctx, "TRUNCATE system.encryption_keys")
            if err != nil {
                return err
            }
        }

        tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin")
        keyCount = tag.RowsAffected()

        return err
    })
    logging.OnError(err).Fatal("unable to copy encryption keys to destination")
    logging.OnError(<-errs).Fatal("unable to copy encryption keys from source")
    logging.WithFields("took", time.Since(start), "count", keyCount).Info("encryption keys migrated")
}
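copyAssets lists the columns explicitly on both ends because system.assets has a computed hash column that COPY cannot write; by copying only the plain columns, the destination recomputes the hash on insert. A condensed sketch of that explicit-column COPY, again with placeholder DSNs:

package main

import (
    "context"
    "io"
    "log"

    "github.com/jackc/pgx/v5"
)

func main() {
    ctx := context.Background()

    src, err := pgx.Connect(ctx, "postgres://user:pass@source-host:5432/zitadel") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }
    dst, err := pgx.Connect(ctx, "postgres://user:pass@dest-host:5432/zitadel") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }

    r, w := io.Pipe()
    go func() {
        // export every column except the generated one
        _, err := src.PgConn().CopyTo(ctx, w,
            "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets) TO STDOUT")
        w.CloseWithError(err) // a nil error closes the pipe with plain EOF
    }()

    // import into the explicit column list; the database recomputes the hash
    if _, err := dst.PgConn().CopyFrom(ctx, r,
        "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM STDIN"); err != nil {
        log.Fatal(err)
    }
}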
123 apps/api/cmd/mirror/verify.go Normal file
@@ -0,0 +1,123 @@
package mirror

import (
    "context"
    "database/sql"
    _ "embed"
    "fmt"
    "slices"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/zitadel/logging"

    cryptoDatabase "github.com/zitadel/zitadel/internal/crypto/database"
    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/query/projection"
)

func verifyCmd() *cobra.Command {
    return &cobra.Command{
        Use:   "verify",
        Short: "checks if source and dest have the same number of entries",
        Run: func(cmd *cobra.Command, args []string) {
            config := mustNewMigrationConfig(viper.GetViper())
            verifyMigration(cmd.Context(), config)
        },
    }
}

var schemas = []string{
    "adminapi",
    "auth",
    "eventstore",
    "projections",
    "system",
}

func verifyMigration(ctx context.Context, config *Migration) {
    sourceClient, err := database.Connect(config.Source, false)
    logging.OnError(err).Fatal("unable to connect to source database")
    defer sourceClient.Close()

    destClient, err := database.Connect(config.Destination, false)
    logging.OnError(err).Fatal("unable to connect to destination database")
    defer destClient.Close()

    for _, schema := range schemas {
        for _, table := range append(getTables(ctx, destClient, schema), getViews(ctx, destClient, schema)...) {
            sourceCount := countEntries(ctx, sourceClient, table)
            destCount := countEntries(ctx, destClient, table)

            entry := logging.WithFields("table", table, "dest", destCount, "source", sourceCount)
            if sourceCount == destCount {
                entry.Debug("equal count")
                continue
            }
            entry.WithField("diff", destCount-sourceCount).Info("unequal count")
        }
    }
}

func getTables(ctx context.Context, dest *database.DB, schema string) (tables []string) {
    err := dest.QueryContext(
        ctx,
        func(r *sql.Rows) error {
            for r.Next() {
                var table string
                if err := r.Scan(&table); err != nil {
                    return err
                }
                tables = append(tables, table)
            }
            return r.Err()
        },
        "SELECT CONCAT(schemaname, '.', tablename) FROM pg_tables WHERE schemaname = $1",
        schema,
    )
    logging.WithFields("schema", schema).OnError(err).Fatal("unable to query tables")
    return tables
}

func getViews(ctx context.Context, dest *database.DB, schema string) (tables []string) {
    err := dest.QueryContext(
        ctx,
        func(r *sql.Rows) error {
            for r.Next() {
                var table string
                if err := r.Scan(&table); err != nil {
                    return err
                }
                tables = append(tables, table)
            }
            return r.Err()
        },
        "SELECT CONCAT(schemaname, '.', viewname) FROM pg_views WHERE schemaname = $1",
        schema,
    )
    logging.WithFields("schema", schema).OnError(err).Fatal("unable to query views")
    return tables
}

func countEntries(ctx context.Context, client *database.DB, table string) (count int) {
    instanceClause := instanceClause()
    noInstanceIDColumn := []string{
        projection.InstanceProjectionTable,
        projection.SystemFeatureTable,
        cryptoDatabase.EncryptionKeysTable,
    }
    if slices.Contains(noInstanceIDColumn, table) {
        instanceClause = ""
    }

    err := client.QueryRowContext(
        ctx,
        func(r *sql.Row) error {
            return r.Scan(&count)
        },
        fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause),
    )
    logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count")

    return count
}