feat(cmd): mirror (#7004)
# Which Problems Are Solved

Adds the possibility to mirror an existing database to a new one. For that, a new command was added: `zitadel mirror`, including subcommands for more fine-grained mirroring of the data.

Sub commands:

* `zitadel mirror eventstore`: copies only events and their unique constraints
* `zitadel mirror system`: mirrors the data of the `system` schema
* `zitadel mirror projections`: runs all projections
* `zitadel mirror auth`: copies auth requests
* `zitadel mirror verify`: counts the number of rows in the source and destination database and prints the diff

The command requires one of the following flags:

* `--system`: copies all instances of the system
* `--instance <instance-id>` or `--instance <comma separated list of instance ids>`: copies only the defined instances

The command is safe to execute multiple times by adding the `--replace` flag. This replaces currently existing data except for the `events` table.

# Additional Changes

A `--for-mirror` flag was added to `zitadel setup` to prepare the new database. The flag skips the creation of the first instance and the initial run of projections.

It is now possible to skip the creation of the first instance during setup by setting `FirstInstance.Skip` to true in the steps configuration.

# Additional info

It is currently not possible to merge multiple databases. See https://github.com/zitadel/zitadel/issues/7964 for more details.

It is currently not possible to use files. See https://github.com/zitadel/zitadel/issues/7966 for more information.

closes https://github.com/zitadel/zitadel/issues/7586
closes https://github.com/zitadel/zitadel/issues/7486

### Definition of Ready

- [x] I am happy with the code
- [x] Short description of the feature/issue is added in the pr description
- [x] PR is linked to the corresponding user story
- [x] Acceptance criteria are met
- [x] All open todos and follow ups are defined in a new ticket and justified
- [x] Deviations from the acceptance criteria and design are agreed with the PO and documented.
- [x] No debug or dead code
- [x] My code has no repetitions
- [x] Critical parts are tested automatically
- [ ] Where possible E2E tests are implemented
- [x] Documentation/examples are up-to-date
- [x] All non-functional requirements are met
- [x] Functionality of the acceptance criteria is checked manually on the dev system.

---------

Co-authored-by: Livio Spring <livio.a@gmail.com>
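The flag contract above (`--system` or `--instance`, but never both) is enforced directly by cobra in `cmd/mirror/mirror.go`, shown later in this diff. As a minimal, self-contained sketch of that wiring — the command name is real, but the `Run` body is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var (
		instanceIDs []string
		isSystem    bool
	)
	cmd := &cobra.Command{
		Use: "mirror",
		Run: func(cmd *cobra.Command, args []string) {
			// Placeholder body: the real command copies data between databases.
			if isSystem {
				fmt.Println("mirroring all instances")
				return
			}
			fmt.Println("mirroring instances:", instanceIDs)
		},
	}
	cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "instance id(s) to migrate")
	cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrate the whole system")
	// cobra enforces the either-or contract (MarkFlagsOneRequired exists since cobra v1.8).
	cmd.MarkFlagsOneRequired("system", "instance")
	cmd.MarkFlagsMutuallyExclusive("system", "instance")

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```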
@@ -40,7 +40,7 @@ func New() *cobra.Command {
 		Long: `Sets up the minimum requirements to start ZITADEL.

 Prerequisites:
-- cockroachDB
+- database (PostgreSql or cockroachdb)

 The user provided by flags needs privileges to
 - create the database if it does not exist
cmd/mirror/auth.go (new file)
@@ -0,0 +1,91 @@
package mirror

import (
	"context"
	_ "embed"
	"io"
	"time"

	"github.com/jackc/pgx/v5/stdlib"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
)

func authCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "auth",
		Short: "mirrors the auth requests table from one database to another",
		Long: `mirrors the auth requests table from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Only auth requests are mirrored`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copyAuth(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete auth requests of defined instances before copy")

	return cmd
}

func copyAuth(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyAuthRequests(ctx, sourceClient, destClient)
}

func copyAuthRequests(ctx context.Context, source, dest *database.DB) {
	start := time.Now()

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+") TO STDOUT")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire connection")
	defer destConn.Close()

	var affected int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "DELETE FROM auth.auth_requests "+instanceClause())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN")
		affected = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy auth requests to destination")
	logging.OnError(<-errs).Fatal("unable to copy auth requests from source")
	logging.WithFields("took", time.Since(start), "count", affected).Info("auth requests migrated")
}
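Every copy function in this package shares the shape visible above: one goroutine streams `COPY ... TO STDOUT` from the source into an `io.Pipe`, while the destination side feeds the pipe's read end into `COPY ... FROM STDIN`, so rows are streamed instead of buffered in memory. A distilled sketch of just that pattern, assuming two already-acquired `*sql.Conn`s backed by the pgx stdlib driver and an illustrative table name:

```go
package mirrorexample

import (
	"context"
	"database/sql"
	"io"

	"github.com/jackc/pgx/v5/stdlib"
)

// copyTable streams one table between two databases: COPY TO on the source
// writes into an io.Pipe while COPY FROM on the destination drains it.
// The bare table name is illustrative; the real command builds its
// statements per table and appends the instance filter.
func copyTable(ctx context.Context, source, dest *sql.Conn, table string) error {
	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		errs <- source.Raw(func(driverConn any) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY "+table+" TO STDOUT")
			w.Close() // unblocks the destination reader once the export ends
			return err
		})
	}()

	err := dest.Raw(func(driverConn any) error {
		conn := driverConn.(*stdlib.Conn).Conn()
		_, err := conn.PgConn().CopyFrom(ctx, r, "COPY "+table+" FROM STDIN")
		return err
	})
	if err != nil {
		return err
	}
	return <-errs
}
```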
cmd/mirror/config.go (new file)
@@ -0,0 +1,80 @@
package mirror

import (
	_ "embed"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/hooks"
	"github.com/zitadel/zitadel/internal/actions"
	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/hook"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/id"
)

type Migration struct {
	Source      database.Config
	Destination database.Config

	EventBulkSize uint32

	Log     *logging.Config
	Machine *id.Config
}

var (
	//go:embed defaults.yaml
	defaultConfig []byte
)

func mustNewMigrationConfig(v *viper.Viper) *Migration {
	config := new(Migration)
	mustNewConfig(v, config)

	err := config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	id.Configure(config.Machine)

	return config
}

func mustNewProjectionsConfig(v *viper.Viper) *ProjectionsConfig {
	config := new(ProjectionsConfig)
	mustNewConfig(v, config)

	err := config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	id.Configure(config.Machine)

	return config
}

func mustNewConfig(v *viper.Viper, config any) {
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			hooks.SliceTypeStringDecode[*domain.CustomMessageText],
			hooks.SliceTypeStringDecode[*command.SetQuota],
			hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
			hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
			hooks.MapTypeStringDecode[domain.Feature, any],
			hooks.MapHTTPHeaderStringDecode,
			hook.Base64ToBytesHookFunc(),
			hook.TagToLanguageHookFunc(),
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
			mapstructure.StringToSliceHookFunc(","),
			database.DecodeHook,
			actions.HTTPConfigDecodeHook,
			hook.EnumHookFunc(internal_authz.MemberTypeString),
		)),
	)
	logging.OnError(err).Fatal("unable to read default config")
}
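The decode hooks above are what let plain strings from YAML and environment variables land in typed struct fields during `viper.Unmarshal`. A reduced sketch of the same pattern, using a hypothetical config struct and only the stock mapstructure hooks:

```go
package mirrorexample

import (
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

// exampleConfig stands in for Migration: string values such as "30m" or
// "a,b,c" are converted into typed fields by the decode hooks.
type exampleConfig struct {
	EventBulkSize   uint32
	MaxConnLifetime time.Duration // parsed from strings like "30m"
	Tags            []string      // parsed from "a,b,c"
}

func loadExampleConfig(v *viper.Viper) (*exampleConfig, error) {
	config := new(exampleConfig)
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		)),
	)
	return config, err
}
```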
cmd/mirror/defaults.yaml (new file)
@@ -0,0 +1,114 @@
Source:
  cockroach:
    Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
    Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
    Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
    MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
    MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
    EventPushConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
    ProjectionSpoolerConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
    MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
    Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
    User:
      Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
      Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
        RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
        Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
  # Postgres is used as soon as a value is set
  # The values describe the possible fields to set values
  postgres:
    Host: # ZITADEL_DATABASE_POSTGRES_HOST
    Port: # ZITADEL_DATABASE_POSTGRES_PORT
    Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
    MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
    MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
    Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
    User:
      Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
      Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
      SSL:
        Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
        RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
        Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
        Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY

Destination:
  cockroach:
    Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
    Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
    Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
    MaxOpenConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
    MaxIdleConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
    MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
    EventPushConnRatio: 0.01 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
    ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO
    Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
    User:
      Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
      Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
        RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
        Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
  # Postgres is used as soon as a value is set
  # The values describe the possible fields to set values
  postgres:
    Host: # ZITADEL_DATABASE_POSTGRES_HOST
    Port: # ZITADEL_DATABASE_POSTGRES_PORT
    Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
    MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
    MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
    Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
    User:
      Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
      Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
      SSL:
        Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
        RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
        Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
        Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY

EventBulkSize: 10000

Projections:
  # The maximum duration a transaction remains open
  # before it stops left folding additional events
  # and updates the table.
  TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
  # turn off scheduler during operation
  RequeueEvery: 0s
  ConcurrentInstances: 7
  EventBulkLimit: 1000
  Customizations:
    notifications:
      MaxFailureCount: 1

Eventstore:
  MaxRetries: 3

Auth:
  Spooler:
    TransactionDuration: 0s # ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 1000 # ZITADEL_AUTH_SPOOLER_BULKLIMIT

Admin:
  Spooler:
    TransactionDuration: 0s # ZITADEL_ADMIN_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 10 # ZITADEL_ADMIN_SPOOLER_BULKLIMIT

FirstInstance:
  # We only need to create an empty zitadel database so this step must be skipped
  Skip: true

Log:
  Level: info
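These defaults ship inside the binary: `config.go` above embeds the file via `go:embed`, and `mirror.New` (below) layers it under the user's configuration with `viper.MergeConfig`. A standalone sketch of that mechanism — the explicit `SetConfigType` call is an assumption needed when, unlike in ZITADEL's root command, no config type has been set elsewhere:

```go
package mirrorexample

import (
	"bytes"
	_ "embed"

	"github.com/spf13/viper"
)

// The embed directive requires a defaults.yaml next to this file.
//go:embed defaults.yaml
var defaultConfig []byte

// mergeDefaults layers the embedded defaults under whatever the user has
// already configured via files, env, or flags.
func mergeDefaults() error {
	viper.SetConfigType("yaml")
	return viper.MergeConfig(bytes.NewBuffer(defaultConfig))
}
```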
cmd/mirror/event.go (new file)
@@ -0,0 +1,96 @@
package mirror

import (
	"context"

	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/projection"
	"github.com/zitadel/zitadel/internal/v2/readmodel"
	"github.com/zitadel/zitadel/internal/v2/system"
	mirror_event "github.com/zitadel/zitadel/internal/v2/system/mirror"
)

func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore.EventStore, source string) (*readmodel.LastSuccessfulMirror, error) {
	lastSuccess := readmodel.NewLastSuccessfulMirror(source)
	if shouldIgnorePrevious {
		return lastSuccess, nil
	}
	_, err := destinationES.Query(
		ctx,
		eventstore.NewQuery(
			system.AggregateInstance,
			lastSuccess,
			eventstore.SetFilters(lastSuccess.Filter()),
		),
	)
	if err != nil {
		return nil, err
	}

	return lastSuccess, nil
}

func writeMigrationStart(ctx context.Context, sourceES *eventstore.EventStore, id string, destination string) (_ float64, err error) {
	var cmd *eventstore.Command
	if len(instanceIDs) > 0 {
		cmd, err = mirror_event.NewStartedInstancesCommand(destination, instanceIDs)
		if err != nil {
			return 0, err
		}
	} else {
		cmd = mirror_event.NewStartedSystemCommand(destination)
	}

	var position projection.HighestPosition

	err = sourceES.Push(
		ctx,
		eventstore.NewPushIntent(
			system.AggregateInstance,
			eventstore.AppendAggregate(
				system.AggregateOwner,
				system.AggregateType,
				id,
				eventstore.CurrentSequenceMatches(0),
				eventstore.AppendCommands(cmd),
			),
			eventstore.PushReducer(&position),
		),
	)
	if err != nil {
		return 0, err
	}
	return position.Position, nil
}

func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position float64) error {
	return destinationES.Push(
		ctx,
		eventstore.NewPushIntent(
			system.AggregateInstance,
			eventstore.AppendAggregate(
				system.AggregateOwner,
				system.AggregateType,
				id,
				eventstore.CurrentSequenceMatches(0),
				eventstore.AppendCommands(mirror_event.NewSucceededCommand(source, position)),
			),
		),
	)
}

func writeMigrationFailed(ctx context.Context, destinationES *eventstore.EventStore, id, source string, err error) error {
	return destinationES.Push(
		ctx,
		eventstore.NewPushIntent(
			system.AggregateInstance,
			eventstore.AppendAggregate(
				system.AggregateOwner,
				system.AggregateType,
				id,
				eventstore.CurrentSequenceMatches(0),
				eventstore.AppendCommands(mirror_event.NewFailedCommand(source, err)),
			),
		),
	)
}
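Together these writers bracket every run: `started` is pushed to the source eventstore and captures the highest position to copy, while `succeeded`/`failed` land in the destination. On a rerun, `queryLastSuccessfulMigration` supplies the lower bound, so only events inside a half-open position window are copied (unless `--ignore-previous` resets it). The predicate that `copyEvents` in event_store.go encodes in SQL, spelled out:

```go
package mirrorexample

// shouldCopy restates the resume window applied by copyEvents: an event is
// copied when its position lies in the half-open interval
// (lastSuccessfulPosition, maxPosition].
func shouldCopy(position, lastSuccessfulPosition, maxPosition float64) bool {
	return position > lastSuccessfulPosition && position <= maxPosition
}
```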
cmd/mirror/event_store.go (new file)
@@ -0,0 +1,250 @@
package mirror

import (
	"context"
	"database/sql"
	_ "embed"
	"errors"
	"io"
	"time"

	"github.com/jackc/pgx/v5/stdlib"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	db "github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
	"github.com/zitadel/zitadel/internal/id"
	"github.com/zitadel/zitadel/internal/v2/database"
	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
	"github.com/zitadel/zitadel/internal/zerrors"
)

var shouldIgnorePrevious bool

func eventstoreCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "eventstore",
		Short: "mirrors the eventstore of an instance from one database to another",
		Long: `mirrors the eventstore of an instance from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Migrate only copies events2 and unique constraints`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copyEventstore(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete unique constraints of defined instances before copy")
	cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")

	return cmd
}

func copyEventstore(ctx context.Context, config *Migration) {
	sourceClient, err := db.Connect(config.Source, false, dialect.DBPurposeQuery)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := db.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyEvents(ctx, sourceClient, destClient, config.EventBulkSize)
	copyUniqueConstraints(ctx, sourceClient, destClient)
}

func positionQuery(db *db.DB) string {
	switch db.Type() {
	case "postgres":
		return "SELECT EXTRACT(EPOCH FROM clock_timestamp())"
	case "cockroach":
		return "SELECT cluster_logical_timestamp()"
	default:
		logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
		return ""
	}
}

func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
	start := time.Now()
	reader, writer := io.Pipe()

	migrationID, err := id.SonyFlakeGenerator().Next()
	logging.OnError(err).Fatal("unable to generate migration id")

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")

	sourceES := eventstore.NewEventstoreFromOne(postgres.New(source, &postgres.Config{
		MaxRetries: 3,
	}))
	destinationES := eventstore.NewEventstoreFromOne(postgres.New(dest, &postgres.Config{
		MaxRetries: 3,
	}))

	previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName())
	logging.OnError(err).Fatal("unable to query latest successful migration")

	maxPosition, err := writeMigrationStart(ctx, sourceES, migrationID, dest.DatabaseName())
	logging.OnError(err).Fatal("unable to write migration started event")

	logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration")

	nextPos := make(chan bool, 1)
	pos := make(chan float64, 1)
	errs := make(chan error, 3)

	go func() {
		err := sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			nextPos <- true
			var i uint32
			for position := range pos {
				var stmt database.Statement
				stmt.WriteString("COPY (SELECT instance_id, aggregate_type, aggregate_id, event_type, sequence, revision, created_at, regexp_replace(payload::TEXT, '\\\\u0000', '', 'g')::JSON payload, creator, owner, ")
				stmt.WriteArg(position)
				stmt.WriteString(" position, row_number() OVER (PARTITION BY instance_id ORDER BY position, in_tx_order) AS in_tx_order FROM eventstore.events2 ")
				stmt.WriteString(instanceClause())
				stmt.WriteString(" AND ")
				database.NewNumberAtMost(maxPosition).Write(&stmt, "position")
				stmt.WriteString(" AND ")
				database.NewNumberGreater(previousMigration.Position).Write(&stmt, "position")
				stmt.WriteString(" ORDER BY instance_id, position, in_tx_order")
				stmt.WriteString(" LIMIT ")
				stmt.WriteArg(bulkSize)
				stmt.WriteString(" OFFSET ")
				stmt.WriteArg(bulkSize * i)
				stmt.WriteString(") TO STDOUT")

				// COPY does not allow args, so we replace the args in the statement
				tag, err := conn.PgConn().CopyTo(ctx, writer, stmt.Debug())
				if err != nil {
					return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i)
				}
				if tag.RowsAffected() < int64(bulkSize) {
					return nil
				}

				nextPos <- true
				i++
			}
			return nil
		})
		writer.Close()
		close(nextPos)
		errs <- err
	}()

	// generate the next position for each bulk of events
	go func() {
		defer close(pos)
		for range nextPos {
			var position float64
			err := dest.QueryRowContext(
				ctx,
				func(row *sql.Row) error {
					return row.Scan(&position)
				},
				positionQuery(dest),
			)
			if err != nil {
				errs <- zerrors.ThrowUnknown(err, "MIGRA-kMyPH", "unable to query next position")
				return
			}
			pos <- position
		}
	}()

	var eventCount int64
	errs <- destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN")
		eventCount = tag.RowsAffected()
		if err != nil {
			return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination")
		}

		return nil
	})

	close(errs)
	writeCopyEventsDone(ctx, destinationES, migrationID, source.DatabaseName(), maxPosition, errs)

	logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated")
}

func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position float64, errs <-chan error) {
	joinedErrs := make([]error, 0, len(errs))
	for err := range errs {
		joinedErrs = append(joinedErrs, err)
	}
	err := errors.Join(joinedErrs...)

	if err != nil {
		logging.WithError(err).Error("unable to mirror events")
		err := writeMigrationFailed(ctx, es, id, source, err)
		logging.OnError(err).Fatal("unable to write failed event")
		return
	}

	err = writeMigrationSucceeded(ctx, es, id, source, position)
	logging.OnError(err).Fatal("unable to write succeeded event")
}

func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) {
	start := time.Now()
	reader, writer := io.Pipe()
	errs := make(chan error, 1)

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")

	go func() {
		err := sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			var stmt database.Statement
			stmt.WriteString("COPY (SELECT instance_id, unique_type, unique_field FROM eventstore.unique_constraints ")
			stmt.WriteString(instanceClause())
			stmt.WriteString(") TO stdout")

			_, err := conn.PgConn().CopyTo(ctx, writer, stmt.String())
			writer.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")

	var eventCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			var stmt database.Statement
			stmt.WriteString("DELETE FROM eventstore.unique_constraints ")
			stmt.WriteString(instanceClause())

			_, err := conn.Exec(ctx, stmt.String())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.unique_constraints FROM stdin")
		eventCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy unique constraints to destination")
	logging.OnError(<-errs).Fatal("unable to copy unique constraints from source")
	logging.WithFields("took", time.Since(start), "count", eventCount).Info("unique constraints migrated")
}
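`copyEvents` coordinates three goroutines over channels: the source loop asks for a fresh destination position before each page (`nextPos`), a helper goroutine answers on `pos`, and the loop stops once a `LIMIT`/`OFFSET` page comes back short of the bulk size. The control flow, stripped of all SQL — both callbacks are stand-ins for the COPY and the position query:

```go
package mirrorexample

// pageEvents distills the paging pipeline of copyEvents. nextPosition
// stands in for the destination position query, fetchPage for one bulk
// COPY; fetchPage returns how many rows the page produced.
func pageEvents(bulkSize int, nextPosition func() float64, fetchPage func(pos float64, offset int) int) {
	nextPos := make(chan struct{}, 1)
	pos := make(chan float64, 1)

	// helper: answer every position request
	go func() {
		defer close(pos)
		for range nextPos {
			pos <- nextPosition()
		}
	}()

	nextPos <- struct{}{}
	offset := 0
	for p := range pos {
		copied := fetchPage(p, offset)
		if copied < bulkSize { // short page: no more events to copy
			break
		}
		offset += bulkSize
		nextPos <- struct{}{} // request a position for the next page
	}
	close(nextPos)
}
```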
cmd/mirror/mirror.go (new file)
@@ -0,0 +1,93 @@
package mirror

import (
	"bytes"
	_ "embed"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/key"
)

var (
	instanceIDs   []string
	isSystem      bool
	shouldReplace bool
)

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "mirror",
		Short: "mirrors all data of ZITADEL from one database to another",
		Long: `mirrors all data of ZITADEL from one database to another
ZITADEL needs to be initialized and set up with --for-mirror

The command does mirror all data needed and recomputes the projections.
For more details call the help functions of the sub commands.

Order of execution:
1. mirror system tables
2. mirror auth tables
3. mirror event store tables
4. recompute projections
5. verify`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			projectionConfig := mustNewProjectionsConfig(viper.GetViper())

			masterKey, err := key.MasterKey(cmd)
			logging.OnError(err).Fatal("unable to read master key")

			copySystem(cmd.Context(), config)
			copyAuth(cmd.Context(), config)
			copyEventstore(cmd.Context(), config)

			projections(cmd.Context(), projectionConfig, masterKey)
			verifyMigration(cmd.Context(), config)
		},
	}

	mirrorFlags(cmd)
	cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")
	cmd.Flags().BoolVar(&shouldReplace, "replace", false, `replaces all data of the following tables for the provided instances or all if the "--system"-flag is set:
* system.assets
* auth.auth_requests
* eventstore.unique_constraints
The flag should be provided if you want to execute the mirror command multiple times so that the static data are also mirrored to prevent inconsistent states.`)
	migrateProjectionsFlags(cmd)

	err := viper.MergeConfig(bytes.NewBuffer(defaultConfig))
	logging.OnError(err).Fatal("unable to read default config")

	cmd.AddCommand(
		eventstoreCmd(),
		systemCmd(),
		projectionsCmd(),
		authCmd(),
		verifyCmd(),
	)

	return cmd
}

func mirrorFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
	cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
	cmd.MarkFlagsOneRequired("system", "instance")
	cmd.MarkFlagsMutuallyExclusive("system", "instance")
}

func instanceClause() string {
	if isSystem {
		return "WHERE instance_id <> ''"
	}
	for i := range instanceIDs {
		instanceIDs[i] = "'" + instanceIDs[i] + "'"
	}

	// COPY does not allow parameters so we need to set them directly
	return "WHERE instance_id IN (" + strings.Join(instanceIDs, ", ") + ")"
}
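Because `COPY` statements cannot carry bind parameters, `instanceClause` interpolates the filter directly into the SQL text. A standalone rendering of that logic, with example inputs and outputs in the comment (the function name is illustrative):

```go
package mirrorexample

import "strings"

// renderInstanceClause mimics instanceClause in mirror.go. With
// isSystem=true it yields "WHERE instance_id <> ''"; with
// ids = []string{"inst-1", "inst-2"} it yields
// "WHERE instance_id IN ('inst-1', 'inst-2')".
func renderInstanceClause(isSystem bool, ids []string) string {
	if isSystem {
		return "WHERE instance_id <> ''"
	}
	quoted := make([]string, len(ids))
	for i, id := range ids {
		quoted[i] = "'" + id + "'"
	}
	return "WHERE instance_id IN (" + strings.Join(quoted, ", ") + ")"
}
```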
cmd/mirror/projections.go (new file)
@@ -0,0 +1,316 @@
package mirror

import (
	"context"
	"database/sql"
	"net/http"
	"sync"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/encryption"
	"github.com/zitadel/zitadel/cmd/key"
	"github.com/zitadel/zitadel/cmd/tls"
	admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
	admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
	admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/api/oidc"
	"github.com/zitadel/zitadel/internal/api/ui/login"
	auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
	auth_handler "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/handler"
	auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
	"github.com/zitadel/zitadel/internal/authz"
	authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/systemdefaults"
	crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
	old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
	new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
	"github.com/zitadel/zitadel/internal/i18n"
	"github.com/zitadel/zitadel/internal/id"
	"github.com/zitadel/zitadel/internal/notification"
	"github.com/zitadel/zitadel/internal/notification/handlers"
	"github.com/zitadel/zitadel/internal/query"
	"github.com/zitadel/zitadel/internal/query/projection"
	static_config "github.com/zitadel/zitadel/internal/static/config"
	es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
	es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
	"github.com/zitadel/zitadel/internal/webauthn"
)

func projectionsCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "projections",
		Short: "calls the projections synchronously",
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewProjectionsConfig(viper.GetViper())

			masterKey, err := key.MasterKey(cmd)
			logging.OnError(err).Fatal("unable to read master key")

			projections(cmd.Context(), config, masterKey)
		},
	}

	migrateProjectionsFlags(cmd)

	return cmd
}

type ProjectionsConfig struct {
	Destination    database.Config
	Projections    projection.Config
	EncryptionKeys *encryption.EncryptionKeyConfig
	SystemAPIUsers map[string]*internal_authz.SystemAPIUser
	Eventstore     *eventstore.Config

	Admin admin_es.Config
	Auth  auth_es.Config

	Log     *logging.Config
	Machine *id.Config

	ExternalPort    uint16
	ExternalDomain  string
	ExternalSecure  bool
	InternalAuthZ   internal_authz.Config
	SystemDefaults  systemdefaults.SystemDefaults
	Telemetry       *handlers.TelemetryPusherConfig
	Login           login.Config
	OIDC            oidc.Config
	WebAuthNName    string
	DefaultInstance command.InstanceSetup
	AssetStorage    static_config.AssetStorageConfig
}

func migrateProjectionsFlags(cmd *cobra.Command) {
	key.AddMasterKeyFlag(cmd)
	tls.AddTLSModeFlag(cmd)
}

func projections(
	ctx context.Context,
	config *ProjectionsConfig,
	masterKey string,
) {
	start := time.Now()

	client, err := database.Connect(config.Destination, false, dialect.DBPurposeQuery)
	logging.OnError(err).Fatal("unable to connect to database")

	keyStorage, err := crypto_db.NewKeyStorage(client, masterKey)
	logging.OnError(err).Fatal("cannot start key storage")

	keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage)
	logging.OnError(err).Fatal("unable to read encryption keys")

	staticStorage, err := config.AssetStorage.NewStorage(client.DB)
	logging.OnError(err).Fatal("unable create static storage")

	config.Eventstore.Querier = old_es.NewCRDB(client)
	esPusherDBClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
	logging.OnError(err).Fatal("unable to connect eventstore push client")
	config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
	es := eventstore.NewEventstore(config.Eventstore)
	esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{
		MaxRetries: config.Eventstore.MaxRetries,
	}))

	sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

	queries, err := query.StartQueries(
		ctx,
		es,
		esV4.Querier,
		client,
		client,
		config.Projections,
		config.SystemDefaults,
		keys.IDPConfig,
		keys.OTP,
		keys.OIDC,
		keys.SAML,
		config.InternalAuthZ.RolePermissionMappings,
		sessionTokenVerifier,
		func(q *query.Queries) domain.PermissionCheck {
			return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
				return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
			}
		},
		0,
		config.SystemAPIUsers,
		false,
	)
	logging.OnError(err).Fatal("unable to start queries")

	authZRepo, err := authz.Start(queries, es, client, keys.OIDC, config.ExternalSecure)
	logging.OnError(err).Fatal("unable to start authz repo")

	webAuthNConfig := &webauthn.Config{
		DisplayName:    config.WebAuthNName,
		ExternalSecure: config.ExternalSecure,
	}
	commands, err := command.StartCommands(
		es,
		config.SystemDefaults,
		config.InternalAuthZ.RolePermissionMappings,
		staticStorage,
		webAuthNConfig,
		config.ExternalDomain,
		config.ExternalSecure,
		config.ExternalPort,
		keys.IDPConfig,
		keys.OTP,
		keys.SMTP,
		keys.SMS,
		keys.User,
		keys.DomainVerification,
		keys.OIDC,
		keys.SAML,
		&http.Client{},
		func(ctx context.Context, permission, orgID, resourceID string) (err error) {
			return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
		},
		sessionTokenVerifier,
		config.OIDC.DefaultAccessTokenLifetime,
		config.OIDC.DefaultRefreshTokenExpiration,
		config.OIDC.DefaultRefreshTokenIdleExpiration,
		config.DefaultInstance.SecretGenerators,
	)
	logging.OnError(err).Fatal("unable to start commands")

	err = projection.Create(ctx, client, es, config.Projections, keys.OIDC, keys.SAML, config.SystemAPIUsers)
	logging.OnError(err).Fatal("unable to start projections")

	i18n.MustLoadSupportedLanguagesFromDir()

	notification.Register(
		ctx,
		config.Projections.Customizations["notifications"],
		config.Projections.Customizations["notificationsquotas"],
		config.Projections.Customizations["telemetry"],
		*config.Telemetry,
		config.ExternalDomain,
		config.ExternalPort,
		config.ExternalSecure,
		commands,
		queries,
		es,
		config.Login.DefaultOTPEmailURLV2,
		config.SystemDefaults.Notifications.FileSystemPath,
		keys.User,
		keys.SMTP,
		keys.SMS,
	)

	config.Auth.Spooler.Client = client
	config.Auth.Spooler.Eventstore = es
	authView, err := auth_view.StartView(config.Auth.Spooler.Client, keys.OIDC, queries, config.Auth.Spooler.Eventstore)
	logging.OnError(err).Fatal("unable to start auth view")
	auth_handler.Register(ctx, config.Auth.Spooler, authView, queries)

	config.Admin.Spooler.Client = client
	config.Admin.Spooler.Eventstore = es
	adminView, err := admin_view.StartView(config.Admin.Spooler.Client)
	logging.OnError(err).Fatal("unable to start admin view")

	admin_handler.Register(ctx, config.Admin.Spooler, adminView, staticStorage)

	instances := make(chan string, config.Projections.ConcurrentInstances)
	failedInstances := make(chan string)
	wg := sync.WaitGroup{}
	wg.Add(int(config.Projections.ConcurrentInstances))

	go func() {
		for instance := range failedInstances {
			logging.WithFields("instance", instance).Error("projection failed")
		}
	}()

	for i := 0; i < int(config.Projections.ConcurrentInstances); i++ {
		go execProjections(ctx, instances, failedInstances, &wg)
	}

	for _, instance := range queryInstanceIDs(ctx, client) {
		instances <- instance
	}
	close(instances)
	wg.Wait()

	close(failedInstances)

	logging.WithFields("took", time.Since(start)).Info("projections executed")
}

func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
	for instance := range instances {
		logging.WithFields("instance", instance).Info("start projections")
		ctx = internal_authz.WithInstanceID(ctx, instance)

		err := projection.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).OnError(err).Info("trigger failed")
			failedInstances <- instance
			continue
		}

		err = admin_handler.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).OnError(err).Info("trigger admin handler failed")
			failedInstances <- instance
			continue
		}

		err = auth_handler.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).OnError(err).Info("trigger auth handler failed")
			failedInstances <- instance
			continue
		}

		err = notification.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).OnError(err).Info("trigger notification failed")
			failedInstances <- instance
			continue
		}
		logging.WithFields("instance", instance).Info("projections done")
	}
	wg.Done()
}

// returns the instance configured by flag
// or all instances which are not removed
func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
	if len(instanceIDs) > 0 {
		return instanceIDs
	}

	instances := []string{}
	err := source.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var instance string

				if err := r.Scan(&instance); err != nil {
					return err
				}
				instances = append(instances, instance)
			}
			return r.Err()
		},
		"SELECT DISTINCT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.added' AND instance_id NOT IN (SELECT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.removed')",
	)
	logging.OnError(err).Fatal("unable to query instances")

	return instances
}
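Recomputing projections uses a fixed-size worker pool: `ConcurrentInstances` goroutines drain the `instances` channel and report failures on a second channel rather than aborting the whole run. The generic shape of that pattern, as a sketch with stand-in job and work types:

```go
package mirrorexample

import "sync"

// runPool is the generic form of the per-instance worker pool above:
// `workers` goroutines drain the jobs channel; failed jobs are reported on
// a separate channel so one failure never stops the others.
func runPool(workers int, jobs []string, work func(job string) error) (failed []string) {
	in := make(chan string, workers)
	out := make(chan string)
	var wg sync.WaitGroup
	wg.Add(workers)

	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for job := range in {
				if err := work(job); err != nil {
					out <- job
				}
			}
		}()
	}

	// collector: gather failures until all workers are done
	done := make(chan struct{})
	go func() {
		for job := range out {
			failed = append(failed, job)
		}
		close(done)
	}()

	for _, job := range jobs {
		in <- job
	}
	close(in)
	wg.Wait()
	close(out)
	<-done
	return failed
}
```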
cmd/mirror/system.go (new file)
@@ -0,0 +1,139 @@
package mirror

import (
	"context"
	_ "embed"
	"io"
	"time"

	"github.com/jackc/pgx/v5/stdlib"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
)

func systemCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "system",
		Short: "mirrors the system tables of ZITADEL from one database to another",
		Long: `mirrors the system tables of ZITADEL from one database to another
ZITADEL needs to be initialized
Only keys and assets are mirrored`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copySystem(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete ALL keys and assets of defined instances before copy")

	return cmd
}

func copySystem(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyAssets(ctx, sourceClient, destClient)
	copyEncryptionKeys(ctx, sourceClient, destClient)
}

func copyAssets(ctx context.Context, source, dest *database.DB) {
	start := time.Now()

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			// ignore hash column because it's computed
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets "+instanceClause()+") TO stdout")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")
	defer destConn.Close()

	var eventCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "DELETE FROM system.assets "+instanceClause())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin")
		eventCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy assets to destination")
	logging.OnError(<-errs).Fatal("unable to copy assets from source")
	logging.WithFields("took", time.Since(start), "count", eventCount).Info("assets migrated")
}

func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
	start := time.Now()

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY system.encryption_keys TO stdout")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")
	defer destConn.Close()

	var eventCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "TRUNCATE system.encryption_keys")
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin")
		eventCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy encryption keys to destination")
	logging.OnError(<-errs).Fatal("unable to copy encryption keys from source")
	logging.WithFields("took", time.Since(start), "count", eventCount).Info("encryption keys migrated")
}
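Note the asymmetry above: `system.assets` is copied with an explicit column list on both sides because its `hash` column is computed by the database and must not be supplied, while `--replace` deletes assets per the instance filter but truncates `system.encryption_keys` wholesale, since keys are not instance-scoped. For reference, the two asset statements side by side (the instance filter is appended at runtime):

```go
package mirrorexample

// Export and import statements for system.assets, shown here as constants
// for reference only; the hash column is deliberately absent from both.
const (
	copyAssetsOut = "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets) TO STDOUT"
	copyAssetsIn  = "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM STDIN"
)
```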
cmd/mirror/verify.go (new file)
@@ -0,0 +1,111 @@
package mirror

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
)

func verifyCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "verify",
		Short: "counts if source and dest have the same amount of entries",
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			verifyMigration(cmd.Context(), config)
		},
	}
}

var schemas = []string{
	"adminapi",
	"auth",
	"eventstore",
	"projections",
	"system",
}

func verifyMigration(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	for _, schema := range schemas {
		for _, table := range append(getTables(ctx, destClient, schema), getViews(ctx, destClient, schema)...) {
			sourceCount := countEntries(ctx, sourceClient, table)
			destCount := countEntries(ctx, destClient, table)

			entry := logging.WithFields("table", table, "dest", destCount, "source", sourceCount)
			if sourceCount == destCount {
				entry.Debug("equal count")
				continue
			}
			entry.WithField("diff", destCount-sourceCount).Info("unequal count")
		}
	}
}

func getTables(ctx context.Context, dest *database.DB, schema string) (tables []string) {
	err := dest.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var table string
				if err := r.Scan(&table); err != nil {
					return err
				}
				tables = append(tables, table)
			}
			return r.Err()
		},
		"SELECT CONCAT(schemaname, '.', tablename) FROM pg_tables WHERE schemaname = $1",
		schema,
	)
	logging.WithFields("schema", schema).OnError(err).Fatal("unable to query tables")
	return tables
}

func getViews(ctx context.Context, dest *database.DB, schema string) (tables []string) {
	err := dest.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var table string
				if err := r.Scan(&table); err != nil {
					return err
				}
				tables = append(tables, table)
			}
			return r.Err()
		},
		"SELECT CONCAT(schemaname, '.', viewname) FROM pg_views WHERE schemaname = $1",
		schema,
	)
	logging.WithFields("schema", schema).OnError(err).Fatal("unable to query views")
	return tables
}

func countEntries(ctx context.Context, client *database.DB, table string) (count int) {
	err := client.QueryRowContext(
		ctx,
		func(r *sql.Row) error {
			return r.Scan(&count)
		},
		fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause()),
	)
	logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count")

	return count
}
@@ -26,6 +26,8 @@ type FirstInstance struct {
 	PatPath  string
 	Features *command.InstanceFeatures

+	Skip bool
+
 	instanceSetup     command.InstanceSetup
 	userEncryptionKey *crypto.KeyConfig
 	smtpEncryptionKey *crypto.KeyConfig

@@ -42,6 +44,9 @@ type FirstInstance struct {
 }

 func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error {
+	if mig.Skip {
+		return nil
+	}
 	keyStorage, err := mig.verifyEncryptionKeys(ctx)
 	if err != nil {
 		return err

@@ -28,6 +28,7 @@ import (
 )

 type Config struct {
+	ForMirror      bool
 	Database       database.Config
 	SystemDefaults systemdefaults.SystemDefaults
 	InternalAuthZ  internal_authz.Config

@@ -34,6 +34,8 @@ import (
 	notify_handler "github.com/zitadel/zitadel/internal/notification"
 	"github.com/zitadel/zitadel/internal/query"
 	"github.com/zitadel/zitadel/internal/query/projection"
+	es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
+	es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
 	"github.com/zitadel/zitadel/internal/webauthn"
 )

@@ -57,13 +59,16 @@ Requirements:
 			err = BindInitProjections(cmd)
 			logging.OnError(err).Fatal("unable to bind \"init-projections\" flag")

+			err = bindForMirror(cmd)
+			logging.OnError(err).Fatal("unable to bind \"for-mirror\" flag")
+
 			config := MustNewConfig(viper.GetViper())
 			steps := MustNewSteps(viper.New())

 			masterKey, err := key.MasterKey(cmd)
 			logging.OnError(err).Panic("No master key provided")

-			Setup(config, steps, masterKey)
+			Setup(cmd.Context(), config, steps, masterKey)
 		},
 	}

@@ -77,6 +82,7 @@ Requirements:
 func Flags(cmd *cobra.Command) {
 	cmd.PersistentFlags().StringArrayVar(&stepFiles, "steps", nil, "paths to step files to overwrite default steps")
 	cmd.Flags().Bool("init-projections", viper.GetBool("InitProjections"), "beta feature: initializes projections after they are created, allows smooth start as projections are up to date")
+	cmd.Flags().Bool("for-mirror", viper.GetBool("ForMirror"), "use this flag if you want to mirror your existing data")
 	key.AddMasterKeyFlag(cmd)
 	tls.AddTLSModeFlag(cmd)
 }

@@ -85,8 +91,11 @@ func BindInitProjections(cmd *cobra.Command) error {
 	return viper.BindPFlag("InitProjections.Enabled", cmd.Flags().Lookup("init-projections"))
 }

-func Setup(config *Config, steps *Steps, masterKey string) {
-	ctx := context.Background()
+func bindForMirror(cmd *cobra.Command) error {
+	return viper.BindPFlag("ForMirror", cmd.Flags().Lookup("for-mirror"))
+}
+
+func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) {
 	logging.Info("setup started")

 	i18n.MustLoadSupportedLanguagesFromDir()

@@ -102,10 +111,14 @@ func Setup(config *Config, steps *Steps, masterKey string) {
 	config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
 	eventstoreClient := eventstore.NewEventstore(config.Eventstore)
 	logging.OnError(err).Fatal("unable to start eventstore")
+	eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{
+		MaxRetries: config.Eventstore.MaxRetries,
+	}))

 	steps.s1ProjectionTable = &ProjectionTable{dbClient: queryDBClient.DB}
 	steps.s2AssetsTable = &AssetTable{dbClient: queryDBClient.DB}

+	steps.FirstInstance.Skip = config.ForMirror || steps.FirstInstance.Skip
 	steps.FirstInstance.instanceSetup = config.DefaultInstance
 	steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User
 	steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP

@@ -197,10 +210,11 @@ func Setup(config *Config, steps *Steps, masterKey string) {
 	}

 	// projection initialization must be done last, since the steps above might add required columns to the projections
-	if config.InitProjections.Enabled {
+	if !config.ForMirror && config.InitProjections.Enabled {
 		initProjections(
 			ctx,
 			eventstoreClient,
+			eventstoreV4,
 			queryDBClient,
 			projectionDBClient,
 			masterKey,

@@ -222,6 +236,7 @@ func readStmt(fs embed.FS, folder, typ, filename string) (string, error) {
 func initProjections(
 	ctx context.Context,
 	eventstoreClient *eventstore.Eventstore,
+	eventstoreV4 *es_v4.EventStore,
 	queryDBClient,
 	projectionDBClient *database.DB,
 	masterKey string,

@@ -278,6 +293,7 @@ func initProjections(
 	queries, err := query.StartQueries(
 		ctx,
 		eventstoreClient,
+		eventstoreV4.Querier,
 		queryDBClient,
 		projectionDBClient,
 		config.Projections,

@@ -1,5 +1,7 @@
 # By using the FirstInstance section, you can overwrite the DefaultInstance configuration for the first instance created by zitadel setup.
 FirstInstance:
+  # If set to true zitadel is setup without initial data
+  Skip: false
   # The machine key from the section FirstInstance.Org.Machine.MachineKey is written to the MachineKeyPath.
   MachineKeyPath: # ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH
   # The personal access token from the section FirstInstance.Org.Machine.Pat is written to the PatPath.

@@ -78,6 +78,8 @@ import (
 	"github.com/zitadel/zitadel/internal/notification"
 	"github.com/zitadel/zitadel/internal/query"
 	"github.com/zitadel/zitadel/internal/static"
+	es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
+	es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
 	"github.com/zitadel/zitadel/internal/webauthn"
 	"github.com/zitadel/zitadel/openapi"
 )

@@ -153,12 +155,16 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
 	config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
 	config.Eventstore.Querier = old_es.NewCRDB(queryDBClient)
 	eventstoreClient := eventstore.NewEventstore(config.Eventstore)
+	eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{
+		MaxRetries: config.Eventstore.MaxRetries,
+	}))

 	sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

 	queries, err := query.StartQueries(
 		ctx,
 		eventstoreClient,
+		eventstoreV4.Querier,
 		queryDBClient,
 		projectionDBClient,
 		config.Projections,

@@ -36,7 +36,7 @@ Requirements:
 			setupConfig := setup.MustNewConfig(viper.GetViper())
 			setupSteps := setup.MustNewSteps(viper.New())
-			setup.Setup(setupConfig, setupSteps, masterKey)
+			setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)

 			startConfig := MustNewConfig(viper.GetViper())

@@ -34,7 +34,7 @@ Requirements:
 			setupConfig := setup.MustNewConfig(viper.GetViper())
 			setupSteps := setup.MustNewSteps(viper.New())
-			setup.Setup(setupConfig, setupSteps, masterKey)
+			setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)

 			startConfig := MustNewConfig(viper.GetViper())

@@ -15,6 +15,7 @@ import (
 	"github.com/zitadel/zitadel/cmd/build"
 	"github.com/zitadel/zitadel/cmd/initialise"
 	"github.com/zitadel/zitadel/cmd/key"
+	"github.com/zitadel/zitadel/cmd/mirror"
 	"github.com/zitadel/zitadel/cmd/ready"
 	"github.com/zitadel/zitadel/cmd/setup"
 	"github.com/zitadel/zitadel/cmd/start"

@@ -55,6 +56,7 @@ func New(out io.Writer, in io.Reader, args []string, server chan<- *start.Server
 		start.New(server),
 		start.NewStartFromInit(server),
 		start.NewStartFromSetup(server),
+		mirror.New(),
 		key.New(),
 		ready.New(),
 	)