Mirror of https://github.com/zitadel/zitadel.git, synced 2025-08-11 21:47:32 +00:00
chore: move the go code into a subfolder
35 apps/api/cmd/admin/admin.go Normal file
@@ -0,0 +1,35 @@
package admin

import (
	_ "embed"
	"errors"

	"github.com/spf13/cobra"

	"github.com/zitadel/zitadel/cmd/initialise"
	"github.com/zitadel/zitadel/cmd/key"
	"github.com/zitadel/zitadel/cmd/setup"
	"github.com/zitadel/zitadel/cmd/start"
)

func New() *cobra.Command {
	adminCMD := &cobra.Command{
		Use:        "admin",
		Short:      "The ZITADEL admin CLI lets you interact with your instance",
		Long:       `The ZITADEL admin CLI lets you interact with your instance`,
		Deprecated: "please use subcommands directly, e.g. `zitadel start`",
		RunE: func(cmd *cobra.Command, args []string) error {
			return errors.New("no additional command provided")
		},
	}

	adminCMD.AddCommand(
		initialise.New(),
		setup.New(),
		start.New(nil),
		start.NewStartFromInit(nil),
		key.New(),
	)

	return adminCMD
}
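A brief, hypothetical sketch of how this deprecated wrapper sits next to the direct subcommands; the root command shown here is assumed for illustration and is not part of this diff:

	// Hypothetical wiring: mounting admin.New() keeps `zitadel admin start`
	// working while cobra prints the deprecation notice defined above.
	root := &cobra.Command{Use: "zitadel"}
	root.AddCommand(admin.New(), start.New(nil), initialise.New())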
33 apps/api/cmd/build/info.go Normal file
@@ -0,0 +1,33 @@
package build

import "time"

var (
	version  = ""
	commit   = ""
	date     = ""
	dateTime time.Time
)

func Version() string {
	if version != "" {
		return version
	}
	version = Date().Format(time.RFC3339)
	return version
}

func Commit() string {
	return commit
}

func Date() time.Time {
	if !dateTime.IsZero() {
		return dateTime
	}
	dateTime, _ = time.Parse(time.RFC3339, date)
	if dateTime.IsZero() {
		dateTime = time.Now()
	}
	return dateTime
}
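The three package-level strings above are empty by default; they are presumably populated at link time with the Go linker's standard -X flag (the exact build invocation and package path are assumptions, not part of this diff). A hedged sketch of both sides:

	// Assumed build-time injection (adjust the package path to the module layout):
	//   go build -ldflags "-X github.com/zitadel/zitadel/cmd/build.version=v2.0.0 \
	//                      -X github.com/zitadel/zitadel/cmd/build.commit=abc1234 \
	//                      -X github.com/zitadel/zitadel/cmd/build.date=2025-08-11T00:00:00Z"
	//
	// At runtime, callers only see the accessors; Version() falls back to the
	// build date, and Date() falls back to time.Now, when nothing was injected.
	fmt.Printf("zitadel %s (%s, built %s)\n", build.Version(), build.Commit(), build.Date())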
2107 apps/api/cmd/defaults.yaml Normal file
File diff suppressed because one or more lines are too long
134 apps/api/cmd/encryption/encryption_keys.go Normal file
@@ -0,0 +1,134 @@
package encryption

import (
	"context"

	"github.com/zitadel/zitadel/internal/crypto"
	"github.com/zitadel/zitadel/internal/zerrors"
)

var (
	defaultKeyIDs = []string{
		"domainVerificationKey",
		"idpConfigKey",
		"oidcKey",
		"samlKey",
		"otpKey",
		"smsKey",
		"smtpKey",
		"userKey",
		"targetKey",
		"csrfCookieKey",
		"userAgentCookieKey",
	}
)

type EncryptionKeyConfig struct {
	DomainVerification   *crypto.KeyConfig
	IDPConfig            *crypto.KeyConfig
	OIDC                 *crypto.KeyConfig
	SAML                 *crypto.KeyConfig
	OTP                  *crypto.KeyConfig
	SMS                  *crypto.KeyConfig
	SMTP                 *crypto.KeyConfig
	User                 *crypto.KeyConfig
	Target               *crypto.KeyConfig
	CSRFCookieKeyID      string
	UserAgentCookieKeyID string
}

type EncryptionKeys struct {
	DomainVerification crypto.EncryptionAlgorithm
	IDPConfig          crypto.EncryptionAlgorithm
	OIDC               crypto.EncryptionAlgorithm
	SAML               crypto.EncryptionAlgorithm
	OTP                crypto.EncryptionAlgorithm
	SMS                crypto.EncryptionAlgorithm
	SMTP               crypto.EncryptionAlgorithm
	User               crypto.EncryptionAlgorithm
	Target             crypto.EncryptionAlgorithm
	CSRFCookieKey      []byte
	UserAgentCookieKey []byte
	OIDCKey            []byte
}

func EnsureEncryptionKeys(ctx context.Context, keyConfig *EncryptionKeyConfig, keyStorage crypto.KeyStorage) (keys *EncryptionKeys, err error) {
	if err := VerifyDefaultKeys(ctx, keyStorage); err != nil {
		return nil, err
	}
	keys = new(EncryptionKeys)
	keys.DomainVerification, err = crypto.NewAESCrypto(keyConfig.DomainVerification, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.IDPConfig, err = crypto.NewAESCrypto(keyConfig.IDPConfig, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.OIDC, err = crypto.NewAESCrypto(keyConfig.OIDC, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.SAML, err = crypto.NewAESCrypto(keyConfig.SAML, keyStorage)
	if err != nil {
		return nil, err
	}
	key, err := crypto.LoadKey(keyConfig.OIDC.EncryptionKeyID, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.OIDCKey = []byte(key)
	keys.OTP, err = crypto.NewAESCrypto(keyConfig.OTP, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.SMS, err = crypto.NewAESCrypto(keyConfig.SMS, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.SMTP, err = crypto.NewAESCrypto(keyConfig.SMTP, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.User, err = crypto.NewAESCrypto(keyConfig.User, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.Target, err = crypto.NewAESCrypto(keyConfig.Target, keyStorage)
	if err != nil {
		return nil, err
	}
	key, err = crypto.LoadKey(keyConfig.CSRFCookieKeyID, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.CSRFCookieKey = []byte(key)
	key, err = crypto.LoadKey(keyConfig.UserAgentCookieKeyID, keyStorage)
	if err != nil {
		return nil, err
	}
	keys.UserAgentCookieKey = []byte(key)
	return keys, nil
}

func VerifyDefaultKeys(ctx context.Context, keyStorage crypto.KeyStorage) (err error) {
	keys := make([]*crypto.Key, 0, len(defaultKeyIDs))
	for _, keyID := range defaultKeyIDs {
		_, err := crypto.LoadKey(keyID, keyStorage)
		if err == nil {
			continue
		}
		key, err := crypto.NewKey(keyID)
		if err != nil {
			return err
		}
		keys = append(keys, key)
	}
	if len(keys) == 0 {
		return nil
	}
	if err := keyStorage.CreateKeys(ctx, keys...); err != nil {
		return zerrors.ThrowInternal(err, "START-aGBq2", "cannot create default keys")
	}
	return nil
}
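For context, a minimal sketch of how these helpers compose from the outside, assuming a crypto.KeyStorage has already been constructed elsewhere (storage setup is deployment-specific and not shown in this diff):

	// Sketch: make sure the default keys exist, then load the raw OIDC key the
	// same way EnsureEncryptionKeys does ("oidcKey" is one of defaultKeyIDs).
	func loadOIDCKey(ctx context.Context, keyStorage crypto.KeyStorage) ([]byte, error) {
		if err := encryption.VerifyDefaultKeys(ctx, keyStorage); err != nil {
			return nil, err
		}
		key, err := crypto.LoadKey("oidcKey", keyStorage)
		if err != nil {
			return nil, err
		}
		return []byte(key), nil
	}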
35 apps/api/cmd/hooks/complex.go Normal file
@@ -0,0 +1,35 @@
package hooks

import (
	"encoding/json"
	"net/http"
	"reflect"
)

func SliceTypeStringDecode[T any](from, to reflect.Value) (any, error) {
	into := make([]T, 0)
	return complexTypeStringDecodeHook(from, to, into)
}

func MapTypeStringDecode[K ~string | ~int, V any](from, to reflect.Value) (any, error) {
	into := make(map[K]V, 0)
	return complexTypeStringDecodeHook(from, to, into)
}

func MapHTTPHeaderStringDecode(from, to reflect.Value) (any, error) {
	into := http.Header{}
	return complexTypeStringDecodeHook(from, to, into)
}

func complexTypeStringDecodeHook(from, to reflect.Value, out any) (any, error) {
	fromInterface := from.Interface()
	if to.Type() != reflect.TypeOf(out) {
		return fromInterface, nil
	}
	data, ok := fromInterface.(string)
	if !ok {
		return fromInterface, nil
	}
	err := json.Unmarshal([]byte(data), &out)
	return out, err
}
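These hooks match mapstructure's value-based decode-hook signature, so they can be dropped into a decoder configuration. A small, self-contained sketch; the Config type and the input map are made up for illustration (the import path for the hooks package is the one used by mirror/config.go below):

	package main

	import (
		"fmt"

		"github.com/mitchellh/mapstructure"

		"github.com/zitadel/zitadel/cmd/hooks"
	)

	// Config is a made-up target type; Tags arrives as a JSON-encoded string
	// (as it might from an environment variable) and is decoded into a slice.
	type Config struct {
		Tags []string
	}

	func main() {
		input := map[string]any{"Tags": `["alpha","beta"]`}

		var cfg Config
		decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook: mapstructure.DecodeHookFuncValue(hooks.SliceTypeStringDecode[string]),
			Result:     &cfg,
		})
		if err != nil {
			panic(err)
		}
		if err := decoder.Decode(input); err != nil {
			panic(err)
		}
		fmt.Println(cfg.Tags) // [alpha beta]
	}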
34 apps/api/cmd/initialise/config.go Normal file
@@ -0,0 +1,34 @@
package initialise

import (
	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/id"
)

type Config struct {
	Database database.Config
	Machine  *id.Config
	Log      *logging.Config
}

func MustNewConfig(v *viper.Viper) *Config {
	config := new(Config)
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			database.DecodeHook(false),
			mapstructure.TextUnmarshallerHookFunc(),
		)),
	)
	logging.OnError(err).Fatal("unable to read config")

	err = config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	id.Configure(config.Machine)

	return config
}
23 apps/api/cmd/initialise/helper.go Normal file
@@ -0,0 +1,23 @@
package initialise

import (
	"context"
	"errors"

	"github.com/jackc/pgx/v5/pgconn"

	"github.com/zitadel/zitadel/internal/database"
)

func exec(ctx context.Context, db database.ContextExecuter, stmt string, possibleErrCodes []string, args ...interface{}) error {
	_, err := db.ExecContext(ctx, stmt, args...)
	pgErr := new(pgconn.PgError)
	if errors.As(err, &pgErr) {
		for _, possibleCode := range possibleErrCodes {
			if possibleCode == pgErr.Code {
				return nil
			}
		}
	}
	return err
}
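The effect of possibleErrCodes is to make statements idempotent: a re-run that trips a known SQLSTATE is treated as success. A hedged usage sketch (the literal statement stands in for the embedded template used elsewhere in this package):

	// Re-running user creation: SQLSTATE 42710 ("duplicate_object", i.e. the
	// role already exists) is swallowed, anything else is returned.
	if err := exec(ctx, db, `CREATE USER "zitadel"`, []string{roleAlreadyExistsCode}); err != nil {
		return err // a real failure, not just "role already exists"
	}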
148 apps/api/cmd/initialise/init.go Normal file
@@ -0,0 +1,148 @@
package initialise

import (
	"context"
	"embed"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

var (
	//go:embed sql/*.sql
	stmts embed.FS

	createUserStmt           string
	grantStmt                string
	databaseStmt             string
	createEventstoreStmt     string
	createProjectionsStmt    string
	createSystemStmt         string
	createEncryptionKeysStmt string
	createEventsStmt         string
	createUniqueConstraints  string

	roleAlreadyExistsCode = "42710"
	dbAlreadyExistsCode   = "42P04"
)

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "init",
		Short: "initialize ZITADEL instance",
		Long: `Sets up the minimum requirements to start ZITADEL.

Prerequisites:
- PostgreSQL database

The user provided by flags needs privileges to
- create the database if it does not exist
- see other users and create a new one if the user does not exist
- grant all rights of the ZITADEL database to the user created if not yet set
`,
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())

			InitAll(cmd.Context(), config)
		},
	}

	cmd.AddCommand(newZitadel(), newDatabase(), newUser(), newGrant())
	return cmd
}

func InitAll(ctx context.Context, config *Config) {
	err := initialise(ctx, config.Database,
		VerifyUser(config.Database.Username(), config.Database.Password()),
		VerifyDatabase(config.Database.DatabaseName()),
		VerifyGrant(config.Database.DatabaseName(), config.Database.Username()),
	)
	logging.OnError(err).Fatal("unable to initialize the database")

	err = verifyZitadel(ctx, config.Database)
	logging.OnError(err).Fatal("unable to initialize ZITADEL")
}

func initialise(ctx context.Context, config database.Config, steps ...func(context.Context, *database.DB) error) error {
	logging.Info("initialization started")

	err := ReadStmts()
	if err != nil {
		return err
	}

	db, err := database.Connect(config, true)
	if err != nil {
		return err
	}
	defer db.Close()

	return Init(ctx, db, steps...)
}

func Init(ctx context.Context, db *database.DB, steps ...func(context.Context, *database.DB) error) error {
	for _, step := range steps {
		if err := step(ctx, db); err != nil {
			return err
		}
	}

	return nil
}

func ReadStmts() (err error) {
	createUserStmt, err = readStmt("01_user")
	if err != nil {
		return err
	}

	databaseStmt, err = readStmt("02_database")
	if err != nil {
		return err
	}

	grantStmt, err = readStmt("03_grant_user")
	if err != nil {
		return err
	}

	createEventstoreStmt, err = readStmt("04_eventstore")
	if err != nil {
		return err
	}

	createProjectionsStmt, err = readStmt("05_projections")
	if err != nil {
		return err
	}

	createSystemStmt, err = readStmt("06_system")
	if err != nil {
		return err
	}

	createEncryptionKeysStmt, err = readStmt("07_encryption_keys_table")
	if err != nil {
		return err
	}

	createEventsStmt, err = readStmt("08_events_table")
	if err != nil {
		return err
	}

	createUniqueConstraints, err = readStmt("10_unique_constraints_table")
	if err != nil {
		return err
	}

	return nil
}

func readStmt(step string) (string, error) {
	stmt, err := stmts.ReadFile("sql/" + step + ".sql")
	return string(stmt), err
}
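Because Init just runs the given steps in order, callers can compose their own subset. A sketch (it assumes ReadStmts has already run, as initialise does before calling Init, and that db is a connected *database.DB):

	// Sketch: run only the user/database/grant verification steps.
	err := initialise.Init(ctx, db,
		initialise.VerifyUser("zitadel", ""),
		initialise.VerifyDatabase("zitadel"),
		initialise.VerifyGrant("zitadel", "zitadel"),
	)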
86 apps/api/cmd/initialise/init_test.go Normal file
@@ -0,0 +1,86 @@
package initialise

import (
	"database/sql/driver"
	"regexp"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"

	"github.com/zitadel/zitadel/internal/database"
	db_mock "github.com/zitadel/zitadel/internal/database/mock"
)

type db struct {
	mock sqlmock.Sqlmock
	db   *database.DB
}

func prepareDB(t *testing.T, expectations ...expectation) db {
	t.Helper()
	client, mock, err := sqlmock.New(sqlmock.ValueConverterOption(new(db_mock.TypeConverter)))
	if err != nil {
		t.Fatalf("unable to create sql mock: %v", err)
	}
	for _, expectation := range expectations {
		expectation(mock)
	}
	return db{
		mock: mock,
		db:   &database.DB{DB: client},
	}
}

type expectation func(m sqlmock.Sqlmock)

func expectExec(stmt string, err error, args ...driver.Value) expectation {
	return func(m sqlmock.Sqlmock) {
		query := m.ExpectExec(regexp.QuoteMeta(stmt)).WithArgs(args...)
		if err != nil {
			query.WillReturnError(err)
			return
		}
		query.WillReturnResult(sqlmock.NewResult(1, 1))
	}
}

func expectQuery(stmt string, err error, columns []string, rows [][]driver.Value, args ...driver.Value) expectation {
	return func(m sqlmock.Sqlmock) {
		res := m.NewRows(columns)
		for _, row := range rows {
			res.AddRow(row...)
		}
		query := m.ExpectQuery(regexp.QuoteMeta(stmt)).WithArgs(args...).WillReturnRows(res)
		if err != nil {
			query.WillReturnError(err)
			return
		}
	}
}

func expectBegin(err error) expectation {
	return func(m sqlmock.Sqlmock) {
		query := m.ExpectBegin()
		if err != nil {
			query.WillReturnError(err)
		}
	}
}

func expectCommit(err error) expectation {
	return func(m sqlmock.Sqlmock) {
		query := m.ExpectCommit()
		if err != nil {
			query.WillReturnError(err)
		}
	}
}

func expectRollback(err error) expectation {
	return func(m sqlmock.Sqlmock) {
		query := m.ExpectRollback()
		if err != nil {
			query.WillReturnError(err)
		}
	}
}
2 apps/api/cmd/initialise/sql/01_user.sql Normal file
@@ -0,0 +1,2 @@
-- replace %[1]s with the name of the user
CREATE USER "%[1]s"
2 apps/api/cmd/initialise/sql/02_database.sql Normal file
@@ -0,0 +1,2 @@
-- replace %[1]s with the name of the database
CREATE DATABASE "%[1]s"
3 apps/api/cmd/initialise/sql/03_grant_user.sql Normal file
@@ -0,0 +1,3 @@
-- replace the first %[1]s with the database
-- replace the second %[2]s with the user
GRANT ALL ON DATABASE "%[1]s" TO "%[2]s";
3 apps/api/cmd/initialise/sql/04_eventstore.sql Normal file
@@ -0,0 +1,3 @@
CREATE SCHEMA IF NOT EXISTS eventstore;

GRANT ALL ON ALL TABLES IN SCHEMA eventstore TO "%[1]s";
3 apps/api/cmd/initialise/sql/05_projections.sql Normal file
@@ -0,0 +1,3 @@
CREATE SCHEMA IF NOT EXISTS projections;

GRANT ALL ON ALL TABLES IN SCHEMA projections TO "%[1]s";
3 apps/api/cmd/initialise/sql/06_system.sql Normal file
@@ -0,0 +1,3 @@
CREATE SCHEMA IF NOT EXISTS system;

GRANT ALL ON ALL TABLES IN SCHEMA system TO "%[1]s";
6 apps/api/cmd/initialise/sql/07_encryption_keys_table.sql Normal file
@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS system.encryption_keys (
    id TEXT NOT NULL
    , key TEXT NOT NULL

    , PRIMARY KEY (id)
);
121 apps/api/cmd/initialise/sql/08_events_table.sql Normal file
@@ -0,0 +1,121 @@
CREATE TABLE IF NOT EXISTS eventstore.events2 (
    instance_id TEXT NOT NULL
    , aggregate_type TEXT NOT NULL
    , aggregate_id TEXT NOT NULL

    , event_type TEXT NOT NULL
    , "sequence" BIGINT NOT NULL
    , revision SMALLINT NOT NULL
    , created_at TIMESTAMPTZ NOT NULL
    , payload JSONB
    , creator TEXT NOT NULL
    , "owner" TEXT NOT NULL

    , "position" DECIMAL NOT NULL
    , in_tx_order INTEGER NOT NULL

    , PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
);

CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC, instance_id);
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

-- represents an event to be created.
DO $$ BEGIN
    CREATE TYPE eventstore.command AS (
        instance_id TEXT
        , aggregate_type TEXT
        , aggregate_id TEXT
        , command_type TEXT
        , revision INT2
        , payload JSONB
        , creator TEXT
        , owner TEXT
    );
EXCEPTION
    WHEN duplicate_object THEN null;
END $$;

CREATE OR REPLACE FUNCTION eventstore.commands_to_events(commands eventstore.command[]) RETURNS SETOF eventstore.events2 VOLATILE AS $$
SELECT
    c.instance_id
    , c.aggregate_type
    , c.aggregate_id
    , c.command_type AS event_type
    , cs.sequence + ROW_NUMBER() OVER (PARTITION BY c.instance_id, c.aggregate_type, c.aggregate_id ORDER BY c.in_tx_order) AS sequence
    , c.revision
    , NOW() AS created_at
    , c.payload
    , c.creator
    , cs.owner
    , EXTRACT(EPOCH FROM NOW()) AS position
    , c.in_tx_order
FROM (
    SELECT
        c.instance_id
        , c.aggregate_type
        , c.aggregate_id
        , c.command_type
        , c.revision
        , c.payload
        , c.creator
        , c.owner
        , ROW_NUMBER() OVER () AS in_tx_order
    FROM
        UNNEST(commands) AS c
) AS c
JOIN (
    SELECT
        cmds.instance_id
        , cmds.aggregate_type
        , cmds.aggregate_id
        , CASE WHEN (e.owner IS NOT NULL OR e.owner <> '') THEN e.owner ELSE command_owners.owner END AS owner
        , COALESCE(MAX(e.sequence), 0) AS sequence
    FROM (
        SELECT DISTINCT
            instance_id
            , aggregate_type
            , aggregate_id
            , owner
        FROM UNNEST(commands)
    ) AS cmds
    LEFT JOIN eventstore.events2 AS e
        ON cmds.instance_id = e.instance_id
        AND cmds.aggregate_type = e.aggregate_type
        AND cmds.aggregate_id = e.aggregate_id
    JOIN (
        SELECT
            DISTINCT ON (
                instance_id
                , aggregate_type
                , aggregate_id
            )
            instance_id
            , aggregate_type
            , aggregate_id
            , owner
        FROM
            UNNEST(commands)
    ) AS command_owners ON
        cmds.instance_id = command_owners.instance_id
        AND cmds.aggregate_type = command_owners.aggregate_type
        AND cmds.aggregate_id = command_owners.aggregate_id
    GROUP BY
        cmds.instance_id
        , cmds.aggregate_type
        , cmds.aggregate_id
        , 4 -- owner
) AS cs
    ON c.instance_id = cs.instance_id
    AND c.aggregate_type = cs.aggregate_type
    AND c.aggregate_id = cs.aggregate_id
ORDER BY
    in_tx_order;
$$ LANGUAGE SQL;

CREATE OR REPLACE FUNCTION eventstore.push(commands eventstore.command[]) RETURNS SETOF eventstore.events2 VOLATILE AS $$
INSERT INTO eventstore.events2
SELECT * FROM eventstore.commands_to_events(commands)
RETURNING *
$$ LANGUAGE SQL;
6 apps/api/cmd/initialise/sql/10_unique_constraints_table.sql Normal file
@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS eventstore.unique_constraints (
    instance_id TEXT,
    unique_type TEXT,
    unique_field TEXT,
    PRIMARY KEY (instance_id, unique_type, unique_field)
);
15 apps/api/cmd/initialise/sql/README.md Normal file
@@ -0,0 +1,15 @@
# SQL initialisation

The SQL files in this folder initialize the ZITADEL database and user. These objects need to exist before ZITADEL is able to set up and start.

## files

- 01_user.sql: creates the user ZITADEL uses to connect to the database
- 02_database.sql: creates the database for ZITADEL
- 03_grant_user.sql: grants the previously created user full access to its database. The user needs full access because ZITADEL executes DDL/DML at runtime
- 04_eventstore.sql: creates the schema needed for eventsourcing
- 05_projections.sql: creates the schema needed to read the data
- 06_system.sql: creates the schema needed for ZITADEL itself
- 07_encryption_keys_table.sql: creates the table for encryption keys (for event data)
- 08_events_table.sql: creates the table for eventsourcing
- 10_unique_constraints_table.sql: creates the table to check unique constraints for events
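How the placeholders are filled in practice (see verify_user.go and friends in this package): the template is read from the embedded FS and passed through fmt.Sprintf before execution. A short sketch:

	// readStmt loads "sql/01_user.sql" from the embedded FS; the %[1]s
	// placeholder is then substituted with the configured name.
	stmt, err := readStmt("01_user")
	if err != nil {
		return err
	}
	query := fmt.Sprintf(stmt, "zitadel") // -> CREATE USER "zitadel"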
44 apps/api/cmd/initialise/verify_database.go Normal file
@@ -0,0 +1,44 @@
package initialise

import (
	"context"
	_ "embed"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

func newDatabase() *cobra.Command {
	return &cobra.Command{
		Use:   "database",
		Short: "initialize only the database",
		Long: `Sets up the ZITADEL database.

Prerequisites:
- PostgreSQL

The user provided by flags needs privileges to
- create the database if it does not exist
- see other users and create a new one if the user does not exist
- grant all rights of the ZITADEL database to the user created if not yet set
`,
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())

			err := initialise(cmd.Context(), config.Database, VerifyDatabase(config.Database.DatabaseName()))
			logging.OnError(err).Fatal("unable to initialize the database")
		},
	}
}

func VerifyDatabase(databaseName string) func(context.Context, *database.DB) error {
	return func(ctx context.Context, db *database.DB) error {
		logging.WithFields("database", databaseName).Info("verify database")

		return exec(ctx, db, fmt.Sprintf(databaseStmt, databaseName), []string{dbAlreadyExistsCode})
	}
}
67 apps/api/cmd/initialise/verify_database_test.go Normal file
@@ -0,0 +1,67 @@
package initialise

import (
	"context"
	"database/sql"
	"errors"
	"testing"
)

func Test_verifyDB(t *testing.T) {
	err := ReadStmts()
	if err != nil {
		t.Errorf("unable to read stmts: %v", err)
		t.FailNow()
	}

	type args struct {
		db       db
		database string
	}
	tests := []struct {
		name      string
		args      args
		targetErr error
	}{
		{
			name: "doesn't exist, create fails",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", sql.ErrTxDone),
				),
				database: "zitadel",
			},
			targetErr: sql.ErrTxDone,
		},
		{
			name: "doesn't exist, create successful",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", nil),
				),
				database: "zitadel",
			},
			targetErr: nil,
		},
		{
			name: "already exists",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel with the name of the database\nCREATE DATABASE \"zitadel\"", nil),
				),
				database: "zitadel",
			},
			targetErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := VerifyDatabase(tt.args.database)(context.Background(), tt.args.db.db); !errors.Is(err, tt.targetErr) {
				t.Errorf("verifyDB() error = %v, want: %v", err, tt.targetErr)
			}
			if err := tt.args.db.mock.ExpectationsWereMet(); err != nil {
				t.Error(err)
			}
		})
	}
}
39 apps/api/cmd/initialise/verify_grant.go Normal file
@@ -0,0 +1,39 @@
package initialise

import (
	"context"
	_ "embed"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

func newGrant() *cobra.Command {
	return &cobra.Command{
		Use:   "grant",
		Short: "set ALL grant to user",
		Long: `Sets ALL grant to the database user.

Prerequisites:
- PostgreSQL
`,
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())

			err := initialise(cmd.Context(), config.Database, VerifyGrant(config.Database.DatabaseName(), config.Database.Username()))
			logging.OnError(err).Fatal("unable to set grant")
		},
	}
}

func VerifyGrant(databaseName, username string) func(context.Context, *database.DB) error {
	return func(ctx context.Context, db *database.DB) error {
		logging.WithFields("user", username, "database", databaseName).Info("verify grant")

		return exec(ctx, db, fmt.Sprintf(grantStmt, databaseName, username), nil)
	}
}
65 apps/api/cmd/initialise/verify_grant_test.go Normal file
@@ -0,0 +1,65 @@
package initialise

import (
	"context"
	"database/sql"
	"errors"
	"testing"
)

func Test_verifyGrant(t *testing.T) {
	type args struct {
		db       db
		database string
		username string
	}
	tests := []struct {
		name      string
		args      args
		targetErr error
	}{
		{
			name: "doesn't exist, create fails",
			args: args{
				db: prepareDB(t,
					expectExec("GRANT ALL ON DATABASE \"zitadel\" TO \"zitadel-user\"", sql.ErrTxDone),
				),
				database: "zitadel",
				username: "zitadel-user",
			},
			targetErr: sql.ErrTxDone,
		},
		{
			name: "correct",
			args: args{
				db: prepareDB(t,
					expectExec("GRANT ALL ON DATABASE \"zitadel\" TO \"zitadel-user\"", nil),
				),
				database: "zitadel",
				username: "zitadel-user",
			},
			targetErr: nil,
		},
		{
			name: "already exists",
			args: args{
				db: prepareDB(t,
					expectExec("GRANT ALL ON DATABASE \"zitadel\" TO \"zitadel-user\"", nil),
				),
				database: "zitadel",
				username: "zitadel-user",
			},
			targetErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := VerifyGrant(tt.args.database, tt.args.username)(context.Background(), tt.args.db.db); !errors.Is(err, tt.targetErr) {
				t.Errorf("VerifyGrant() error = %v, want: %v", err, tt.targetErr)
			}
			if err := tt.args.db.mock.ExpectationsWereMet(); err != nil {
				t.Error(err)
			}
		})
	}
}
48 apps/api/cmd/initialise/verify_user.go Normal file
@@ -0,0 +1,48 @@
package initialise

import (
	"context"
	_ "embed"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

func newUser() *cobra.Command {
	return &cobra.Command{
		Use:   "user",
		Short: "initialize only the database user",
		Long: `Sets up the ZITADEL database user.

Prerequisites:
- PostgreSQL

The user provided by flags needs privileges to
- create the database if it does not exist
- see other users and create a new one if the user does not exist
- grant all rights of the ZITADEL database to the user created if not yet set
`,
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())

			err := initialise(cmd.Context(), config.Database, VerifyUser(config.Database.Username(), config.Database.Password()))
			logging.OnError(err).Fatal("unable to init user")
		},
	}
}

func VerifyUser(username, password string) func(context.Context, *database.DB) error {
	return func(ctx context.Context, db *database.DB) error {
		logging.WithFields("username", username).Info("verify user")

		if password != "" {
			createUserStmt += " WITH PASSWORD '" + password + "'"
		}

		return exec(ctx, db, fmt.Sprintf(createUserStmt, username), []string{roleAlreadyExistsCode})
	}
}
82 apps/api/cmd/initialise/verify_user_test.go Normal file
@@ -0,0 +1,82 @@
package initialise

import (
	"context"
	"database/sql"
	"errors"
	"testing"
)

func Test_verifyUser(t *testing.T) {
	err := ReadStmts()
	if err != nil {
		t.Errorf("unable to read stmts: %v", err)
		t.FailNow()
	}

	type args struct {
		db       db
		username string
		password string
	}
	tests := []struct {
		name      string
		args      args
		targetErr error
	}{
		{
			name: "doesn't exist, create fails",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\"", sql.ErrTxDone),
				),
				username: "zitadel-user",
				password: "",
			},
			targetErr: sql.ErrTxDone,
		},
		{
			name: "correct without password",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\"", nil),
				),
				username: "zitadel-user",
				password: "",
			},
			targetErr: nil,
		},
		{
			name: "correct with password",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\" WITH PASSWORD 'password'", nil),
				),
				username: "zitadel-user",
				password: "password",
			},
			targetErr: nil,
		},
		{
			name: "already exists",
			args: args{
				db: prepareDB(t,
					expectExec("-- replace zitadel-user with the name of the user\nCREATE USER \"zitadel-user\" WITH PASSWORD 'password'", nil),
				),
				username: "zitadel-user",
				password: "",
			},
			targetErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := VerifyUser(tt.args.username, tt.args.password)(context.Background(), tt.args.db.db); !errors.Is(err, tt.targetErr) {
				t.Errorf("VerifyUser() error = %v, want: %v", err, tt.targetErr)
			}
			if err := tt.args.db.mock.ExpectationsWereMet(); err != nil {
				t.Error(err)
			}
		})
	}
}
136 apps/api/cmd/initialise/verify_zitadel.go Normal file
@@ -0,0 +1,136 @@
package initialise

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
	es_v3 "github.com/zitadel/zitadel/internal/eventstore/v3"
)

func newZitadel() *cobra.Command {
	return &cobra.Command{
		Use:   "zitadel",
		Short: "initialize ZITADEL internals",
		Long: `initialize ZITADEL internals.

Prerequisites:
- PostgreSQL with user and database
`,
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())
			err := verifyZitadel(cmd.Context(), config.Database)
			logging.OnError(err).Fatal("unable to init zitadel")
		},
	}
}

func VerifyZitadel(ctx context.Context, db *database.DB, config database.Config) error {
	err := ReadStmts()
	if err != nil {
		return err
	}

	conn, err := db.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	logging.WithFields().Info("verify system")
	if err := exec(ctx, conn, fmt.Sprintf(createSystemStmt, config.Username()), nil); err != nil {
		return err
	}

	logging.WithFields().Info("verify encryption keys")
	if err := createEncryptionKeys(ctx, conn); err != nil {
		return err
	}

	logging.WithFields().Info("verify projections")
	if err := exec(ctx, conn, fmt.Sprintf(createProjectionsStmt, config.Username()), nil); err != nil {
		return err
	}

	logging.WithFields().Info("verify eventstore")
	if err := exec(ctx, conn, fmt.Sprintf(createEventstoreStmt, config.Username()), nil); err != nil {
		return err
	}

	logging.WithFields().Info("verify events tables")
	if err := createEvents(ctx, conn); err != nil {
		return err
	}

	logging.WithFields().Info("verify unique constraints")
	if err := exec(ctx, conn, createUniqueConstraints, nil); err != nil {
		return err
	}

	return nil
}

func verifyZitadel(ctx context.Context, config database.Config) error {
	logging.WithFields("database", config.DatabaseName()).Info("verify zitadel")

	db, err := database.Connect(config, false)
	if err != nil {
		return err
	}

	if err := VerifyZitadel(ctx, db, config); err != nil {
		return err
	}

	return db.Close()
}

func createEncryptionKeys(ctx context.Context, db database.Beginner) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if _, err = tx.Exec(createEncryptionKeysStmt); err != nil {
		rollbackErr := tx.Rollback()
		logging.OnError(rollbackErr).Error("rollback failed")
		return err
	}

	return tx.Commit()
}

func createEvents(ctx context.Context, conn *sql.Conn) (err error) {
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			rollbackErr := tx.Rollback()
			logging.OnError(rollbackErr).Error("rollback failed")
			return
		}
		err = tx.Commit()
	}()

	// if an events table already exists, events2 is created during a setup job
	var count int
	row := tx.QueryRow("SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'")
	if err = row.Scan(&count); err != nil {
		return err
	}
	if row.Err() != nil || count >= 1 {
		return row.Err()
	}
	_, err = tx.Exec(createEventsStmt)
	if err != nil {
		return err
	}
	return es_v3.CheckExecutionPlan(ctx, conn)
}
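createEvents uses a named return plus a deferred commit-or-rollback. The same pattern in isolation, as a generic helper (a sketch, not part of this diff; it reuses the zitadel/logging helper as above):

	// inTx commits when fn succeeds and rolls back (keeping fn's error) when
	// it fails; the deferred func reads the named return err.
	func inTx(ctx context.Context, conn *sql.Conn, fn func(*sql.Tx) error) (err error) {
		tx, err := conn.BeginTx(ctx, nil)
		if err != nil {
			return err
		}
		defer func() {
			if err != nil {
				logging.OnError(tx.Rollback()).Error("rollback failed")
				return
			}
			err = tx.Commit()
		}()
		return fn(tx)
	}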
177 apps/api/cmd/initialise/verify_zitadel_test.go Normal file
@@ -0,0 +1,177 @@
package initialise

import (
	"context"
	"database/sql"
	"database/sql/driver"
	"errors"
	"testing"
)

func Test_verifyEvents(t *testing.T) {
	err := ReadStmts()
	if err != nil {
		t.Errorf("unable to read stmts: %v", err)
		t.FailNow()
	}

	type args struct {
		db db
	}
	tests := []struct {
		name      string
		args      args
		targetErr error
	}{
		{
			name: "unable to begin",
			args: args{
				db: prepareDB(t,
					expectBegin(sql.ErrConnDone),
				),
			},
			targetErr: sql.ErrConnDone,
		},
		{
			name: "events already exists",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectQuery(
						"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
						nil,
						[]string{"count"},
						[][]driver.Value{
							{1},
						},
					),
					expectCommit(nil),
				),
			},
		},
		{
			name: "events and events2 already exist",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectQuery(
						"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
						nil,
						[]string{"count"},
						[][]driver.Value{
							{2},
						},
					),
					expectCommit(nil),
				),
			},
		},
		{
			name: "create table fails",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectQuery(
						"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
						nil,
						[]string{"count"},
						[][]driver.Value{
							{0},
						},
					),
					expectExec(createEventsStmt, sql.ErrNoRows),
					expectRollback(nil),
				),
			},
			targetErr: sql.ErrNoRows,
		},
		{
			name: "correct",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectQuery(
						"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
						nil,
						[]string{"count"},
						[][]driver.Value{
							{0},
						},
					),
					expectExec(createEventsStmt, nil),
					expectCommit(nil),
				),
			},
			targetErr: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			conn, err := tt.args.db.db.Conn(context.Background())
			if err != nil {
				t.Error(err)
				return
			}
			if err := createEvents(context.Background(), conn); !errors.Is(err, tt.targetErr) {
				t.Errorf("createEvents() error = %v, want: %v", err, tt.targetErr)
			}
			if err := tt.args.db.mock.ExpectationsWereMet(); err != nil {
				t.Error(err)
			}
		})
	}
}

func Test_verifyEncryptionKeys(t *testing.T) {
	type args struct {
		db db
	}
	tests := []struct {
		name      string
		args      args
		targetErr error
	}{
		{
			name: "unable to begin",
			args: args{
				db: prepareDB(t,
					expectBegin(sql.ErrConnDone),
				),
			},
			targetErr: sql.ErrConnDone,
		},
		{
			name: "create table fails",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectExec(createEncryptionKeysStmt, sql.ErrNoRows),
					expectRollback(nil),
				),
			},
			targetErr: sql.ErrNoRows,
		},
		{
			name: "correct",
			args: args{
				db: prepareDB(t,
					expectBegin(nil),
					expectExec(createEncryptionKeysStmt, nil),
					expectCommit(nil),
				),
			},
			targetErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := createEncryptionKeys(context.Background(), tt.args.db.db); !errors.Is(err, tt.targetErr) {
				t.Errorf("createEncryptionKeys() error = %v, want: %v", err, tt.targetErr)
			}
			if err := tt.args.db.mock.ExpectationsWereMet(); err != nil {
				t.Error(err)
			}
		})
	}
}
131 apps/api/cmd/key/key.go Normal file
@@ -0,0 +1,131 @@
package key

import (
	"io"
	"os"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"sigs.k8s.io/yaml"

	"github.com/zitadel/zitadel/internal/crypto"
	cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/zerrors"
)

const (
	flagKeyFile = "file"
)

type Config struct {
	Database database.Config
}

func New() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "keys",
		Short: "manage encryption keys",
	}
	AddMasterKeyFlag(cmd)
	cmd.AddCommand(newKey())
	return cmd
}

func newKey() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "new [keyID=key]... [-f file]",
		Short: "create new encryption key(s)",
		Long: `create new encryption key(s) (encrypted by the provided master key)
provide key(s) by YAML file and/or by argument
Requirements:
- PostgreSQL`,
		Example: `new -f keys.yaml
new key1=somekey key2=anotherkey
new -f keys.yaml key2=anotherkey`,
		RunE: func(cmd *cobra.Command, args []string) error {
			keys, err := keysFromArgs(args)
			if err != nil {
				return err
			}
			filePath, _ := cmd.Flags().GetString(flagKeyFile)
			if filePath != "" {
				file, err := openFile(filePath)
				if err != nil {
					return err
				}
				yamlKeys, err := keysFromYAML(file)
				if err != nil {
					return err
				}
				keys = append(keys, yamlKeys...)
			}
			config := new(Config)
			if err := viper.Unmarshal(config); err != nil {
				return err
			}
			masterKey, err := MasterKey(cmd)
			if err != nil {
				return err
			}
			storage, err := keyStorage(config.Database, masterKey)
			if err != nil {
				return err
			}
			return storage.CreateKeys(cmd.Context(), keys...)
		},
	}
	cmd.PersistentFlags().StringP(flagKeyFile, "f", "", "path to keys file")
	return cmd
}

func keysFromArgs(args []string) ([]*crypto.Key, error) {
	keys := make([]*crypto.Key, len(args))
	for i, arg := range args {
		key := strings.Split(arg, "=")
		if len(key) != 2 {
			return nil, zerrors.ThrowInternal(nil, "KEY-JKd82", "argument is not in the valid format [keyID=key]")
		}
		keys[i] = &crypto.Key{
			ID:    key[0],
			Value: key[1],
		}
	}
	return keys, nil
}

func keysFromYAML(file io.Reader) ([]*crypto.Key, error) {
	data, err := io.ReadAll(file)
	if err != nil {
		return nil, zerrors.ThrowInternal(err, "KEY-ajGFr", "unable to extract keys from file")
	}
	keysYAML := make(map[string]string)
	if err = yaml.Unmarshal(data, &keysYAML); err != nil {
		return nil, zerrors.ThrowInternal(err, "KEY-sd34K", "unable to extract keys from file")
	}
	keys := make([]*crypto.Key, 0, len(keysYAML))
	for id, key := range keysYAML {
		keys = append(keys, &crypto.Key{
			ID:    id,
			Value: key,
		})
	}
	return keys, nil
}

func openFile(fileName string) (io.Reader, error) {
	file, err := os.Open(fileName)
	if err != nil {
		return nil, zerrors.ThrowInternalf(err, "KEY-asGr2", "failed to open file: %s", fileName)
	}
	return file, nil
}

func keyStorage(config database.Config, masterKey string) (crypto.KeyStorage, error) {
	db, err := database.Connect(config, false)
	if err != nil {
		return nil, err
	}
	return cryptoDB.NewKeyStorage(db, masterKey)
}
160 apps/api/cmd/key/key_test.go Normal file
@@ -0,0 +1,160 @@
package key

import (
	"bytes"
	"io"
	"reflect"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/zitadel/zitadel/internal/crypto"
	"github.com/zitadel/zitadel/internal/zerrors"
)

func Test_keysFromArgs(t *testing.T) {
	type args struct {
		args []string
	}
	type res struct {
		keys []*crypto.Key
		err  func(error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			"no args",
			args{},
			res{
				keys: []*crypto.Key{},
			},
		},
		{
			"invalid arg",
			args{
				args: []string{"keyID", "value"},
			},
			res{
				err: zerrors.IsInternal,
			},
		},
		{
			"single arg",
			args{
				args: []string{"keyID=value"},
			},
			res{
				keys: []*crypto.Key{
					{
						ID:    "keyID",
						Value: "value",
					},
				},
			},
		},
		{
			"multiple args",
			args{
				args: []string{"keyID=value", "keyID2=value2"},
			},
			res{
				keys: []*crypto.Key{
					{
						ID:    "keyID",
						Value: "value",
					},
					{
						ID:    "keyID2",
						Value: "value2",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := keysFromArgs(tt.args.args)
			if tt.res.err == nil {
				assert.NoError(t, err)
			}
			if tt.res.err != nil && !tt.res.err(err) {
				t.Errorf("got wrong err: %v", err)
			}
			if !reflect.DeepEqual(got, tt.res.keys) {
				t.Errorf("keysFromArgs() got = %v, want %v", got, tt.res.keys)
			}
		})
	}
}

func Test_keysFromYAML(t *testing.T) {
	type args struct {
		file io.Reader
	}
	type res struct {
		keys []*crypto.Key
		err  func(error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			"invalid yaml",
			args{
				file: bytes.NewReader([]byte("keyID=ds")),
			},
			res{
				err: zerrors.IsInternal,
			},
		},
		{
			"single key",
			args{
				file: bytes.NewReader([]byte("keyID: value")),
			},
			res{
				keys: []*crypto.Key{
					{
						ID:    "keyID",
						Value: "value",
					},
				},
			},
		},
		{
			"multiple keys",
			args{
				file: bytes.NewReader([]byte("keyID: value\nkeyID2: value2")),
			},
			res{
				keys: []*crypto.Key{
					{
						ID:    "keyID",
						Value: "value",
					},
					{
						ID:    "keyID2",
						Value: "value2",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := keysFromYAML(tt.args.file)
			if tt.res.err == nil {
				assert.NoError(t, err)
			}
			if tt.res.err != nil && !tt.res.err(err) {
				t.Errorf("got wrong err: %v", err)
			}
			assert.ElementsMatch(t, got, tt.res.keys)
		})
	}
}
66 apps/api/cmd/key/masterkey.go Normal file
@@ -0,0 +1,66 @@
package key

import (
	"errors"
	"os"

	"github.com/spf13/cobra"
)

const (
	flagMasterKey      = "masterkeyFile"
	flagMasterKeyShort = "m"
	flagMasterKeyArg   = "masterkey"
	flagMasterKeyEnv   = "masterkeyFromEnv"
	envMasterKey       = "ZITADEL_MASTERKEY"
)

var (
	ErrNotSingleFlag = errors.New("masterkey must either be provided by file path, value or environment variable")
)

func AddMasterKeyFlag(cmd *cobra.Command) {
	if cmd.PersistentFlags().Lookup(flagMasterKey) != nil {
		return
	}
	cmd.PersistentFlags().StringP(flagMasterKey, flagMasterKeyShort, "", "path to the masterkey for en/decryption keys")
	cmd.PersistentFlags().String(flagMasterKeyArg, "", "masterkey as argument for en/decryption keys")
	cmd.PersistentFlags().Bool(flagMasterKeyEnv, false, "read masterkey for en/decryption keys from environment variable (ZITADEL_MASTERKEY)")
}

func MasterKey(cmd *cobra.Command) (string, error) {
	masterKeyFile, _ := cmd.Flags().GetString(flagMasterKey)
	masterKeyFromArg, _ := cmd.Flags().GetString(flagMasterKeyArg)
	masterKeyFromEnv, _ := cmd.Flags().GetBool(flagMasterKeyEnv)
	if err := checkSingleFlag(masterKeyFile, masterKeyFromArg, masterKeyFromEnv); err != nil {
		return "", err
	}
	if masterKeyFromArg != "" {
		return masterKeyFromArg, nil
	}
	if masterKeyFromEnv {
		return os.Getenv(envMasterKey), nil
	}
	data, err := os.ReadFile(masterKeyFile)
	if err != nil {
		return "", err
	}
	return string(data), nil
}

func checkSingleFlag(masterKeyFile, masterKeyFromArg string, masterKeyFromEnv bool) error {
	var flags int
	if masterKeyFile != "" {
		flags++
	}
	if masterKeyFromArg != "" {
		flags++
	}
	if masterKeyFromEnv {
		flags++
	}
	if flags != 1 {
		return ErrNotSingleFlag
	}
	return nil
}
72 apps/api/cmd/key/masterkey_test.go Normal file
@@ -0,0 +1,72 @@
package key

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_checkSingleFlag(t *testing.T) {
	type args struct {
		masterKeyFile    string
		masterKeyFromArg string
		masterKeyFromEnv bool
	}
	tests := []struct {
		name    string
		args    args
		wantErr assert.ErrorAssertionFunc
	}{
		{
			"no values, error",
			args{
				masterKeyFile:    "",
				masterKeyFromArg: "",
				masterKeyFromEnv: false,
			},
			assert.Error,
		},
		{
			"multiple values, error",
			args{
				masterKeyFile:    "file",
				masterKeyFromArg: "masterkey",
				masterKeyFromEnv: true,
			},
			assert.Error,
		},
		{
			"only file, ok",
			args{
				masterKeyFile:    "file",
				masterKeyFromArg: "",
				masterKeyFromEnv: false,
			},
			assert.NoError,
		},
		{
			"only argument, ok",
			args{
				masterKeyFile:    "",
				masterKeyFromArg: "masterkey",
				masterKeyFromEnv: false,
			},
			assert.NoError,
		},
		{
			"only env, ok",
			args{
				masterKeyFile:    "",
				masterKeyFromArg: "",
				masterKeyFromEnv: true,
			},
			assert.NoError,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.wantErr(t, checkSingleFlag(tt.args.masterKeyFile, tt.args.masterKeyFromArg, tt.args.masterKeyFromEnv), fmt.Sprintf("checkSingleFlag(%v, %v, %v)", tt.args.masterKeyFile, tt.args.masterKeyFromArg, tt.args.masterKeyFromEnv))
		})
	}
}
95
apps/api/cmd/mirror/auth.go
Normal file
95
apps/api/cmd/mirror/auth.go
Normal file
@@ -0,0 +1,95 @@
package mirror

import (
	"context"
	_ "embed"
	"io"
	"strconv"
	"time"

	"github.com/jackc/pgx/v5/stdlib"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

func authCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "auth",
		Short: "mirrors the auth requests table from one database to another",
		Long: `mirrors the auth requests table from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
Only auth requests are mirrored`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copyAuth(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete auth requests of defined instances before copy")

	return cmd
}

func copyAuth(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyAuthRequests(ctx, sourceClient, destClient, config.MaxAuthRequestAge)
}

func copyAuthRequests(ctx context.Context, source, dest *database.DB, maxAuthRequestAge time.Duration) {
	start := time.Now()

	logging.Info("creating index on auth.auth_requests.change_date to speed up copy in source database")
	_, err := source.ExecContext(ctx, "CREATE INDEX CONCURRENTLY IF NOT EXISTS auth_requests_change_date ON auth.auth_requests (change_date)")
	logging.OnError(err).Fatal("unable to create index on auth.auth_requests.change_date")

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn any) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+" AND change_date > NOW() - INTERVAL '"+strconv.FormatFloat(maxAuthRequestAge.Seconds(), 'f', -1, 64)+" seconds') TO STDOUT")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire connection")
	defer destConn.Close()

	var affected int64
	err = destConn.Raw(func(driverConn any) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "DELETE FROM auth.auth_requests "+instanceClause())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN")
		affected = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy auth requests to destination")
	logging.OnError(<-errs).Fatal("unable to copy auth requests from source")
	logging.WithFields("took", time.Since(start), "count", affected).Info("auth requests migrated")
}
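Note: copyAuthRequests above streams rows between the two databases without buffering them in memory: a goroutine runs COPY ... TO STDOUT into the write end of an io.Pipe while the main goroutine feeds the read end into COPY ... FROM STDIN. A minimal, self-contained sketch of that pattern follows; the DSNs and table name are placeholders, not part of the commit:

package main

import (
	"context"
	"database/sql"
	"io"
	"log"

	"github.com/jackc/pgx/v5/stdlib"
)

// copyTable streams one table from source to dest through an io.Pipe,
// so no row is ever materialized in this process beyond the pipe buffer.
func copyTable(ctx context.Context, source, dest *sql.DB, table string) error {
	srcConn, err := source.Conn(ctx)
	if err != nil {
		return err
	}
	defer srcConn.Close()

	destConn, err := dest.Conn(ctx)
	if err != nil {
		return err
	}
	defer destConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	// producer: stream the table out of the source into the pipe
	go func() {
		errs <- srcConn.Raw(func(driverConn any) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY "+table+" TO STDOUT")
			w.Close() // unblock the consumer even if CopyTo failed
			return err
		})
	}()

	// consumer: feed the pipe into the destination
	err = destConn.Raw(func(driverConn any) error {
		conn := driverConn.(*stdlib.Conn).Conn()
		_, err := conn.PgConn().CopyFrom(ctx, r, "COPY "+table+" FROM STDIN")
		return err
	})
	if err != nil {
		return err
	}
	return <-errs
}

func main() {
	// placeholder DSNs for illustration only
	source, err := sql.Open("pgx", "postgres://zitadel@source-host:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	dest, err := sql.Open("pgx", "postgres://zitadel@dest-host:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	if err := copyTable(context.Background(), source, dest, "auth.auth_requests"); err != nil {
		log.Fatal(err)
	}
}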
87
apps/api/cmd/mirror/config.go
Normal file
@@ -0,0 +1,87 @@
package mirror

import (
	_ "embed"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/hooks"
	"github.com/zitadel/zitadel/internal/actions"
	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/hook"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/id"
	metrics "github.com/zitadel/zitadel/internal/telemetry/metrics/config"
)

type Migration struct {
	Source      database.Config
	Destination database.Config

	EventBulkSize     uint32
	MaxAuthRequestAge time.Duration

	Log     *logging.Config
	Machine *id.Config
	Metrics metrics.Config
}

var (
	//go:embed defaults.yaml
	defaultConfig []byte
)

func mustNewMigrationConfig(v *viper.Viper) *Migration {
	config := new(Migration)
	mustNewConfig(v, config)

	err := config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	err = config.Metrics.NewMeter()
	logging.OnError(err).Fatal("unable to set meter")

	id.Configure(config.Machine)

	return config
}

func mustNewProjectionsConfig(v *viper.Viper) *ProjectionsConfig {
	config := new(ProjectionsConfig)
	mustNewConfig(v, config)

	err := config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	id.Configure(config.Machine)

	return config
}

func mustNewConfig(v *viper.Viper, config any) {
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			hooks.SliceTypeStringDecode[*domain.CustomMessageText],
			hooks.SliceTypeStringDecode[*command.SetQuota],
			hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
			hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
			hooks.MapTypeStringDecode[domain.Feature, any],
			hooks.MapHTTPHeaderStringDecode,
			hook.Base64ToBytesHookFunc(),
			hook.TagToLanguageHookFunc(),
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
			mapstructure.StringToSliceHookFunc(","),
			database.DecodeHook(true),
			actions.HTTPConfigDecodeHook,
			hook.EnumHookFunc(internal_authz.MemberTypeString),
			mapstructure.TextUnmarshallerHookFunc(),
		)),
	)
	logging.OnError(err).Fatal("unable to read default config")
}
94
apps/api/cmd/mirror/defaults.yaml
Normal file
@@ -0,0 +1,94 @@
Source:
  cockroach:
    Host: localhost # ZITADEL_SOURCE_COCKROACH_HOST
    Port: 26257 # ZITADEL_SOURCE_COCKROACH_PORT
    Database: zitadel # ZITADEL_SOURCE_COCKROACH_DATABASE
    MaxOpenConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXOPENCONNS
    MaxIdleConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXIDLECONNS
    MaxConnLifetime: 30m # ZITADEL_SOURCE_COCKROACH_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_SOURCE_COCKROACH_MAXCONNIDLETIME
    Options: "" # ZITADEL_SOURCE_COCKROACH_OPTIONS
    User:
      Username: zitadel # ZITADEL_SOURCE_COCKROACH_USER_USERNAME
      Password: "" # ZITADEL_SOURCE_COCKROACH_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_SOURCE_COCKROACH_USER_SSL_MODE
        RootCert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_CERT
        Key: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_KEY
  # Postgres is used as soon as a value is set
  # The keys below describe the possible fields to set
  postgres:
    Host: # ZITADEL_SOURCE_POSTGRES_HOST
    Port: # ZITADEL_SOURCE_POSTGRES_PORT
    Database: # ZITADEL_SOURCE_POSTGRES_DATABASE
    MaxOpenConns: # ZITADEL_SOURCE_POSTGRES_MAXOPENCONNS
    MaxIdleConns: # ZITADEL_SOURCE_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: # ZITADEL_SOURCE_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: # ZITADEL_SOURCE_POSTGRES_MAXCONNIDLETIME
    Options: # ZITADEL_SOURCE_POSTGRES_OPTIONS
    User:
      Username: # ZITADEL_SOURCE_POSTGRES_USER_USERNAME
      Password: # ZITADEL_SOURCE_POSTGRES_USER_PASSWORD
      SSL:
        Mode: # ZITADEL_SOURCE_POSTGRES_USER_SSL_MODE
        RootCert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_ROOTCERT
        Cert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_CERT
        Key: # ZITADEL_SOURCE_POSTGRES_USER_SSL_KEY

Destination:
  postgres:
    Host: localhost # ZITADEL_DESTINATION_POSTGRES_HOST
    Port: 5432 # ZITADEL_DESTINATION_POSTGRES_PORT
    Database: zitadel # ZITADEL_DESTINATION_POSTGRES_DATABASE
    MaxOpenConns: 5 # ZITADEL_DESTINATION_POSTGRES_MAXOPENCONNS
    MaxIdleConns: 2 # ZITADEL_DESTINATION_POSTGRES_MAXIDLECONNS
    MaxConnLifetime: 30m # ZITADEL_DESTINATION_POSTGRES_MAXCONNLIFETIME
    MaxConnIdleTime: 5m # ZITADEL_DESTINATION_POSTGRES_MAXCONNIDLETIME
    Options: "" # ZITADEL_DESTINATION_POSTGRES_OPTIONS
    User:
      Username: zitadel # ZITADEL_DESTINATION_POSTGRES_USER_USERNAME
      Password: "" # ZITADEL_DESTINATION_POSTGRES_USER_PASSWORD
      SSL:
        Mode: disable # ZITADEL_DESTINATION_POSTGRES_USER_SSL_MODE
        RootCert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_ROOTCERT
        Cert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_CERT
        Key: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_KEY

EventBulkSize: 10000 # ZITADEL_EVENTBULKSIZE
# The maximum time since an auth request was last updated; older requests are ignored.
# Default is 30 days
MaxAuthRequestAge: 720h # ZITADEL_MAXAUTHREQUESTAGE

Projections:
  # The maximum duration a transaction remains open
  # before it stops folding additional events
  # and updates the table.
  TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
  # turn off scheduler during operation
  RequeueEvery: 0s
  ConcurrentInstances: 7 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES
  EventBulkLimit: 1000 # ZITADEL_PROJECTIONS_EVENTBULKLIMIT
  Customizations:
    notifications:
      MaxFailureCount: 1

Eventstore:
  MaxRetries: 3 # ZITADEL_EVENTSTORE_MAXRETRIES

Auth:
  Spooler:
    TransactionDuration: 0s # ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 1000 # ZITADEL_AUTH_SPOOLER_BULKLIMIT

Admin:
  Spooler:
    TransactionDuration: 0s # ZITADEL_ADMIN_SPOOLER_TRANSACTIONDURATION
    BulkLimit: 10 # ZITADEL_ADMIN_SPOOLER_BULKLIMIT

FirstInstance:
  # We only need to create an empty zitadel database, so this step must be skipped
  Skip: true

Log:
  Level: info
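Note: the # ZITADEL_... comments above document the environment variables that override each YAML key: the prefix ZITADEL_ followed by the key path with the nesting separator replaced by underscores. A small sketch of that convention, assuming a viper setup along these lines (the exact wiring inside ZITADEL may differ):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetEnvPrefix("ZITADEL")                          // ZITADEL_ prefix
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // nested keys -> underscores
	v.AutomaticEnv()

	// e.g. export ZITADEL_SOURCE_POSTGRES_HOST=db.example.com
	fmt.Println(v.GetString("source.postgres.host"))
}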
64
apps/api/cmd/mirror/event.go
Normal file
@@ -0,0 +1,64 @@
package mirror

import (
	"context"

	"github.com/shopspring/decimal"

	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/readmodel"
	"github.com/zitadel/zitadel/internal/v2/system"
	mirror_event "github.com/zitadel/zitadel/internal/v2/system/mirror"
)

func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore.EventStore, source string) (*readmodel.LastSuccessfulMirror, error) {
	lastSuccess := readmodel.NewLastSuccessfulMirror(source)
	if shouldIgnorePrevious {
		return lastSuccess, nil
	}
	_, err := destinationES.Query(
		ctx,
		eventstore.NewQuery(
			system.AggregateInstance,
			lastSuccess,
			eventstore.SetFilters(lastSuccess.Filter()),
		),
	)
	if err != nil {
		return nil, err
	}

	return lastSuccess, nil
}

func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position decimal.Decimal) error {
	return destinationES.Push(
		ctx,
		eventstore.NewPushIntent(
			system.AggregateInstance,
			eventstore.AppendAggregate(
				system.AggregateOwner,
				system.AggregateType,
				id,
				eventstore.CurrentSequenceMatches(0),
				eventstore.AppendCommands(mirror_event.NewSucceededCommand(source, position)),
			),
		),
	)
}

func writeMigrationFailed(ctx context.Context, destinationES *eventstore.EventStore, id, source string, err error) error {
	return destinationES.Push(
		ctx,
		eventstore.NewPushIntent(
			system.AggregateInstance,
			eventstore.AppendAggregate(
				system.AggregateOwner,
				system.AggregateType,
				id,
				eventstore.CurrentSequenceMatches(0),
				eventstore.AppendCommands(mirror_event.NewFailedCommand(source, err)),
			),
		),
	)
}
264
apps/api/cmd/mirror/event_store.go
Normal file
@@ -0,0 +1,264 @@
package mirror

import (
	"context"
	"database/sql"
	_ "embed"
	"errors"
	"io"
	"time"

	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgx/v5/stdlib"
	"github.com/shopspring/decimal"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	db "github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/dialect"
	"github.com/zitadel/zitadel/internal/id"
	"github.com/zitadel/zitadel/internal/v2/database"
	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
	"github.com/zitadel/zitadel/internal/zerrors"
)

var shouldIgnorePrevious bool

func eventstoreCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "eventstore",
		Short: "mirrors the eventstore of an instance from one database to another",
		Long: `mirrors the eventstore of an instance from one database to another
ZITADEL needs to be initialized and set up with the --for-mirror flag
The migration only copies events2 and unique constraints`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copyEventstore(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete unique constraints of defined instances before copy")
	cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")

	return cmd
}

func copyEventstore(ctx context.Context, config *Migration) {
	sourceClient, err := db.Connect(config.Source, false)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := db.Connect(config.Destination, false)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyEvents(ctx, sourceClient, destClient, config.EventBulkSize)
	copyUniqueConstraints(ctx, sourceClient, destClient)
}

func positionQuery(db *db.DB) string {
	switch db.Type() {
	case dialect.DatabaseTypePostgres:
		return "SELECT EXTRACT(EPOCH FROM clock_timestamp())"
	case dialect.DatabaseTypeCockroach:
		return "SELECT cluster_logical_timestamp()"
	default:
		logging.WithFields("db_type", db.Type()).Fatal("database type not recognized")
		return ""
	}
}

func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
	logging.Info("starting to copy events")
	start := time.Now()
	reader, writer := io.Pipe()

	migrationID, err := id.SonyFlakeGenerator().Next()
	logging.OnError(err).Fatal("unable to generate migration id")

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")

	destinationES := eventstore.NewEventstoreFromOne(postgres.New(dest, &postgres.Config{
		MaxRetries: 3,
	}))

	previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName())
	logging.OnError(err).Fatal("unable to query latest successful migration")

	var maxPosition decimal.Decimal
	err = source.QueryRowContext(ctx,
		func(row *sql.Row) error {
			return row.Scan(&maxPosition)
		},
		"SELECT MAX(position) FROM eventstore.events2 "+instanceClause(),
	)
	logging.OnError(err).Fatal("unable to query max position from source")

	logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration")

	nextPos := make(chan bool, 1)
	pos := make(chan decimal.Decimal, 1)
	errs := make(chan error, 3)

	go func() {
		err := sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			nextPos <- true
			var i uint32
			for position := range pos {
				var stmt database.Statement
				stmt.WriteString("COPY (SELECT instance_id, aggregate_type, aggregate_id, event_type, sequence, revision, created_at, regexp_replace(payload::TEXT, '\\\\u0000', '', 'g')::JSON payload, creator, owner, ")
				stmt.WriteArg(position)
				stmt.WriteString(" position, row_number() OVER (PARTITION BY instance_id ORDER BY position, in_tx_order) AS in_tx_order FROM eventstore.events2 ")
				stmt.WriteString(instanceClause())
				stmt.WriteString(" AND ")
				database.NewNumberAtMost(maxPosition).Write(&stmt, "position")
				stmt.WriteString(" AND ")
				database.NewNumberGreater(previousMigration.Position).Write(&stmt, "position")
				stmt.WriteString(" ORDER BY instance_id, position, in_tx_order")
				stmt.WriteString(" LIMIT ")
				stmt.WriteArg(bulkSize)
				stmt.WriteString(" OFFSET ")
				stmt.WriteArg(bulkSize * i)
				stmt.WriteString(") TO STDOUT")

				// COPY does not allow args, so we replace them in the statement directly
				tag, err := conn.PgConn().CopyTo(ctx, writer, stmt.Debug())
				if err != nil {
					return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i)
				}
				logging.WithFields("batch_count", i).Info("batch of events copied")

				if tag.RowsAffected() < int64(bulkSize) {
					logging.WithFields("batch_count", i).Info("last batch of events copied")
					return nil
				}

				nextPos <- true
				i++
			}
			return nil
		})
		writer.Close()
		close(nextPos)
		errs <- err
	}()

	// generate the next position to use for each batch
	go func() {
		defer close(pos)
		for range nextPos {
			var position decimal.Decimal
			err := dest.QueryRowContext(
				ctx,
				func(row *sql.Row) error {
					return row.Scan(&position)
				},
				positionQuery(dest),
			)
			if err != nil {
				errs <- zerrors.ThrowUnknown(err, "MIGRA-kMyPH", "unable to query next position")
				return
			}
			pos <- position
		}
	}()

	var eventCount int64
	errs <- destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN")
		eventCount = tag.RowsAffected()
		if err != nil {
			pgErr := new(pgconn.PgError)
			errors.As(err, &pgErr)

			logging.WithError(err).WithField("pg_err_details", pgErr.Detail).Error("unable to copy events into destination")
			return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination")
		}

		return nil
	})

	close(errs)
	writeCopyEventsDone(ctx, destinationES, migrationID, source.DatabaseName(), maxPosition, errs)

	logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated")
}

func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position decimal.Decimal, errs <-chan error) {
	joinedErrs := make([]error, 0, len(errs))
	for err := range errs {
		joinedErrs = append(joinedErrs, err)
	}
	err := errors.Join(joinedErrs...)

	if err != nil {
		logging.WithError(err).Error("unable to mirror events")
		err := writeMigrationFailed(ctx, es, id, source, err)
		logging.OnError(err).Fatal("unable to write failed event")
		return
	}

	err = writeMigrationSucceeded(ctx, es, id, source, position)
	logging.OnError(err).Fatal("unable to write succeeded event")
}

func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) {
	logging.Info("starting to copy unique constraints")
	start := time.Now()
	reader, writer := io.Pipe()
	errs := make(chan error, 1)

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")

	go func() {
		err := sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			var stmt database.Statement
			stmt.WriteString("COPY (SELECT instance_id, unique_type, unique_field FROM eventstore.unique_constraints ")
			stmt.WriteString(instanceClause())
			stmt.WriteString(") TO stdout")

			_, err := conn.PgConn().CopyTo(ctx, writer, stmt.String())
			writer.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")

	var eventCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			var stmt database.Statement
			stmt.WriteString("DELETE FROM eventstore.unique_constraints ")
			stmt.WriteString(instanceClause())

			_, err := conn.Exec(ctx, stmt.String())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.unique_constraints FROM stdin")
		eventCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy unique constraints to destination")
	logging.OnError(<-errs).Fatal("unable to copy unique constraints from source")
	logging.WithFields("took", time.Since(start), "count", eventCount).Info("unique constraints migrated")
}
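Note: copyEvents above coordinates its goroutines through a request/response pair of channels: the copy loop asks for a fresh destination-side position on nextPos before every batch, and a helper goroutine answers on pos. A runnable sketch of just that handshake, with the database work stubbed out (batch numbers stand in for real positions):

package main

import "fmt"

func main() {
	nextPos := make(chan bool, 1)
	pos := make(chan int, 1)

	// producer: answers each request with a fresh position,
	// standing in for the clock_timestamp() query on the destination
	go func() {
		defer close(pos)
		p := 0
		for range nextPos {
			p++
			pos <- p
		}
	}()

	// consumer: requests a position, copies one batch, and stops once a
	// batch comes back smaller than the bulk size (stubbed here as p == 3)
	nextPos <- true
	for p := range pos {
		fmt.Println("copying batch at position", p)
		if p == 3 {
			break
		}
		nextPos <- true
	}
	close(nextPos)
}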
99
apps/api/cmd/mirror/mirror.go
Normal file
@@ -0,0 +1,99 @@
package mirror

import (
	"bytes"
	_ "embed"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/key"
)

var (
	instanceIDs   []string
	isSystem      bool
	shouldReplace bool
)

func New(configFiles *[]string) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "mirror",
		Short: "mirrors all data of ZITADEL from one database to another",
		Long: `mirrors all data of ZITADEL from one database to another
ZITADEL needs to be initialized and set up with --for-mirror

The command mirrors all data needed and recomputes the projections.
For more details call the help functions of the subcommands.

Order of execution:
1. mirror system tables
2. mirror auth tables
3. mirror event store tables
4. recompute projections
5. verify`,
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			err := viper.MergeConfig(bytes.NewBuffer(defaultConfig))
			logging.OnError(err).Fatal("unable to read default config")

			for _, file := range *configFiles {
				viper.SetConfigFile(file)
				err := viper.MergeInConfig()
				logging.WithFields("file", file).OnError(err).Warn("unable to read config file")
			}
		},
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			projectionConfig := mustNewProjectionsConfig(viper.GetViper())

			masterKey, err := key.MasterKey(cmd)
			logging.OnError(err).Fatal("unable to read master key")

			copySystem(cmd.Context(), config)
			copyAuth(cmd.Context(), config)
			copyEventstore(cmd.Context(), config)

			projections(cmd.Context(), projectionConfig, masterKey)
		},
	}

	mirrorFlags(cmd)
	cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table")
	cmd.Flags().BoolVar(&shouldReplace, "replace", false, `replaces all data of the following tables for the provided instances or all if the "--system"-flag is set:
* system.assets
* auth.auth_requests
* eventstore.unique_constraints
The flag should be provided if you want to execute the mirror command multiple times so that the static data are also mirrored to prevent inconsistent states.`)
	migrateProjectionsFlags(cmd)

	cmd.AddCommand(
		eventstoreCmd(),
		systemCmd(),
		projectionsCmd(),
		authCmd(),
		verifyCmd(),
	)

	return cmd
}

func mirrorFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
	cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.")
	cmd.MarkFlagsOneRequired("system", "instance")
	cmd.MarkFlagsMutuallyExclusive("system", "instance")
}

func instanceClause() string {
	if isSystem {
		return "WHERE instance_id <> ''"
	}
	// quote into a copy so that repeated calls do not double-quote the IDs
	quoted := make([]string, len(instanceIDs))
	for i, instanceID := range instanceIDs {
		quoted[i] = "'" + instanceID + "'"
	}

	// COPY does not allow parameters so we need to set them directly
	return "WHERE instance_id IN (" + strings.Join(quoted, ", ") + ")"
}
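Note: because COPY statements cannot carry bind parameters, instanceClause splices quoted instance IDs straight into the SQL. A hypothetical test (not part of the commit) pinning down the expected output of both branches:

package mirror

import "testing"

func TestInstanceClause(t *testing.T) {
	isSystem = false
	instanceIDs = []string{"inst-1", "inst-2"}
	if got, want := instanceClause(), "WHERE instance_id IN ('inst-1', 'inst-2')"; got != want {
		t.Errorf("instanceClause() = %q, want %q", got, want)
	}

	isSystem = true
	if got, want := instanceClause(), "WHERE instance_id <> ''"; got != want {
		t.Errorf("instanceClause() = %q, want %q", got, want)
	}
}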
351
apps/api/cmd/mirror/projections.go
Normal file
@@ -0,0 +1,351 @@
package mirror

import (
	"context"
	"database/sql"
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/encryption"
	"github.com/zitadel/zitadel/cmd/key"
	"github.com/zitadel/zitadel/cmd/tls"
	admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
	admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
	admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/api/oidc"
	"github.com/zitadel/zitadel/internal/api/ui/login"
	auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
	auth_handler "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/handler"
	auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
	"github.com/zitadel/zitadel/internal/authz"
	authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
	"github.com/zitadel/zitadel/internal/cache/connector"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/systemdefaults"
	crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
	old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
	new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
	"github.com/zitadel/zitadel/internal/i18n"
	"github.com/zitadel/zitadel/internal/id"
	"github.com/zitadel/zitadel/internal/notification"
	"github.com/zitadel/zitadel/internal/notification/handlers"
	"github.com/zitadel/zitadel/internal/query"
	"github.com/zitadel/zitadel/internal/query/projection"
	static_config "github.com/zitadel/zitadel/internal/static/config"
	es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
	es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
	"github.com/zitadel/zitadel/internal/webauthn"
)

func projectionsCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "projections",
		Short: "calls the projections synchronously",
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewProjectionsConfig(viper.GetViper())

			masterKey, err := key.MasterKey(cmd)
			logging.OnError(err).Fatal("unable to read master key")

			projections(cmd.Context(), config, masterKey)
		},
	}

	migrateProjectionsFlags(cmd)

	return cmd
}

type ProjectionsConfig struct {
	Destination    database.Config
	Projections    projection.Config
	Notifications  handlers.WorkerConfig
	EncryptionKeys *encryption.EncryptionKeyConfig
	SystemAPIUsers map[string]*internal_authz.SystemAPIUser
	Eventstore     *eventstore.Config
	Caches         *connector.CachesConfig

	Admin admin_es.Config
	Auth  auth_es.Config

	Log     *logging.Config
	Machine *id.Config

	ExternalPort    uint16
	ExternalDomain  string
	ExternalSecure  bool
	InternalAuthZ   internal_authz.Config
	SystemAuthZ     internal_authz.Config
	SystemDefaults  systemdefaults.SystemDefaults
	Telemetry       *handlers.TelemetryPusherConfig
	Login           login.Config
	OIDC            oidc.Config
	WebAuthNName    string
	DefaultInstance command.InstanceSetup
	AssetStorage    static_config.AssetStorageConfig
}

func migrateProjectionsFlags(cmd *cobra.Command) {
	key.AddMasterKeyFlag(cmd)
	tls.AddTLSModeFlag(cmd)
}

func projections(
	ctx context.Context,
	config *ProjectionsConfig,
	masterKey string,
) {
	logging.Info("starting to fill projections")
	start := time.Now()

	client, err := database.Connect(config.Destination, false)
	logging.OnError(err).Fatal("unable to connect to database")

	keyStorage, err := crypto_db.NewKeyStorage(client, masterKey)
	logging.OnError(err).Fatal("cannot start key storage")

	keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage)
	logging.OnError(err).Fatal("unable to read encryption keys")

	staticStorage, err := config.AssetStorage.NewStorage(client.DB)
	logging.OnError(err).Fatal("unable to create static storage")

	newEventstore := new_es.NewEventstore(client)
	config.Eventstore.Querier = old_es.NewPostgres(client)
	config.Eventstore.Pusher = newEventstore
	config.Eventstore.Searcher = newEventstore

	es := eventstore.NewEventstore(config.Eventstore)
	esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{
		MaxRetries: config.Eventstore.MaxRetries,
	}))

	sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

	cacheConnectors, err := connector.StartConnectors(config.Caches, client)
	logging.OnError(err).Fatal("unable to start caches")

	queries, err := query.StartQueries(
		ctx,
		es,
		esV4.Querier,
		client,
		client,
		cacheConnectors,
		config.Projections,
		config.SystemDefaults,
		keys.IDPConfig,
		keys.OTP,
		keys.OIDC,
		keys.SAML,
		keys.Target,
		config.InternalAuthZ.RolePermissionMappings,
		sessionTokenVerifier,
		func(q *query.Queries) domain.PermissionCheck {
			return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
				return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
			}
		},
		0,
		config.SystemAPIUsers,
		false,
	)
	logging.OnError(err).Fatal("unable to start queries")

	authZRepo, err := authz.Start(queries, es, client, keys.OIDC, config.ExternalSecure)
	logging.OnError(err).Fatal("unable to start authz repo")

	webAuthNConfig := &webauthn.Config{
		DisplayName:    config.WebAuthNName,
		ExternalSecure: config.ExternalSecure,
	}
	commands, err := command.StartCommands(ctx,
		es,
		cacheConnectors,
		config.SystemDefaults,
		config.InternalAuthZ.RolePermissionMappings,
		staticStorage,
		webAuthNConfig,
		config.ExternalDomain,
		config.ExternalSecure,
		config.ExternalPort,
		keys.IDPConfig,
		keys.OTP,
		keys.SMTP,
		keys.SMS,
		keys.User,
		keys.DomainVerification,
		keys.OIDC,
		keys.SAML,
		keys.Target,
		&http.Client{},
		func(ctx context.Context, permission, orgID, resourceID string) (err error) {
			return internal_authz.CheckPermission(ctx, authZRepo, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
		},
		sessionTokenVerifier,
		config.OIDC.DefaultAccessTokenLifetime,
		config.OIDC.DefaultRefreshTokenExpiration,
		config.OIDC.DefaultRefreshTokenIdleExpiration,
		config.DefaultInstance.SecretGenerators,
	)
	logging.OnError(err).Fatal("unable to start commands")

	err = projection.Create(ctx, client, es, config.Projections, keys.OIDC, keys.SAML, config.SystemAPIUsers)
	logging.OnError(err).Fatal("unable to start projections")

	i18n.MustLoadSupportedLanguagesFromDir()

	notification.Register(
		ctx,
		config.Projections.Customizations["notifications"],
		config.Projections.Customizations["notificationsquotas"],
		config.Projections.Customizations["backchannel"],
		config.Projections.Customizations["telemetry"],
		config.Notifications,
		*config.Telemetry,
		config.ExternalDomain,
		config.ExternalPort,
		config.ExternalSecure,
		commands,
		queries,
		es,
		config.Login.DefaultOTPEmailURLV2,
		config.SystemDefaults.Notifications.FileSystemPath,
		keys.User,
		keys.SMTP,
		keys.SMS,
		keys.OIDC,
		config.OIDC.DefaultBackChannelLogoutLifetime,
		nil,
	)

	config.Auth.Spooler.Client = client
	config.Auth.Spooler.Eventstore = es
	authView, err := auth_view.StartView(config.Auth.Spooler.Client, keys.OIDC, queries, config.Auth.Spooler.Eventstore)
	logging.OnError(err).Fatal("unable to start auth view")
	auth_handler.Register(ctx, config.Auth.Spooler, authView, queries)

	config.Admin.Spooler.Client = client
	config.Admin.Spooler.Eventstore = es
	adminView, err := admin_view.StartView(config.Admin.Spooler.Client)
	logging.OnError(err).Fatal("unable to start admin view")

	admin_handler.Register(ctx, config.Admin.Spooler, adminView, staticStorage)

	instances := make(chan string, config.Projections.ConcurrentInstances)
	failedInstances := make(chan string)
	wg := sync.WaitGroup{}
	wg.Add(int(config.Projections.ConcurrentInstances))

	go func() {
		for instance := range failedInstances {
			logging.WithFields("instance", instance).Error("projection failed")
		}
	}()

	for range int(config.Projections.ConcurrentInstances) {
		go execProjections(ctx, instances, failedInstances, &wg)
	}

	existingInstances := queryInstanceIDs(ctx, client)
	for i, instance := range existingInstances {
		instances <- instance
		logging.WithFields("id", instance, "index", fmt.Sprintf("%d/%d", i, len(existingInstances))).Info("instance queued for projection")
	}
	close(instances)
	wg.Wait()

	close(failedInstances)

	logging.WithFields("took", time.Since(start)).Info("projections executed")
}

func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
	for instance := range instances {
		logging.WithFields("instance", instance).Info("starting projections")
		ctx = internal_authz.WithInstanceID(ctx, instance)

		err := projection.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger failed")
			failedInstances <- instance
			continue
		}

		err = projection.ProjectInstanceFields(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
			failedInstances <- instance
			continue
		}

		err = admin_handler.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger admin handler failed")
			failedInstances <- instance
			continue
		}

		err = projection.ProjectInstanceFields(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
			failedInstances <- instance
			continue
		}

		err = auth_handler.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger auth handler failed")
			failedInstances <- instance
			continue
		}

		err = notification.ProjectInstance(ctx)
		if err != nil {
			logging.WithFields("instance", instance).WithError(err).Info("trigger notification failed")
			failedInstances <- instance
			continue
		}

		logging.WithFields("instance", instance).Info("projections done")
	}
	wg.Done()
}

// queryInstanceIDs returns the instances configured by flag
// or all instances which are not removed
func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
	if len(instanceIDs) > 0 {
		return instanceIDs
	}

	instances := []string{}
	err := source.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var instance string

				if err := r.Scan(&instance); err != nil {
					return err
				}
				instances = append(instances, instance)
			}
			return r.Err()
		},
		"SELECT DISTINCT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.added' AND instance_id NOT IN (SELECT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.removed')",
	)
	logging.OnError(err).Fatal("unable to query instances")

	return instances
}
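Note: the projection run above is a bounded worker pool: ConcurrentInstances workers drain a channel of instance IDs while failures are reported on a second channel. Reduced to a runnable sketch with the projection calls stubbed out:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 3
	instances := make(chan string, workers)
	failed := make(chan string)
	var wg sync.WaitGroup
	wg.Add(workers)

	// report failures as they come in
	go func() {
		for id := range failed {
			fmt.Println("projection failed for", id)
		}
	}()

	// fixed number of workers drain the instance channel
	for range workers {
		go func() {
			defer wg.Done()
			for id := range instances {
				// stands in for projection.ProjectInstance(ctx) and friends
				fmt.Println("projecting", id)
			}
		}()
	}

	for _, id := range []string{"i1", "i2", "i3", "i4", "i5"} {
		instances <- id
	}
	close(instances)
	wg.Wait()
	close(failed)
}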
140
apps/api/cmd/mirror/system.go
Normal file
@@ -0,0 +1,140 @@
package mirror

import (
	"context"
	_ "embed"
	"io"
	"time"

	"github.com/jackc/pgx/v5/stdlib"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/database"
)

func systemCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "system",
		Short: "mirrors the system tables of ZITADEL from one database to another",
		Long: `mirrors the system tables of ZITADEL from one database to another
ZITADEL needs to be initialized
Only keys and assets are mirrored`,
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			copySystem(cmd.Context(), config)
		},
	}

	cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete ALL keys and assets of defined instances before copy")

	return cmd
}

func copySystem(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	copyAssets(ctx, sourceClient, destClient)
	copyEncryptionKeys(ctx, sourceClient, destClient)
}

func copyAssets(ctx context.Context, source, dest *database.DB) {
	logging.Info("starting to copy assets")
	start := time.Now()

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			// ignore hash column because it's computed
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets "+instanceClause()+") TO stdout")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")
	defer destConn.Close()

	var assetCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "DELETE FROM system.assets "+instanceClause())
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin")
		assetCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy assets to destination")
	logging.OnError(<-errs).Fatal("unable to copy assets from source")
	logging.WithFields("took", time.Since(start), "count", assetCount).Info("assets migrated")
}

func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
	logging.Info("starting to copy encryption keys")
	start := time.Now()

	sourceConn, err := source.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire source connection")
	defer sourceConn.Close()

	r, w := io.Pipe()
	errs := make(chan error, 1)

	go func() {
		err = sourceConn.Raw(func(driverConn interface{}) error {
			conn := driverConn.(*stdlib.Conn).Conn()
			_, err := conn.PgConn().CopyTo(ctx, w, "COPY system.encryption_keys TO stdout")
			w.Close()
			return err
		})
		errs <- err
	}()

	destConn, err := dest.Conn(ctx)
	logging.OnError(err).Fatal("unable to acquire dest connection")
	defer destConn.Close()

	var keyCount int64
	err = destConn.Raw(func(driverConn interface{}) error {
		conn := driverConn.(*stdlib.Conn).Conn()

		if shouldReplace {
			_, err := conn.Exec(ctx, "TRUNCATE system.encryption_keys")
			if err != nil {
				return err
			}
		}

		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin")
		keyCount = tag.RowsAffected()

		return err
	})
	logging.OnError(err).Fatal("unable to copy encryption keys to destination")
	logging.OnError(<-errs).Fatal("unable to copy encryption keys from source")
	logging.WithFields("took", time.Since(start), "count", keyCount).Info("encryption keys migrated")
}
123
apps/api/cmd/mirror/verify.go
Normal file
@@ -0,0 +1,123 @@
package mirror

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"
	"slices"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	cryptoDatabase "github.com/zitadel/zitadel/internal/crypto/database"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/query/projection"
)

func verifyCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "verify",
		Short: "checks whether source and dest contain the same number of entries",
		Run: func(cmd *cobra.Command, args []string) {
			config := mustNewMigrationConfig(viper.GetViper())
			verifyMigration(cmd.Context(), config)
		},
	}
}

var schemas = []string{
	"adminapi",
	"auth",
	"eventstore",
	"projections",
	"system",
}

func verifyMigration(ctx context.Context, config *Migration) {
	sourceClient, err := database.Connect(config.Source, false)
	logging.OnError(err).Fatal("unable to connect to source database")
	defer sourceClient.Close()

	destClient, err := database.Connect(config.Destination, false)
	logging.OnError(err).Fatal("unable to connect to destination database")
	defer destClient.Close()

	for _, schema := range schemas {
		for _, table := range append(getTables(ctx, destClient, schema), getViews(ctx, destClient, schema)...) {
			sourceCount := countEntries(ctx, sourceClient, table)
			destCount := countEntries(ctx, destClient, table)

			entry := logging.WithFields("table", table, "dest", destCount, "source", sourceCount)
			if sourceCount == destCount {
				entry.Debug("equal count")
				continue
			}
			entry.WithField("diff", destCount-sourceCount).Info("unequal count")
		}
	}
}

func getTables(ctx context.Context, dest *database.DB, schema string) (tables []string) {
	err := dest.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var table string
				if err := r.Scan(&table); err != nil {
					return err
				}
				tables = append(tables, table)
			}
			return r.Err()
		},
		"SELECT CONCAT(schemaname, '.', tablename) FROM pg_tables WHERE schemaname = $1",
		schema,
	)
	logging.WithFields("schema", schema).OnError(err).Fatal("unable to query tables")
	return tables
}

func getViews(ctx context.Context, dest *database.DB, schema string) (tables []string) {
	err := dest.QueryContext(
		ctx,
		func(r *sql.Rows) error {
			for r.Next() {
				var table string
				if err := r.Scan(&table); err != nil {
					return err
				}
				tables = append(tables, table)
			}
			return r.Err()
		},
		"SELECT CONCAT(schemaname, '.', viewname) FROM pg_views WHERE schemaname = $1",
		schema,
	)
	logging.WithFields("schema", schema).OnError(err).Fatal("unable to query views")
	return tables
}

func countEntries(ctx context.Context, client *database.DB, table string) (count int) {
	instanceClause := instanceClause()
	noInstanceIDColumn := []string{
		projection.InstanceProjectionTable,
		projection.SystemFeatureTable,
		cryptoDatabase.EncryptionKeysTable,
	}
	if slices.Contains(noInstanceIDColumn, table) {
		instanceClause = ""
	}

	err := client.QueryRowContext(
		ctx,
		func(r *sql.Row) error {
			return r.Scan(&count)
		},
		fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause),
	)
	logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count")

	return count
}
39
apps/api/cmd/ready/config.go
Normal file
@@ -0,0 +1,39 @@
package ready

import (
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/config/hook"
	"github.com/zitadel/zitadel/internal/config/network"
)

type Config struct {
	Log  *logging.Config
	Port uint16
	TLS  network.TLS
}

func MustNewConfig(v *viper.Viper) *Config {
	config := new(Config)
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			hook.Base64ToBytesHookFunc(),
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
			mapstructure.StringToSliceHookFunc(","),
			hook.EnumHookFunc(internal_authz.MemberTypeString),
			mapstructure.TextUnmarshallerHookFunc(),
		)),
	)
	logging.OnError(err).Fatal("unable to read default config")

	err = config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	return config
}
47
apps/api/cmd/ready/ready.go
Normal file
@@ -0,0 +1,47 @@
package ready

import (
	"crypto/tls"
	"net"
	"net/http"
	"os"
	"strconv"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"
)

func New() *cobra.Command {
	return &cobra.Command{
		Use:   "ready",
		Short: "Checks if zitadel is ready",
		Long:  "Checks if zitadel is ready",
		Run: func(cmd *cobra.Command, args []string) {
			config := MustNewConfig(viper.GetViper())
			if !ready(config) {
				os.Exit(1)
			}
		},
	}
}

func ready(config *Config) bool {
	scheme := "https"
	if !config.TLS.Enabled {
		scheme = "http"
	}
	// Checking the TLS cert is not in the scope of the readiness check
	httpClient := http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
	res, err := httpClient.Get(scheme + "://" + net.JoinHostPort("localhost", strconv.Itoa(int(config.Port))) + "/debug/ready")
	if err != nil {
		logging.WithError(err).Warn("ready check failed")
		return false
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		logging.WithFields("status", res.StatusCode).Warn("ready check failed")
		return false
	}
	return true
}
32
apps/api/cmd/setup/01.go
Normal file
@@ -0,0 +1,32 @@
package setup

import (
	"context"
	"database/sql"
	_ "embed"

	"github.com/zitadel/zitadel/internal/eventstore"
)

var (
	//go:embed 01_sql/adminapi.sql
	createAdminViews string
	//go:embed 01_sql/auth.sql
	createAuthViews string
	//go:embed 01_sql/projections.sql
	createProjections string
)

type ProjectionTable struct {
	dbClient *sql.DB
}

func (mig *ProjectionTable) Execute(ctx context.Context, _ eventstore.Event) error {
	stmt := createAdminViews + createAuthViews + createProjections
	_, err := mig.dbClient.ExecContext(ctx, stmt)
	return err
}

func (mig *ProjectionTable) String() string {
	return "01_tables"
}
57
apps/api/cmd/setup/01_sql/adminapi.sql
Normal file
@@ -0,0 +1,57 @@
CREATE SCHEMA adminapi;

CREATE TABLE adminapi.locks (
    locker_id TEXT,
    locked_until TIMESTAMPTZ(3),
    view_name TEXT,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (view_name, instance_id)
);

CREATE TABLE adminapi.current_sequences (
    view_name TEXT,
    current_sequence BIGINT,
    event_date TIMESTAMPTZ,
    last_successful_spooler_run TIMESTAMPTZ,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (view_name, instance_id)
);

CREATE TABLE adminapi.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (view_name, failed_sequence, instance_id)
);

CREATE TABLE adminapi.styling (
    aggregate_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    label_policy_state INT2 NOT NULL DEFAULT 0::INT2,
    sequence INT8 NULL,
    primary_color TEXT NULL,
    background_color TEXT NULL,
    warn_color TEXT NULL,
    font_color TEXT NULL,
    primary_color_dark TEXT NULL,
    background_color_dark TEXT NULL,
    warn_color_dark TEXT NULL,
    font_color_dark TEXT NULL,
    logo_url TEXT NULL,
    icon_url TEXT NULL,
    logo_dark_url TEXT NULL,
    icon_dark_url TEXT NULL,
    font_url TEXT NULL,
    err_msg_popup BOOL NULL,
    disable_watermark BOOL NULL,
    hide_login_name_suffix BOOL NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (aggregate_id, label_policy_state, instance_id)
);
229
apps/api/cmd/setup/01_sql/auth.sql
Normal file
@@ -0,0 +1,229 @@
|
||||
CREATE SCHEMA auth;
|
||||
|
||||
CREATE TABLE auth.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
instance_id TEXT NOT NULL,
|
||||
|
||||
PRIMARY KEY (view_name, instance_id)
|
||||
);
|
||||
|
||||
CREATE TABLE auth.current_sequences (
|
||||
view_name TEXT,
|
||||
current_sequence BIGINT,
|
||||
event_date TIMESTAMPTZ,
|
||||
last_successful_spooler_run TIMESTAMPTZ,
|
||||
instance_id TEXT NOT NULL,
|
||||
|
||||
PRIMARY KEY (view_name, instance_id)
|
||||
);
|
||||
|
||||
CREATE TABLE auth.failed_events (
|
||||
view_name TEXT,
|
||||
failed_sequence BIGINT,
|
||||
failure_count SMALLINT,
|
||||
err_msg TEXT,
|
||||
instance_id TEXT NOT NULL,
|
||||
|
||||
PRIMARY KEY (view_name, failed_sequence, instance_id)
|
||||
);
|
||||
|
||||
CREATE TABLE auth.users (
|
||||
id TEXT NULL,
|
||||
creation_date TIMESTAMPTZ NULL,
|
||||
change_date TIMESTAMPTZ NULL,
|
||||
resource_owner TEXT NULL,
|
||||
    user_state INT2 NULL,
    password_set BOOL NULL,
    password_change_required BOOL NULL,
    password_change TIMESTAMPTZ NULL,
    last_login TIMESTAMPTZ NULL,
    user_name TEXT NULL,
    login_names TEXT[] NULL,
    preferred_login_name TEXT NULL,
    first_name TEXT NULL,
    last_name TEXT NULL,
    nick_name TEXT NULL,
    display_name TEXT NULL,
    preferred_language TEXT NULL,
    gender INT2 NULL,
    email TEXT NULL,
    is_email_verified BOOL NULL,
    phone TEXT NULL,
    is_phone_verified BOOL NULL,
    country TEXT NULL,
    locality TEXT NULL,
    postal_code TEXT NULL,
    region TEXT NULL,
    street_address TEXT NULL,
    otp_state INT2 NULL,
    mfa_max_set_up INT2 NULL,
    mfa_init_skipped TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    init_required BOOL NULL,
    username_change_required BOOL NULL,
    machine_name TEXT NULL,
    machine_description TEXT NULL,
    user_type TEXT NULL,
    u2f_tokens BYTEA NULL,
    passwordless_tokens BYTEA NULL,
    avatar_key TEXT NULL,
    passwordless_init_required BOOL NULL,
    password_init_required BOOL NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (id, instance_id)
);

CREATE TABLE auth.user_sessions (
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    resource_owner TEXT NULL,
    state INT2 NULL,
    user_agent_id TEXT NULL,
    user_id TEXT NULL,
    user_name TEXT NULL,
    password_verification TIMESTAMPTZ NULL,
    second_factor_verification TIMESTAMPTZ NULL,
    multi_factor_verification TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    second_factor_verification_type INT2 NULL,
    multi_factor_verification_type INT2 NULL,
    user_display_name TEXT NULL,
    login_name TEXT NULL,
    external_login_verification TIMESTAMPTZ NULL,
    selected_idp_config_id TEXT NULL,
    passwordless_verification TIMESTAMPTZ NULL,
    avatar_key TEXT NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (user_agent_id, user_id, instance_id)
);

CREATE TABLE auth.user_external_idps (
    external_user_id TEXT NOT NULL,
    idp_config_id TEXT NOT NULL,
    user_id TEXT NULL,
    idp_name TEXT NULL,
    user_display_name TEXT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    resource_owner TEXT NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (external_user_id, idp_config_id, instance_id)
);

CREATE TABLE auth.tokens (
    id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    resource_owner TEXT NULL,
    application_id TEXT NULL,
    user_agent_id TEXT NULL,
    user_id TEXT NULL,
    expiration TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    scopes TEXT[] NULL,
    audience TEXT[] NULL,
    preferred_language TEXT NULL,
    refresh_token_id TEXT NULL,
    is_pat BOOL NOT NULL DEFAULT false,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (id, instance_id)
);

CREATE INDEX user_user_agent_idx ON auth.tokens (user_id, user_agent_id);

CREATE TABLE auth.refresh_tokens (
    id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    resource_owner TEXT NULL,
    token TEXT NULL,
    client_id TEXT NOT NULL,
    user_agent_id TEXT NOT NULL,
    user_id TEXT NOT NULL,
    auth_time TIMESTAMPTZ NULL,
    idle_expiration TIMESTAMPTZ NULL,
    expiration TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    scopes TEXT[] NULL,
    audience TEXT[] NULL,
    amr TEXT[] NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (id, instance_id)
);

CREATE UNIQUE INDEX unique_client_user_index ON auth.refresh_tokens (client_id, user_agent_id, user_id);

CREATE TABLE auth.org_project_mapping (
    org_id TEXT NOT NULL,
    project_id TEXT NOT NULL,
    project_grant_id TEXT NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (org_id, project_id, instance_id)
);

CREATE TABLE auth.idp_providers (
    aggregate_id TEXT NOT NULL,
    idp_config_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    name TEXT NULL,
    idp_config_type INT2 NULL,
    idp_provider_type INT2 NULL,
    idp_state INT2 NULL,
    styling_type INT2 NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (aggregate_id, idp_config_id, instance_id)
);

CREATE TABLE auth.idp_configs (
    idp_config_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    aggregate_id TEXT NULL,
    name TEXT NULL,
    idp_state INT2 NULL,
    idp_provider_type INT2 NULL,
    is_oidc BOOL NULL,
    oidc_client_id TEXT NULL,
    oidc_client_secret JSONB NULL,
    oidc_issuer TEXT NULL,
    oidc_scopes TEXT[] NULL,
    oidc_idp_display_name_mapping INT2 NULL,
    oidc_idp_username_mapping INT2 NULL,
    styling_type INT2 NULL,
    oauth_authorization_endpoint TEXT NULL,
    oauth_token_endpoint TEXT NULL,
    auto_register BOOL NULL,
    jwt_endpoint TEXT NULL,
    jwt_keys_endpoint TEXT NULL,
    jwt_header_name TEXT NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (idp_config_id, instance_id)
);

CREATE TABLE auth.auth_requests (
    id TEXT NOT NULL,
    request JSONB NULL,
    code TEXT NULL,
    request_type INT2 NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (id, instance_id)
);

CREATE INDEX auth_code_idx ON auth.auth_requests (code);
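Every table above carries an instance_id column and folds it into the primary key, so each row is scoped to a single ZITADEL instance. A minimal caller-side sketch of a correspondingly scoped lookup, assuming a standard database/sql connection (the function and variable names here are illustrative, not part of the commit):

package main

import (
    "context"
    "database/sql"
)

// lookupTokenUser illustrates that queries against these tables must filter
// on instance_id as well: the primary key of auth.tokens is (id, instance_id),
// so id alone is not guaranteed to be unique across instances.
func lookupTokenUser(ctx context.Context, db *sql.DB, tokenID, instanceID string) (string, error) {
    var userID string
    err := db.QueryRowContext(ctx,
        "SELECT user_id FROM auth.tokens WHERE id = $1 AND instance_id = $2",
        tokenID, instanceID,
    ).Scan(&userID)
    return userID, err
}

func main() {} // placeholder so the sketch compiles as a program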
28
apps/api/cmd/setup/01_sql/projections.sql
Normal file
@@ -0,0 +1,28 @@
CREATE TABLE projections.locks (
    locker_id TEXT,
    locked_until TIMESTAMPTZ(3),
    projection_name TEXT,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (projection_name, instance_id)
);

CREATE TABLE projections.current_sequences (
    projection_name TEXT,
    aggregate_type TEXT,
    current_sequence BIGINT,
    instance_id TEXT NOT NULL,
    timestamp TIMESTAMPTZ,

    PRIMARY KEY (projection_name, aggregate_type, instance_id)
);

CREATE TABLE projections.failed_events (
    projection_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    error TEXT,
    instance_id TEXT NOT NULL,

    PRIMARY KEY (projection_name, failed_sequence, instance_id)
);
38
apps/api/cmd/setup/02.go
Normal file
@@ -0,0 +1,38 @@
package setup

import (
    "context"
    "database/sql"

    "github.com/zitadel/zitadel/internal/eventstore"
)

const (
    createAssets = `
CREATE TABLE system.assets (
    instance_id TEXT,
    asset_type TEXT,
    resource_owner TEXT,
    name TEXT,
    content_type TEXT,
    hash TEXT GENERATED ALWAYS AS (md5(data)) STORED,
    data BYTEA,
    updated_at TIMESTAMPTZ,

    PRIMARY KEY (instance_id, resource_owner, name)
);
`
)

type AssetTable struct {
    dbClient *sql.DB
}

func (mig *AssetTable) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, createAssets)
    return err
}

func (mig *AssetTable) String() string {
    return "02_assets"
}
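All of the setup steps in this diff share the same shape: a struct holding a database client, an Execute method that runs the step, and a String method that names it for tracking (e.g. "02_assets"). A sketch of the interface they appear to satisfy, reconstructed from the methods above rather than copied from ZITADEL's migration package, which may differ in detail:

package setup

import (
    "context"

    "github.com/zitadel/zitadel/internal/eventstore"
)

// migrationStep is a reconstruction for illustration only.
type migrationStep interface {
    // String returns the unique, ordered name of the step.
    String() string
    // Execute performs the step; the event argument is unused by the
    // DDL-only steps in this commit (they take "_ eventstore.Event").
    Execute(ctx context.Context, event eventstore.Event) error
}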
206
apps/api/cmd/setup/03.go
Normal file
@@ -0,0 +1,206 @@
package setup

import (
    "context"
    "fmt"
    "os"
    "strings"

    "golang.org/x/text/language"

    "github.com/zitadel/zitadel/internal/api/authz"
    "github.com/zitadel/zitadel/internal/cache/connector"
    "github.com/zitadel/zitadel/internal/command"
    "github.com/zitadel/zitadel/internal/config/systemdefaults"
    "github.com/zitadel/zitadel/internal/crypto"
    crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/domain"
    "github.com/zitadel/zitadel/internal/eventstore"
)

type FirstInstance struct {
    InstanceName string
    DefaultLanguage language.Tag
    Org command.InstanceOrgSetup
    MachineKeyPath string
    PatPath string
    LoginClientPatPath string
    Features *command.InstanceFeatures

    Skip bool

    instanceSetup command.InstanceSetup
    userEncryptionKey *crypto.KeyConfig
    smtpEncryptionKey *crypto.KeyConfig
    oidcEncryptionKey *crypto.KeyConfig
    masterKey string
    db *database.DB
    es *eventstore.Eventstore
    defaults systemdefaults.SystemDefaults
    zitadelRoles []authz.RoleMapping
    externalDomain string
    externalSecure bool
    externalPort uint16
    domain string
}

func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error {
    if mig.Skip {
        return nil
    }
    keyStorage, err := mig.verifyEncryptionKeys(ctx)
    if err != nil {
        return err
    }
    userAlg, err := crypto.NewAESCrypto(mig.userEncryptionKey, keyStorage)
    if err != nil {
        return err
    }
    smtpEncryption, err := crypto.NewAESCrypto(mig.smtpEncryptionKey, keyStorage)
    if err != nil {
        return err
    }
    oidcEncryption, err := crypto.NewAESCrypto(mig.oidcEncryptionKey, keyStorage)
    if err != nil {
        return err
    }

    cmd, err := command.StartCommands(ctx,
        mig.es,
        connector.Connectors{},
        mig.defaults,
        mig.zitadelRoles,
        nil,
        nil,
        mig.externalDomain,
        mig.externalSecure,
        mig.externalPort,
        nil,
        nil,
        smtpEncryption,
        nil,
        userAlg,
        nil,
        oidcEncryption,
        nil,
        nil,
        nil,
        nil,
        nil,
        0,
        0,
        0,
        nil,
    )
    if err != nil {
        return err
    }

    mig.instanceSetup.InstanceName = mig.InstanceName
    mig.instanceSetup.CustomDomain = mig.externalDomain
    mig.instanceSetup.DefaultLanguage = mig.DefaultLanguage
    mig.instanceSetup.Org = mig.Org
    // check if the username is email style, or else append @<orgname>.<custom-domain>;
    // this way we have the same value as before changing `UserLoginMustBeDomain` to false
    if !mig.instanceSetup.DomainPolicy.UserLoginMustBeDomain && !strings.Contains(mig.instanceSetup.Org.Human.Username, "@") {
        orgDomain, err := domain.NewIAMDomainName(mig.instanceSetup.Org.Name, mig.instanceSetup.CustomDomain)
        if err != nil {
            return err
        }
        mig.instanceSetup.Org.Human.Username = mig.instanceSetup.Org.Human.Username + "@" + orgDomain
    }
    mig.instanceSetup.Org.Human.Email.Address = mig.instanceSetup.Org.Human.Email.Address.Normalize()
    if mig.instanceSetup.Org.Human.Email.Address == "" {
        mig.instanceSetup.Org.Human.Email.Address = domain.EmailAddress(mig.instanceSetup.Org.Human.Username)
        if !strings.Contains(string(mig.instanceSetup.Org.Human.Email.Address), "@") {
            orgDomain, err := domain.NewIAMDomainName(mig.instanceSetup.Org.Name, mig.instanceSetup.CustomDomain)
            if err != nil {
                return err
            }
            mig.instanceSetup.Org.Human.Email.Address = domain.EmailAddress(mig.instanceSetup.Org.Human.Username + "@" + orgDomain)
        }
    }

    _, token, key, loginClientToken, _, err := cmd.SetUpInstance(ctx, &mig.instanceSetup)
    if err != nil {
        return err
    }
    if (mig.instanceSetup.Org.Machine != nil &&
        ((mig.instanceSetup.Org.Machine.Pat != nil && token == "") ||
            (mig.instanceSetup.Org.Machine.MachineKey != nil && key == nil))) ||
        (mig.instanceSetup.Org.LoginClient != nil &&
            (mig.instanceSetup.Org.LoginClient.Pat != nil && loginClientToken == "")) {
        return err
    }
    return mig.outputMachineAuthentication(key, token, loginClientToken)
}

func (mig *FirstInstance) verifyEncryptionKeys(ctx context.Context) (*crypto_db.Database, error) {
    keyStorage, err := crypto_db.NewKeyStorage(mig.db, mig.masterKey)
    if err != nil {
        return nil, fmt.Errorf("cannot start key storage: %w", err)
    }
    if err = verifyKey(ctx, mig.userEncryptionKey, keyStorage); err != nil {
        return nil, err
    }
    if err = verifyKey(ctx, mig.smtpEncryptionKey, keyStorage); err != nil {
        return nil, err
    }
    if err = verifyKey(ctx, mig.oidcEncryptionKey, keyStorage); err != nil {
        return nil, err
    }
    return keyStorage, nil
}

func (mig *FirstInstance) outputMachineAuthentication(key *command.MachineKey, token, loginClientToken string) error {
    if key != nil {
        keyDetails, err := key.Detail()
        if err != nil {
            return err
        }
        if err := outputStdoutOrPath(mig.MachineKeyPath, string(keyDetails)); err != nil {
            return err
        }
    }
    if token != "" {
        if err := outputStdoutOrPath(mig.PatPath, token); err != nil {
            return err
        }
    }
    if loginClientToken != "" {
        if err := outputStdoutOrPath(mig.LoginClientPatPath, loginClientToken); err != nil {
            return err
        }
    }
    return nil
}

func outputStdoutOrPath(path string, content string) (err error) {
    f := os.Stdout
    if path != "" {
        f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
        if err != nil {
            return err
        }
        defer f.Close()
    }
    _, err = fmt.Fprintln(f, content)
    return err
}

func (mig *FirstInstance) String() string {
    return "03_default_instance"
}

func verifyKey(ctx context.Context, key *crypto.KeyConfig, storage crypto.KeyStorage) (err error) {
    _, err = crypto.LoadKey(key.EncryptionKeyID, storage)
    if err == nil {
        return nil
    }
    k, err := crypto.NewKey(key.EncryptionKeyID)
    if err != nil {
        return err
    }
    return storage.CreateKeys(ctx, k)
}
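The username fallback in FirstInstance above only appends the IAM domain when the configured username is not already email-shaped. A self-contained illustration of just that string logic; the values are made up, and in the real code the domain comes from domain.NewIAMDomainName:

package main

import (
    "fmt"
    "strings"
)

func main() {
    username := "zitadel-admin"      // hypothetical configured username
    orgDomain := "zitadel.localhost" // hypothetical result of domain.NewIAMDomainName
    // Mirrors the check in Execute: only append when no "@" is present yet.
    if !strings.Contains(username, "@") {
        username = username + "@" + orgDomain
    }
    fmt.Println(username) // zitadel-admin@zitadel.localhost
}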
27
apps/api/cmd/setup/05.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    "database/sql"
    _ "embed"

    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 05.sql
    lastFailedStmts string
)

type LastFailed struct {
    dbClient *sql.DB
}

func (mig *LastFailed) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, lastFailedStmts)
    return err
}

func (mig *LastFailed) String() string {
    return "05_last_failed"
}
11
apps/api/cmd/setup/05.sql
Normal file
@@ -0,0 +1,11 @@
CREATE INDEX IF NOT EXISTS current_sequences_instance_id_idx ON adminapi.current_sequences (instance_id);
CREATE INDEX IF NOT EXISTS current_sequences_instance_id_idx ON auth.current_sequences (instance_id);
CREATE INDEX IF NOT EXISTS current_sequences_instance_id_idx ON projections.current_sequences (instance_id);

CREATE INDEX IF NOT EXISTS failed_events_instance_id_idx ON adminapi.failed_events (instance_id);
CREATE INDEX IF NOT EXISTS failed_events_instance_id_idx ON auth.failed_events (instance_id);
CREATE INDEX IF NOT EXISTS failed_events_instance_id_idx ON projections.failed_events (instance_id);

ALTER TABLE adminapi.failed_events ADD COLUMN IF NOT EXISTS last_failed TIMESTAMPTZ;
ALTER TABLE auth.failed_events ADD COLUMN IF NOT EXISTS last_failed TIMESTAMPTZ;
ALTER TABLE projections.failed_events ADD COLUMN IF NOT EXISTS last_failed TIMESTAMPTZ;
30
apps/api/cmd/setup/06.go
Normal file
@@ -0,0 +1,30 @@
package setup

import (
    "context"
    "database/sql"
    _ "embed"

    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 06/adminapi.sql
    createAdminViews06 string
    //go:embed 06/auth.sql
    createAuthViews06 string
)

type OwnerRemoveColumns struct {
    dbClient *sql.DB
}

func (mig *OwnerRemoveColumns) Execute(ctx context.Context, _ eventstore.Event) error {
    stmt := createAdminViews06 + createAuthViews06
    _, err := mig.dbClient.ExecContext(ctx, stmt)
    return err
}

func (mig *OwnerRemoveColumns) String() string {
    return "06_resource_owner_columns"
}
30
apps/api/cmd/setup/06/adminapi.sql
Normal file
@@ -0,0 +1,30 @@
CREATE TABLE adminapi.styling2 (
    aggregate_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    label_policy_state INT2 NOT NULL DEFAULT 0::INT2,
    sequence INT8 NULL,
    primary_color TEXT NULL,
    background_color TEXT NULL,
    warn_color TEXT NULL,
    font_color TEXT NULL,
    primary_color_dark TEXT NULL,
    background_color_dark TEXT NULL,
    warn_color_dark TEXT NULL,
    font_color_dark TEXT NULL,
    logo_url TEXT NULL,
    icon_url TEXT NULL,
    logo_dark_url TEXT NULL,
    icon_dark_url TEXT NULL,
    font_url TEXT NULL,
    err_msg_popup BOOL NULL,
    disable_watermark BOOL NULL,
    hide_login_name_suffix BOOL NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, aggregate_id, label_policy_state)
);

CREATE INDEX st2_owner_removed_idx ON adminapi.styling2 (owner_removed);
124
apps/api/cmd/setup/06/auth.sql
Normal file
@@ -0,0 +1,124 @@
CREATE TABLE auth.users2 (
    id TEXT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    resource_owner TEXT NULL,
    user_state INT2 NULL,
    password_set BOOL NULL,
    password_change_required BOOL NULL,
    password_change TIMESTAMPTZ NULL,
    last_login TIMESTAMPTZ NULL,
    user_name TEXT NULL,
    login_names TEXT[] NULL,
    preferred_login_name TEXT NULL,
    first_name TEXT NULL,
    last_name TEXT NULL,
    nick_name TEXT NULL,
    display_name TEXT NULL,
    preferred_language TEXT NULL,
    gender INT2 NULL,
    email TEXT NULL,
    is_email_verified BOOL NULL,
    phone TEXT NULL,
    is_phone_verified BOOL NULL,
    country TEXT NULL,
    locality TEXT NULL,
    postal_code TEXT NULL,
    region TEXT NULL,
    street_address TEXT NULL,
    otp_state INT2 NULL,
    mfa_max_set_up INT2 NULL,
    mfa_init_skipped TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    init_required BOOL NULL,
    username_change_required BOOL NULL,
    machine_name TEXT NULL,
    machine_description TEXT NULL,
    user_type TEXT NULL,
    u2f_tokens BYTEA NULL,
    passwordless_tokens BYTEA NULL,
    avatar_key TEXT NULL,
    passwordless_init_required BOOL NULL,
    password_init_required BOOL NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, id)
);
CREATE INDEX u2_owner_removed_idx ON auth.users2 (owner_removed);

CREATE TABLE auth.user_external_idps2 (
    external_user_id TEXT NOT NULL,
    idp_config_id TEXT NOT NULL,
    user_id TEXT NULL,
    idp_name TEXT NULL,
    user_display_name TEXT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    resource_owner TEXT NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, external_user_id, idp_config_id)
);
CREATE INDEX ext_idps2_owner_removed_idx ON auth.user_external_idps2 (owner_removed);

CREATE TABLE auth.org_project_mapping2 (
    org_id TEXT NOT NULL,
    project_id TEXT NOT NULL,
    project_grant_id TEXT NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, org_id, project_id)
);
CREATE INDEX org_proj_m2_owner_removed_idx ON auth.org_project_mapping2 (owner_removed);

CREATE TABLE auth.idp_providers2 (
    aggregate_id TEXT NOT NULL,
    idp_config_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    name TEXT NULL,
    idp_config_type INT2 NULL,
    idp_provider_type INT2 NULL,
    idp_state INT2 NULL,
    styling_type INT2 NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, aggregate_id, idp_config_id)
);
CREATE INDEX idp_prov2_owner_removed_idx ON auth.idp_providers2 (owner_removed);

CREATE TABLE auth.idp_configs2 (
    idp_config_id TEXT NOT NULL,
    creation_date TIMESTAMPTZ NULL,
    change_date TIMESTAMPTZ NULL,
    sequence INT8 NULL,
    aggregate_id TEXT NULL,
    name TEXT NULL,
    idp_state INT2 NULL,
    idp_provider_type INT2 NULL,
    is_oidc BOOL NULL,
    oidc_client_id TEXT NULL,
    oidc_client_secret JSONB NULL,
    oidc_issuer TEXT NULL,
    oidc_scopes TEXT[] NULL,
    oidc_idp_display_name_mapping INT2 NULL,
    oidc_idp_username_mapping INT2 NULL,
    styling_type INT2 NULL,
    oauth_authorization_endpoint TEXT NULL,
    oauth_token_endpoint TEXT NULL,
    auto_register BOOL NULL,
    jwt_endpoint TEXT NULL,
    jwt_keys_endpoint TEXT NULL,
    jwt_header_name TEXT NULL,
    instance_id TEXT NOT NULL,
    owner_removed BOOL DEFAULT false,

    PRIMARY KEY (instance_id, idp_config_id)
);
CREATE INDEX idp_conf2_owner_removed_idx ON auth.idp_configs2 (owner_removed);
34
apps/api/cmd/setup/07.go
Normal file
@@ -0,0 +1,34 @@
package setup

import (
    "context"
    "database/sql"
    _ "embed"
    "strings"

    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 07/logstore.sql
    createLogstoreSchema07 string
    //go:embed 07/access.sql
    createAccessLogsTable07 string
    //go:embed 07/execution.sql
    createExecutionLogsTable07 string
)

type LogstoreTables struct {
    dbClient *sql.DB
    username string
}

func (mig *LogstoreTables) Execute(ctx context.Context, _ eventstore.Event) error {
    stmt := strings.ReplaceAll(createLogstoreSchema07, "%[1]s", mig.username) + createAccessLogsTable07 + createExecutionLogsTable07
    _, err := mig.dbClient.ExecContext(ctx, stmt)
    return err
}

func (mig *LogstoreTables) String() string {
    return "07_logstore"
}
14
apps/api/cmd/setup/07/access.sql
Normal file
@@ -0,0 +1,14 @@
CREATE TABLE IF NOT EXISTS logstore.access (
    log_date TIMESTAMPTZ NOT NULL
    , protocol INT NOT NULL
    , request_url TEXT NOT NULL
    , response_status INT NOT NULL
    , request_headers JSONB
    , response_headers JSONB
    , instance_id TEXT NOT NULL
    , project_id TEXT NOT NULL
    , requested_domain TEXT
    , requested_host TEXT
);

CREATE INDEX protocol_date_desc ON logstore.access (instance_id, protocol, log_date DESC) INCLUDE (request_url, response_status, request_headers);
11
apps/api/cmd/setup/07/execution.sql
Normal file
@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS logstore.execution (
    log_date TIMESTAMPTZ NOT NULL
    , took INTERVAL
    , message TEXT NOT NULL
    , loglevel INT NOT NULL
    , instance_id TEXT NOT NULL
    , action_id TEXT NOT NULL
    , metadata JSONB
);

CREATE INDEX log_date_desc ON logstore.execution (instance_id, log_date DESC) INCLUDE (took);
3
apps/api/cmd/setup/07/logstore.sql
Normal file
@@ -0,0 +1,3 @@
CREATE SCHEMA IF NOT EXISTS logstore;

GRANT ALL ON ALL TABLES IN SCHEMA logstore TO "%[1]s";
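Note that the %[1]s placeholder in the GRANT statement is not filled by fmt.Sprintf: 07.go above replaces it literally with strings.ReplaceAll before executing the combined statement. A minimal, self-contained illustration of that substitution (the username value is hypothetical):

package main

import (
    "fmt"
    "strings"
)

func main() {
    const grant = `GRANT ALL ON ALL TABLES IN SCHEMA logstore TO "%[1]s";`
    username := "zitadel_user" // hypothetical database user
    // Same mechanism as LogstoreTables.Execute: plain text replacement.
    fmt.Println(strings.ReplaceAll(grant, "%[1]s", username))
}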
27
apps/api/cmd/setup/08.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 08/08.sql
    tokenIndexes08 string
)

type AuthTokenIndexes struct {
    dbClient *database.DB
}

func (mig *AuthTokenIndexes) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, tokenIndexes08)
    return err
}

func (mig *AuthTokenIndexes) String() string {
    return "08_auth_token_indexes"
}
5
apps/api/cmd/setup/08/08.sql
Normal file
@@ -0,0 +1,5 @@
CREATE INDEX IF NOT EXISTS inst_refresh_tkn_idx ON auth.tokens(instance_id, refresh_token_id);
CREATE INDEX IF NOT EXISTS inst_app_tkn_idx ON auth.tokens(instance_id, application_id);
CREATE INDEX IF NOT EXISTS inst_ro_tkn_idx ON auth.tokens(instance_id, resource_owner);
DROP INDEX IF EXISTS auth.user_user_agent_idx;
CREATE INDEX IF NOT EXISTS inst_usr_agnt_tkn_idx ON auth.tokens(instance_id, user_id, user_agent_id);
79
apps/api/cmd/setup/10.go
Normal file
@@ -0,0 +1,79 @@
package setup

import (
    "context"
    "database/sql"
    _ "embed"
    "time"

    "github.com/cockroachdb/cockroach-go/v2/crdb"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 10/10_create_temp_table.sql
    correctCreationDate10CreateTable string
    //go:embed 10/10_fill_table.sql
    correctCreationDate10FillTable string
    //go:embed 10/10_update.sql
    correctCreationDate10Update string
    //go:embed 10/10_count_wrong_events.sql
    correctCreationDate10CountWrongEvents string
    //go:embed 10/10_empty_table.sql
    correctCreationDate10Truncate string
)

type CorrectCreationDate struct {
    dbClient *database.DB
    FailAfter time.Duration
}

func (mig *CorrectCreationDate) Execute(ctx context.Context, _ eventstore.Event) (err error) {
    ctx, cancel := context.WithTimeout(ctx, mig.FailAfter)
    defer cancel()

    for i := 0; ; i++ {
        logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration")
        var affected int64
        err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
            _, err := tx.ExecContext(ctx, correctCreationDate10CreateTable)
            if err != nil {
                return err
            }
            logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table created")

            _, err = tx.ExecContext(ctx, correctCreationDate10Truncate)
            if err != nil {
                return err
            }
            _, err = tx.ExecContext(ctx, correctCreationDate10FillTable)
            if err != nil {
                return err
            }
            logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table filled")

            res := tx.QueryRowContext(ctx, correctCreationDate10CountWrongEvents)
            if err := res.Scan(&affected); err != nil || affected == 0 {
                return err
            }

            _, err = tx.ExecContext(ctx, correctCreationDate10Update)
            if err != nil {
                return err
            }
            logging.WithFields("mig", mig.String(), "iteration", i, "count", affected).Debug("creation dates updated")
            return nil
        })
        logging.WithFields("mig", mig.String(), "iteration", i).Debug("end iteration")
        if affected == 0 || err != nil {
            return err
        }
    }
}

func (mig *CorrectCreationDate) String() string {
    return "10_correct_creation_date"
}
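CorrectCreationDate runs its transaction in a loop: each iteration rebuilds the temp table, counts the remaining out-of-order events, updates them, and the loop exits only once a pass finds nothing left to fix (or errors, or the FailAfter timeout cancels the context). A stripped-down, self-contained sketch of that repeat-until-clean pattern with the SQL abstracted away; names here are illustrative, not from the commit:

package main

import "context"

// fixBatch stands in for one transactional pass; it reports how many rows it
// corrected. Repeating until a pass corrects zero rows makes the step
// idempotent and safe to resume after an interruption.
func repeatUntilClean(ctx context.Context, fixBatch func(context.Context) (int64, error)) error {
    for {
        affected, err := fixBatch(ctx)
        if err != nil {
            return err
        }
        if affected == 0 {
            return nil
        }
    }
}

func main() {
    remaining := int64(3)
    _ = repeatUntilClean(context.Background(), func(ctx context.Context) (int64, error) {
        remaining-- // pretend each pass fixes some rows until none remain
        return remaining, nil
    })
}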
1
apps/api/cmd/setup/10/10_count_wrong_events.sql
Normal file
@@ -0,0 +1 @@
SELECT COUNT(*) FROM wrong_events
6
apps/api/cmd/setup/10/10_create_temp_table.sql
Normal file
@@ -0,0 +1,6 @@
CREATE TEMPORARY TABLE IF NOT EXISTS wrong_events (
    instance_id TEXT
    , event_sequence BIGINT
    , current_cd TIMESTAMPTZ
    , next_cd TIMESTAMPTZ
);
1
apps/api/cmd/setup/10/10_empty_table.sql
Normal file
@@ -0,0 +1 @@
TRUNCATE wrong_events
19
apps/api/cmd/setup/10/10_fill_table.sql
Normal file
@@ -0,0 +1,19 @@
INSERT INTO wrong_events (
    SELECT * FROM (
        SELECT
            instance_id
            , event_sequence
            , creation_date AS current_cd
            , lead(creation_date) OVER (
                PARTITION BY instance_id
                ORDER BY event_sequence DESC
            ) AS next_cd
        FROM
            eventstore.events
        WHERE
            "position" IS NULL
    ) sub WHERE
        current_cd < next_cd
    ORDER BY
        event_sequence DESC
);
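The lead() window above walks each instance's events in descending sequence order and pairs every event with the creation_date of the event that follows it in that order, i.e. the event with the next lower sequence. A row lands in wrong_events only when its own creation_date is older than that earlier event's, meaning the timestamps run backwards relative to the sequence. A small self-contained Go analogue of that comparison over synthetic data (not ZITADEL code):

package main

import (
    "fmt"
    "time"
)

func main() {
    // creation dates ordered by descending event_sequence, as in the
    // PARTITION BY instance_id ORDER BY event_sequence DESC window above
    creationDates := []time.Time{
        time.Date(2023, 1, 1, 10, 0, 2, 0, time.UTC), // sequence 3
        time.Date(2023, 1, 1, 10, 0, 3, 0, time.UTC), // sequence 2, yet newer: wrong
        time.Date(2023, 1, 1, 10, 0, 1, 0, time.UTC), // sequence 1
    }
    for i := 0; i < len(creationDates)-1; i++ {
        current, next := creationDates[i], creationDates[i+1]
        if current.Before(next) { // mirrors "current_cd < next_cd"
            fmt.Printf("row %d flagged: %v < %v\n", i, current, next)
        }
    }
}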
10
apps/api/cmd/setup/10/10_update.sql
Normal file
@@ -0,0 +1,10 @@
UPDATE
    eventstore.events e
SET
    creation_date = we.next_cd
    , "position" = (EXTRACT(EPOCH FROM we.next_cd))
FROM
    wrong_events we
WHERE
    e.event_sequence = we.event_sequence
    AND e.instance_id = we.instance_id;
27
apps/api/cmd/setup/12.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 12/12_add_otp_columns.sql
    addOTPColumns string
)

type AddOTPColumns struct {
    dbClient *database.DB
}

func (mig *AddOTPColumns) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addOTPColumns)
    return err
}

func (mig *AddOTPColumns) String() string {
    return "12_auth_users_otp_columns"
}
2
apps/api/cmd/setup/12/12_add_otp_columns.sql
Normal file
@@ -0,0 +1,2 @@
ALTER TABLE auth.users2 ADD COLUMN otp_sms_added BOOL DEFAULT false;
ALTER TABLE auth.users2 ADD COLUMN otp_email_added BOOL DEFAULT false;
27
apps/api/cmd/setup/13.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 13/13_fix_quota_constraints.sql
    fixQuotaConstraints string
)

type FixQuotaConstraints struct {
    dbClient *database.DB
}

func (mig *FixQuotaConstraints) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, fixQuotaConstraints)
    return err
}

func (mig *FixQuotaConstraints) String() string {
    return "13_fix_quota_constraints"
}
4
apps/api/cmd/setup/13/13_fix_quota_constraints.sql
Normal file
@@ -0,0 +1,4 @@
ALTER TABLE IF EXISTS projections.quotas ALTER COLUMN from_anchor DROP NOT NULL;
ALTER TABLE IF EXISTS projections.quotas ALTER COLUMN amount DROP NOT NULL;
ALTER TABLE IF EXISTS projections.quotas ALTER COLUMN interval DROP NOT NULL;
ALTER TABLE IF EXISTS projections.quotas ALTER COLUMN limit_usage DROP NOT NULL;
67
apps/api/cmd/setup/14.go
Normal file
@@ -0,0 +1,67 @@
package setup

import (
    "context"
    "database/sql"
    "embed"
    "errors"
    "strings"

    "github.com/jackc/pgx/v5/pgconn"
    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 14/*.sql
    newEventsTable embed.FS
)

type NewEventsTable struct {
    dbClient *database.DB
}

func (mig *NewEventsTable) Execute(ctx context.Context, _ eventstore.Event) error {
    // if the events table already exists, events2 is created during a setup job
    var count int
    err := mig.dbClient.QueryRowContext(ctx,
        func(row *sql.Row) error {
            if err := row.Scan(&count); err != nil {
                return err
            }
            return row.Err()
        },
        "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events2'",
    )
    if err != nil || count == 1 {
        return err
    }

    statements, err := readStatements(newEventsTable, "14")
    if err != nil {
        return err
    }
    for _, stmt := range statements {
        stmt.query = strings.ReplaceAll(stmt.query, "{{.username}}", mig.dbClient.Username())
        logging.WithFields("file", stmt.file, "migration", mig.String()).Info("execute statement")
        _, err = mig.dbClient.ExecContext(ctx, stmt.query)
        if err != nil {
            return err
        }
    }
    return nil
}

func (mig *NewEventsTable) String() string {
    return "14_events_push"
}

func (mig *NewEventsTable) ContinueOnErr(err error) bool {
    pgErr := new(pgconn.PgError)
    if errors.As(err, &pgErr) {
        return pgErr.Code == "42P01"
    }
    return false
}
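ContinueOnErr above treats PostgreSQL error code 42P01 (undefined_table) as non-fatal: if eventstore.events never existed, there is nothing to rename and the setup can move on. A minimal self-contained sketch of that detection pattern with pgconn (the constructed error in main is synthetic, for demonstration only):

package main

import (
    "errors"
    "fmt"

    "github.com/jackc/pgx/v5/pgconn"
)

// isUndefinedTable reports whether err wraps PostgreSQL's undefined_table error.
func isUndefinedTable(err error) bool {
    pgErr := new(pgconn.PgError)
    return errors.As(err, &pgErr) && pgErr.Code == "42P01"
}

func main() {
    err := &pgconn.PgError{Code: "42P01", Message: `relation "eventstore.events" does not exist`}
    fmt.Println(isUndefinedTable(err)) // true
}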
1
apps/api/cmd/setup/14/01_disable_inserts.sql
Normal file
@@ -0,0 +1 @@
ALTER TABLE eventstore.events RENAME TO events_old;
31
apps/api/cmd/setup/14/02_create_and_fill_events2.sql
Normal file
@@ -0,0 +1,31 @@
CREATE TABLE eventstore.events2 (
    instance_id,
    aggregate_type,
    aggregate_id,

    event_type,
    "sequence",
    revision,
    created_at,
    payload,
    creator,
    "owner",

    "position",
    in_tx_order
) AS SELECT
    instance_id,
    aggregate_type,
    aggregate_id,

    event_type,
    event_sequence,
    substr(aggregate_version, 2)::SMALLINT,
    creation_date,
    event_data,
    editor_user,
    resource_owner,

    EXTRACT(EPOCH FROM creation_date),
    event_sequence
FROM eventstore.events_old;
4
apps/api/cmd/setup/14/03_events2_pk.sql
Normal file
@@ -0,0 +1,4 @@
BEGIN;
ALTER TABLE eventstore.events2 DROP CONSTRAINT IF EXISTS events2_pkey;
ALTER TABLE eventstore.events2 ADD PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence");
COMMIT;
7
apps/api/cmd/setup/14/04_constraints.sql
Normal file
@@ -0,0 +1,7 @@
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL,
    ALTER COLUMN revision SET NOT NULL,
    ALTER COLUMN created_at SET NOT NULL,
    ALTER COLUMN creator SET NOT NULL,
    ALTER COLUMN "owner" SET NOT NULL,
    ALTER COLUMN "position" SET NOT NULL,
    ALTER COLUMN in_tx_order SET NOT NULL;
3
apps/api/cmd/setup/14/05_indexes.sql
Normal file
@@ -0,0 +1,3 @@
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC, instance_id);
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");
39
apps/api/cmd/setup/15.go
Normal file
@@ -0,0 +1,39 @@
package setup

import (
    "context"
    "embed"

    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 15/*.sql
    currentProjectionState embed.FS
)

type CurrentProjectionState struct {
    dbClient *database.DB
}

func (mig *CurrentProjectionState) Execute(ctx context.Context, _ eventstore.Event) error {
    statements, err := readStatements(currentProjectionState, "15")
    if err != nil {
        return err
    }
    for _, stmt := range statements {
        logging.WithFields("file", stmt.file, "migration", mig.String()).Info("execute statement")
        _, err = mig.dbClient.ExecContext(ctx, stmt.query)
        if err != nil {
            return err
        }
    }
    return nil
}

func (mig *CurrentProjectionState) String() string {
    return "15_current_projection_state"
}
16
apps/api/cmd/setup/15/01_new_failed_events.sql
Normal file
@@ -0,0 +1,16 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
    projection_name TEXT NOT NULL
    , instance_id TEXT NOT NULL

    , aggregate_type TEXT NOT NULL
    , aggregate_id TEXT NOT NULL
    , event_creation_date TIMESTAMPTZ NOT NULL
    , failed_sequence INT8 NOT NULL

    , failure_count INT2 NULL DEFAULT 0
    , error TEXT
    , last_failed TIMESTAMPTZ

    , PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);
26
apps/api/cmd/setup/15/02_fe_from_projections.sql
Normal file
@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
    projection_name
    , instance_id
    , aggregate_type
    , aggregate_id
    , event_creation_date
    , failed_sequence
    , failure_count
    , error
    , last_failed
) SELECT
    fe.projection_name
    , fe.instance_id
    , e.aggregate_type
    , e.aggregate_id
    , e.created_at
    , e.sequence
    , fe.failure_count
    , fe.error
    , fe.last_failed
FROM
    projections.failed_events fe
JOIN eventstore.events2 e ON
    e.instance_id = fe.instance_id
    AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;
26
apps/api/cmd/setup/15/03_fe_from_adminapi.sql
Normal file
@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
    projection_name
    , instance_id
    , aggregate_type
    , aggregate_id
    , event_creation_date
    , failed_sequence
    , failure_count
    , error
    , last_failed
) SELECT
    fe.view_name
    , fe.instance_id
    , e.aggregate_type
    , e.aggregate_id
    , e.created_at
    , e.sequence
    , fe.failure_count
    , fe.err_msg
    , fe.last_failed
FROM
    adminapi.failed_events fe
JOIN eventstore.events2 e ON
    e.instance_id = fe.instance_id
    AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;
26
apps/api/cmd/setup/15/04_fe_from_auth.sql
Normal file
@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
    projection_name
    , instance_id
    , aggregate_type
    , aggregate_id
    , event_creation_date
    , failed_sequence
    , failure_count
    , error
    , last_failed
) SELECT
    fe.view_name
    , fe.instance_id
    , e.aggregate_type
    , e.aggregate_id
    , e.created_at
    , e.sequence
    , fe.failure_count
    , fe.err_msg
    , fe.last_failed
FROM
    auth.failed_events fe
JOIN eventstore.events2 e ON
    e.instance_id = fe.instance_id
    AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;
15
apps/api/cmd/setup/15/05_current_states.sql
Normal file
@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
    projection_name TEXT NOT NULL
    , instance_id TEXT NOT NULL

    , last_updated TIMESTAMPTZ

    , aggregate_id TEXT
    , aggregate_type TEXT
    , "sequence" INT8
    , event_date TIMESTAMPTZ
    , "position" DECIMAL

    , PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);
28
apps/api/cmd/setup/15/06_cs_from_projections.sql
Normal file
@@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
    projection_name
    , instance_id
    , event_date
    , "position"
    , last_updated
) SELECT
    cs.projection_name
    , cs.instance_id
    , e.created_at
    , e.position
    , cs.timestamp
FROM
    projections.current_sequences cs
JOIN eventstore.events2 e ON
    e.instance_id = cs.instance_id
    AND e.aggregate_type = cs.aggregate_type
    AND e.sequence = cs.current_sequence
    AND cs.current_sequence = (
        SELECT
            MAX(cs2.current_sequence)
        FROM
            projections.current_sequences cs2
        WHERE
            cs.projection_name = cs2.projection_name
            AND cs.instance_id = cs2.instance_id
    )
ON CONFLICT DO NOTHING;
27
apps/api/cmd/setup/15/07_cs_from_adminapi.sql
Normal file
@@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
    projection_name
    , instance_id
    , event_date
    , "position"
    , last_updated
) SELECT
    cs.view_name
    , cs.instance_id
    , e.created_at
    , e.position
    , cs.last_successful_spooler_run
FROM
    adminapi.current_sequences cs
JOIN eventstore.events2 e ON
    e.instance_id = cs.instance_id
    AND e.sequence = cs.current_sequence
    AND cs.current_sequence = (
        SELECT
            MAX(cs2.current_sequence)
        FROM
            adminapi.current_sequences cs2
        WHERE
            cs.view_name = cs2.view_name
            AND cs.instance_id = cs2.instance_id
    )
ON CONFLICT DO NOTHING;
27
apps/api/cmd/setup/15/08_cs_from_auth.sql
Normal file
@@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
    projection_name
    , instance_id
    , event_date
    , "position"
    , last_updated
) SELECT
    cs.view_name
    , cs.instance_id
    , e.created_at
    , e.position
    , cs.last_successful_spooler_run
FROM
    auth.current_sequences cs
JOIN eventstore.events2 e ON
    e.instance_id = cs.instance_id
    AND e.sequence = cs.current_sequence
    AND cs.current_sequence = (
        SELECT
            MAX(cs2.current_sequence)
        FROM
            auth.current_sequences cs2
        WHERE
            cs.view_name = cs2.view_name
            AND cs.instance_id = cs2.instance_id
    )
ON CONFLICT DO NOTHING;
34
apps/api/cmd/setup/16.go
Normal file
@@ -0,0 +1,34 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 16.sql
    uniqueConstraintLower string
)

type UniqueConstraintToLower struct {
    dbClient *database.DB
}

func (mig *UniqueConstraintToLower) Execute(ctx context.Context, _ eventstore.Event) error {
    res, err := mig.dbClient.ExecContext(ctx, uniqueConstraintLower)
    if err != nil {
        return err
    }
    count, err := res.RowsAffected()
    logging.WithFields("count", count).Info("unique constraints updated")
    return err
}

func (mig *UniqueConstraintToLower) String() string {
    return "16_unique_constraint_lower"
}
13
apps/api/cmd/setup/16.sql
Normal file
@@ -0,0 +1,13 @@
WITH casesensitive as (
    SELECT instance_id, unique_type, lower(unique_field)
    FROM eventstore.unique_constraints
    GROUP BY instance_id, unique_type, lower(unique_field)
    HAVING count(unique_field) < 2
)
UPDATE eventstore.unique_constraints c
SET unique_field = casesensitive.lower
FROM casesensitive
WHERE c.instance_id = casesensitive.instance_id
    AND c.unique_type = casesensitive.unique_type
    AND lower(c.unique_field) = casesensitive.lower
    AND c.unique_field <> casesensitive.lower;
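The HAVING count(unique_field) < 2 guard is what makes this rewrite safe: a case-insensitive group is only folded to lowercase when it contains a single row, because lowercasing two values that differ only in case under the same unique_type would collide on the constraint. A tiny self-contained Go model of that grouping decision, over synthetic values (not ZITADEL code):

package main

import (
    "fmt"
    "strings"
)

func main() {
    fields := []string{"Admin", "admin", "Support"}
    groups := map[string][]string{}
    for _, f := range fields {
        key := strings.ToLower(f)
        groups[key] = append(groups[key], f)
    }
    for key, members := range groups {
        if len(members) < 2 { // mirrors HAVING count(unique_field) < 2
            fmt.Printf("fold %q to %q\n", members[0], key)
        } else {
            fmt.Printf("skip %v: lowercasing would collide\n", members)
        }
    }
}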
27
apps/api/cmd/setup/17.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 17.sql
    addOffsetField string
)

type AddOffsetToCurrentStates struct {
    dbClient *database.DB
}

func (mig *AddOffsetToCurrentStates) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addOffsetField)
    return err
}

func (mig *AddOffsetToCurrentStates) String() string {
    return "17_add_offset_col_to_current_states"
}
1
apps/api/cmd/setup/17.sql
Normal file
@@ -0,0 +1 @@
ALTER TABLE projections.current_states ADD filter_offset INTEGER;
27
apps/api/cmd/setup/18.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 18.sql
    addLowerFieldsToLoginNames string
)

type AddLowerFieldsToLoginNames struct {
    dbClient *database.DB
}

func (mig *AddLowerFieldsToLoginNames) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addLowerFieldsToLoginNames)
    return err
}

func (mig *AddLowerFieldsToLoginNames) String() string {
    return "18_add_lower_fields_to_login_names"
}
6
apps/api/cmd/setup/18.sql
Normal file
@@ -0,0 +1,6 @@
ALTER TABLE IF EXISTS projections.login_names3_users ADD COLUMN IF NOT EXISTS user_name_lower TEXT GENERATED ALWAYS AS (lower(user_name)) STORED;
CREATE INDEX IF NOT EXISTS login_names3_users_search ON projections.login_names3_users (instance_id, user_name_lower) INCLUDE (resource_owner);

ALTER TABLE IF EXISTS projections.login_names3_domains ADD COLUMN IF NOT EXISTS name_lower TEXT GENERATED ALWAYS AS (lower(name)) STORED;
CREATE INDEX IF NOT EXISTS login_names3_domain_search ON projections.login_names3_domains (instance_id, resource_owner, name_lower);
CREATE INDEX IF NOT EXISTS login_names3_domain_search_result ON projections.login_names3_domains (instance_id, resource_owner) INCLUDE (is_primary);
27
apps/api/cmd/setup/19.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 19.sql
    addCurrentSequencesIndex string
)

type AddCurrentSequencesIndex struct {
    dbClient *database.DB
}

func (mig *AddCurrentSequencesIndex) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addCurrentSequencesIndex)
    return err
}

func (mig *AddCurrentSequencesIndex) String() string {
    return "19_add_current_sequences_index"
}
1
apps/api/cmd/setup/19.sql
Normal file
@@ -0,0 +1 @@
CREATE INDEX CONCURRENTLY IF NOT EXISTS events2_current_sequence ON eventstore.events2 ("sequence" DESC, aggregate_id, aggregate_type, instance_id);
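CREATE INDEX CONCURRENTLY builds the index without blocking writes to eventstore.events2, which matters on a busy events table, but PostgreSQL refuses to run it inside a transaction block. That fits how 19.go executes the statement: a direct ExecContext rather than a BEGIN ... COMMIT wrapper. A sketch of the same execution pattern with plain database/sql (illustrative only, not part of the commit):

package main

import (
    "context"
    "database/sql"
)

// addIndexConcurrently must be sent outside an explicit transaction:
// PostgreSQL rejects CREATE INDEX CONCURRENTLY inside BEGIN ... COMMIT.
func addIndexConcurrently(ctx context.Context, db *sql.DB) error {
    _, err := db.ExecContext(ctx,
        `CREATE INDEX CONCURRENTLY IF NOT EXISTS events2_current_sequence
            ON eventstore.events2 ("sequence" DESC, aggregate_id, aggregate_type, instance_id)`)
    return err
}

func main() {} // placeholder so the sketch compiles as a program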
27
apps/api/cmd/setup/20.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 20.sql
    addByUserIndexToSession string
)

type AddByUserIndexToSession struct {
    dbClient *database.DB
}

func (mig *AddByUserIndexToSession) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addByUserIndexToSession)
    return err
}

func (mig *AddByUserIndexToSession) String() string {
    return "20_add_by_user_index_on_session"
}
1
apps/api/cmd/setup/20.sql
Normal file
@@ -0,0 +1 @@
CREATE INDEX CONCURRENTLY IF NOT EXISTS user_sessions_by_user ON auth.user_sessions (instance_id, user_id);
27
apps/api/cmd/setup/21.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 21.sql
    addBlockFieldToLimits string
)

type AddBlockFieldToLimits struct {
    dbClient *database.DB
}

func (mig *AddBlockFieldToLimits) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, addBlockFieldToLimits)
    return err
}

func (mig *AddBlockFieldToLimits) String() string {
    return "21_add_block_field_to_limits"
}
1
apps/api/cmd/setup/21.sql
Normal file
@@ -0,0 +1 @@
ALTER TABLE IF EXISTS projections.limits ADD COLUMN IF NOT EXISTS block BOOLEAN;
27
apps/api/cmd/setup/22.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 22.sql
    activeInstanceEvents string
)

type ActiveInstanceEvents struct {
    dbClient *database.DB
}

func (mig *ActiveInstanceEvents) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, activeInstanceEvents)
    return err
}

func (mig *ActiveInstanceEvents) String() string {
    return "22_active_instance_events_index"
}
1
apps/api/cmd/setup/22.sql
Normal file
@@ -0,0 +1 @@
CREATE INDEX CONCURRENTLY IF NOT EXISTS active_instances_events ON eventstore.events2 (aggregate_type, event_type) WHERE aggregate_type = 'instance' AND event_type IN ('instance.added', 'instance.removed');
27
apps/api/cmd/setup/23.go
Normal file
@@ -0,0 +1,27 @@
package setup

import (
    "context"
    _ "embed"

    "github.com/zitadel/zitadel/internal/database"
    "github.com/zitadel/zitadel/internal/eventstore"
)

var (
    //go:embed 23.sql
    correctGlobalUniqueConstraints string
)

type CorrectGlobalUniqueConstraints struct {
    dbClient *database.DB
}

func (mig *CorrectGlobalUniqueConstraints) Execute(ctx context.Context, _ eventstore.Event) error {
    _, err := mig.dbClient.ExecContext(ctx, correctGlobalUniqueConstraints)
    return err
}

func (mig *CorrectGlobalUniqueConstraints) String() string {
    return "23_correct_global_unique_constraints"
}
1
apps/api/cmd/setup/23.sql
Normal file
@@ -0,0 +1 @@
UPDATE eventstore.unique_constraints SET instance_id = '' WHERE unique_type = 'instance_domain';
Some files were not shown because too many files have changed in this diff.