Merge branch 'main' into clean-transactional-propsal
File diff suppressed because one or more lines are too long
@@ -4,6 +4,7 @@ import (
 	"context"
 	_ "embed"
 	"io"
+	"strconv"
 	"time"

 	"github.com/jackc/pgx/v5/stdlib"
@@ -41,12 +42,16 @@ func copyAuth(ctx context.Context, config *Migration) {
 	logging.OnError(err).Fatal("unable to connect to destination database")
 	defer destClient.Close()

-	copyAuthRequests(ctx, sourceClient, destClient)
+	copyAuthRequests(ctx, sourceClient, destClient, config.MaxAuthRequestAge)
 }

-func copyAuthRequests(ctx context.Context, source, dest *database.DB) {
+func copyAuthRequests(ctx context.Context, source, dest *database.DB, maxAuthRequestAge time.Duration) {
 	start := time.Now()

+	logging.Info("creating index on auth.auth_requests.change_date to speed up copy in source database")
+	_, err := source.ExecContext(ctx, "CREATE INDEX CONCURRENTLY IF NOT EXISTS auth_requests_change_date ON auth.auth_requests (change_date)")
+	logging.OnError(err).Fatal("unable to create index on auth.auth_requests.change_date")
+
 	sourceConn, err := source.Conn(ctx)
 	logging.OnError(err).Fatal("unable to acquire connection")
 	defer sourceConn.Close()
@@ -55,9 +60,9 @@ func copyAuthRequests(ctx context.Context, source, dest *database.DB) {
 	errs := make(chan error, 1)

 	go func() {
-		err = sourceConn.Raw(func(driverConn interface{}) error {
+		err = sourceConn.Raw(func(driverConn any) error {
 			conn := driverConn.(*stdlib.Conn).Conn()
-			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+") TO STDOUT")
+			_, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+" AND change_date > NOW() - INTERVAL '"+strconv.FormatFloat(maxAuthRequestAge.Seconds(), 'f', -1, 64)+" seconds') TO STDOUT")
 			w.Close()
 			return err
 		})
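For reference, the statement the new code sends over the COPY protocol would look roughly as follows — a sketch only, assuming instanceClause() expands to a WHERE filter on instance_id (its expansion is not shown in this diff) and MaxAuthRequestAge is left at its 720h default (2592000 seconds):

COPY (
    SELECT id,
           regexp_replace(request::TEXT, '\\u0000', '', 'g')::JSON request, -- strip escaped NUL sequences before insert
           code, request_type, creation_date, change_date, instance_id
    FROM auth.auth_requests
    WHERE instance_id IN ('<instance ids>') -- hypothetical instanceClause() output
      AND change_date > NOW() - INTERVAL '2592000 seconds'
) TO STDOUT;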
@@ -69,7 +74,7 @@ func copyAuthRequests(ctx context.Context, source, dest *database.DB) {
 	defer destConn.Close()

 	var affected int64
-	err = destConn.Raw(func(driverConn interface{}) error {
+	err = destConn.Raw(func(driverConn any) error {
 		conn := driverConn.(*stdlib.Conn).Conn()

 		if shouldReplace {
@@ -23,7 +23,8 @@ type Migration struct {
 	Source      database.Config
 	Destination database.Config

-	EventBulkSize uint32
+	EventBulkSize     uint32
+	MaxAuthRequestAge time.Duration

 	Log     *logging.Config
 	Machine *id.Config
@@ -1,61 +1,64 @@
 Source:
   cockroach:
-    Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
-    Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
-    Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
-    MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
-    MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
-    MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
-    MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
-    Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
+    Host: localhost # ZITADEL_SOURCE_COCKROACH_HOST
+    Port: 26257 # ZITADEL_SOURCE_COCKROACH_PORT
+    Database: zitadel # ZITADEL_SOURCE_COCKROACH_DATABASE
+    MaxOpenConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXOPENCONNS
+    MaxIdleConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXIDLECONNS
+    MaxConnLifetime: 30m # ZITADEL_SOURCE_COCKROACH_MAXCONNLIFETIME
+    MaxConnIdleTime: 5m # ZITADEL_SOURCE_COCKROACH_MAXCONNIDLETIME
+    Options: "" # ZITADEL_SOURCE_COCKROACH_OPTIONS
     User:
-      Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME
-      Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD
+      Username: zitadel # ZITADEL_SOURCE_COCKROACH_USER_USERNAME
+      Password: "" # ZITADEL_SOURCE_COCKROACH_USER_PASSWORD
       SSL:
-        Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE
-        RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT
-        Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT
-        Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY
+        Mode: disable # ZITADEL_SOURCE_COCKROACH_USER_SSL_MODE
+        RootCert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_ROOTCERT
+        Cert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_CERT
+        Key: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_KEY
   # Postgres is used as soon as a value is set
   # The values describe the possible fields to set values
   postgres:
-    Host: # ZITADEL_DATABASE_POSTGRES_HOST
-    Port: # ZITADEL_DATABASE_POSTGRES_PORT
-    Database: # ZITADEL_DATABASE_POSTGRES_DATABASE
-    MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
-    MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
-    MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
-    MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
-    Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS
+    Host: # ZITADEL_SOURCE_POSTGRES_HOST
+    Port: # ZITADEL_SOURCE_POSTGRES_PORT
+    Database: # ZITADEL_SOURCE_POSTGRES_DATABASE
+    MaxOpenConns: # ZITADEL_SOURCE_POSTGRES_MAXOPENCONNS
+    MaxIdleConns: # ZITADEL_SOURCE_POSTGRES_MAXIDLECONNS
+    MaxConnLifetime: # ZITADEL_SOURCE_POSTGRES_MAXCONNLIFETIME
+    MaxConnIdleTime: # ZITADEL_SOURCE_POSTGRES_MAXCONNIDLETIME
+    Options: # ZITADEL_SOURCE_POSTGRES_OPTIONS
     User:
-      Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
-      Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
+      Username: # ZITADEL_SOURCE_POSTGRES_USER_USERNAME
+      Password: # ZITADEL_SOURCE_POSTGRES_USER_PASSWORD
       SSL:
-        Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
-        RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
-        Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
-        Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
+        Mode: # ZITADEL_SOURCE_POSTGRES_USER_SSL_MODE
+        RootCert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_ROOTCERT
+        Cert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_CERT
+        Key: # ZITADEL_SOURCE_POSTGRES_USER_SSL_KEY

 Destination:
   postgres:
-    Host: localhost # ZITADEL_DATABASE_POSTGRES_HOST
-    Port: 5432 # ZITADEL_DATABASE_POSTGRES_PORT
-    Database: zitadel # ZITADEL_DATABASE_POSTGRES_DATABASE
-    MaxOpenConns: 5 # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS
-    MaxIdleConns: 2 # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS
-    MaxConnLifetime: 30m # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME
-    MaxConnIdleTime: 5m # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME
-    Options: "" # ZITADEL_DATABASE_POSTGRES_OPTIONS
+    Host: localhost # ZITADEL_DESTINATION_POSTGRES_HOST
+    Port: 5432 # ZITADEL_DESTINATION_POSTGRES_PORT
+    Database: zitadel # ZITADEL_DESTINATION_POSTGRES_DATABASE
+    MaxOpenConns: 5 # ZITADEL_DESTINATION_POSTGRES_MAXOPENCONNS
+    MaxIdleConns: 2 # ZITADEL_DESTINATION_POSTGRES_MAXIDLECONNS
+    MaxConnLifetime: 30m # ZITADEL_DESTINATION_POSTGRES_MAXCONNLIFETIME
+    MaxConnIdleTime: 5m # ZITADEL_DESTINATION_POSTGRES_MAXCONNIDLETIME
+    Options: "" # ZITADEL_DESTINATION_POSTGRES_OPTIONS
     User:
-      Username: zitadel # ZITADEL_DATABASE_POSTGRES_USER_USERNAME
-      Password: "" # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD
+      Username: zitadel # ZITADEL_DESTINATION_POSTGRES_USER_USERNAME
+      Password: "" # ZITADEL_DESTINATION_POSTGRES_USER_PASSWORD
       SSL:
-        Mode: disable # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE
-        RootCert: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT
-        Cert: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT
-        Key: "" # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY
+        Mode: disable # ZITADEL_DESTINATION_POSTGRES_USER_SSL_MODE
+        RootCert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_ROOTCERT
+        Cert: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_CERT
+        Key: "" # ZITADEL_DESTINATION_POSTGRES_USER_SSL_KEY

-EventBulkSize: 10000
+EventBulkSize: 10000 # ZITADEL_EVENTBULKSIZE
+# The maximum duration an auth request was last updated before it gets ignored.
+# Default is 30 days
+MaxAuthRequestAge: 720h # ZITADEL_MAXAUTHREQUESTAGE

 Projections:
   # The maximum duration a transaction remains open
@@ -64,14 +67,14 @@ Projections:
   TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
   # turn off scheduler during operation
   RequeueEvery: 0s
-  ConcurrentInstances: 7
-  EventBulkLimit: 1000
+  ConcurrentInstances: 7 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES
+  EventBulkLimit: 1000 # ZITADEL_PROJECTIONS_EVENTBULKLIMIT
   Customizations:
     notifications:
       MaxFailureCount: 1

 Eventstore:
-  MaxRetries: 3
+  MaxRetries: 3 # ZITADEL_EVENTSTORE_MAXRETRIES

 Auth:
   Spooler:
@@ -3,6 +3,8 @@ package mirror
 import (
 	"context"

+	"github.com/shopspring/decimal"
+
 	"github.com/zitadel/zitadel/internal/v2/eventstore"
 	"github.com/zitadel/zitadel/internal/v2/readmodel"
 	"github.com/zitadel/zitadel/internal/v2/system"
@@ -29,7 +31,7 @@ func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore
 	return lastSuccess, nil
 }

-func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position float64) error {
+func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position decimal.Decimal) error {
 	return destinationES.Push(
 		ctx,
 		eventstore.NewPushIntent(
@@ -8,7 +8,9 @@ import (
 	"io"
 	"time"

+	"github.com/jackc/pgx/v5/pgconn"
 	"github.com/jackc/pgx/v5/stdlib"
+	"github.com/shopspring/decimal"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"github.com/zitadel/logging"
@@ -69,6 +71,7 @@ func positionQuery(db *db.DB) string {
 }

 func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
+	logging.Info("starting to copy events")
 	start := time.Now()
 	reader, writer := io.Pipe()

@@ -88,7 +91,7 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 	previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName())
 	logging.OnError(err).Fatal("unable to query latest successful migration")

-	var maxPosition float64
+	var maxPosition decimal.Decimal
 	err = source.QueryRowContext(ctx,
 		func(row *sql.Row) error {
 			return row.Scan(&maxPosition)
@@ -100,7 +103,7 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 	logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration")

 	nextPos := make(chan bool, 1)
-	pos := make(chan float64, 1)
+	pos := make(chan decimal.Decimal, 1)
 	errs := make(chan error, 3)

 	go func() {
@@ -130,7 +133,10 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 			if err != nil {
 				return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i)
 			}
+			logging.WithFields("batch_count", i).Info("batch of events copied")

 			if tag.RowsAffected() < int64(bulkSize) {
+				logging.WithFields("batch_count", i).Info("last batch of events copied")
 				return nil
 			}

@@ -148,7 +154,7 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 	go func() {
 		defer close(pos)
 		for range nextPos {
-			var position float64
+			var position decimal.Decimal
 			err := dest.QueryRowContext(
 				ctx,
 				func(row *sql.Row) error {
@@ -171,6 +177,10 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 		tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN")
 		eventCount = tag.RowsAffected()
 		if err != nil {
+			pgErr := new(pgconn.PgError)
+			errors.As(err, &pgErr)
+
+			logging.WithError(err).WithField("pg_err_details", pgErr.Detail).Error("unable to copy events into destination")
 			return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination")
 		}

@@ -183,7 +193,7 @@ func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) {
 	logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated")
 }

-func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position float64, errs <-chan error) {
+func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position decimal.Decimal, errs <-chan error) {
 	joinedErrs := make([]error, 0, len(errs))
 	for err := range errs {
 		joinedErrs = append(joinedErrs, err)
@@ -202,6 +212,7 @@ func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, sou
 }

 func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) {
+	logging.Info("starting to copy unique constraints")
 	start := time.Now()
 	reader, writer := io.Pipe()
 	errs := make(chan error, 1)
@@ -3,6 +3,7 @@ package mirror
 import (
 	"context"
 	"database/sql"
+	"fmt"
 	"net/http"
 	"sync"
 	"time"
@@ -104,6 +105,7 @@ func projections(
 	config *ProjectionsConfig,
 	masterKey string,
 ) {
+	logging.Info("starting to fill projections")
 	start := time.Now()

 	client, err := database.Connect(config.Destination, false)
@@ -255,8 +257,10 @@ func projections(
 		go execProjections(ctx, instances, failedInstances, &wg)
 	}

-	for _, instance := range queryInstanceIDs(ctx, client) {
+	existingInstances := queryInstanceIDs(ctx, client)
+	for i, instance := range existingInstances {
 		instances <- instance
+		logging.WithFields("id", instance, "index", fmt.Sprintf("%d/%d", i, len(existingInstances))).Info("instance queued for projection")
 	}
 	close(instances)
 	wg.Wait()
@@ -268,7 +272,7 @@ func projections(

 func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
 	for instance := range instances {
-		logging.WithFields("instance", instance).Info("start projections")
+		logging.WithFields("instance", instance).Info("starting projections")
 		ctx = internal_authz.WithInstanceID(ctx, instance)

 		err := projection.ProjectInstance(ctx)
@@ -292,6 +296,13 @@ func execProjections(ctx context.Context, instances <-chan string, failedInstanc
 			continue
 		}

+		err = projection.ProjectInstanceFields(ctx)
+		if err != nil {
+			logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
+			failedInstances <- instance
+			continue
+		}
+
 		err = auth_handler.ProjectInstance(ctx)
 		if err != nil {
 			logging.WithFields("instance", instance).WithError(err).Info("trigger auth handler failed")
@@ -311,7 +322,7 @@ func execProjections(ctx context.Context, instances <-chan string, failedInstanc
 		wg.Done()
 	}
 }

-// returns the instance configured by flag
+// queryInstanceIDs returns the instance configured by flag
 // or all instances which are not removed
 func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
 	if len(instanceIDs) > 0 {
@@ -46,6 +46,7 @@ func copySystem(ctx context.Context, config *Migration) {
 }

 func copyAssets(ctx context.Context, source, dest *database.DB) {
+	logging.Info("starting to copy assets")
 	start := time.Now()

 	sourceConn, err := source.Conn(ctx)
@@ -70,7 +71,7 @@ func copyAssets(ctx context.Context, source, dest *database.DB) {
 	logging.OnError(err).Fatal("unable to acquire dest connection")
 	defer destConn.Close()

-	var eventCount int64
+	var assetCount int64
 	err = destConn.Raw(func(driverConn interface{}) error {
 		conn := driverConn.(*stdlib.Conn).Conn()

@@ -82,16 +83,17 @@ func copyAssets(ctx context.Context, source, dest *database.DB) {
 		}

 		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin")
-		eventCount = tag.RowsAffected()
+		assetCount = tag.RowsAffected()

 		return err
 	})
 	logging.OnError(err).Fatal("unable to copy assets to destination")
 	logging.OnError(<-errs).Fatal("unable to copy assets from source")
-	logging.WithFields("took", time.Since(start), "count", eventCount).Info("assets migrated")
+	logging.WithFields("took", time.Since(start), "count", assetCount).Info("assets migrated")
 }

 func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
+	logging.Info("starting to copy encryption keys")
 	start := time.Now()

 	sourceConn, err := source.Conn(ctx)
@@ -116,7 +118,7 @@ func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
 	logging.OnError(err).Fatal("unable to acquire dest connection")
 	defer destConn.Close()

-	var eventCount int64
+	var keyCount int64
 	err = destConn.Raw(func(driverConn interface{}) error {
 		conn := driverConn.(*stdlib.Conn).Conn()

@@ -128,11 +130,11 @@ func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) {
 		}

 		tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin")
-		eventCount = tag.RowsAffected()
+		keyCount = tag.RowsAffected()

 		return err
 	})
 	logging.OnError(err).Fatal("unable to copy encryption keys to destination")
 	logging.OnError(<-errs).Fatal("unable to copy encryption keys from source")
-	logging.WithFields("took", time.Since(start), "count", eventCount).Info("encryption keys migrated")
+	logging.WithFields("took", time.Since(start), "count", keyCount).Info("encryption keys migrated")
 }

@@ -15,7 +15,6 @@ var (

 type BackChannelLogoutNotificationStart struct {
 	dbClient *database.DB
-	esClient *eventstore.Eventstore
 }

 func (mig *BackChannelLogoutNotificationStart) Execute(ctx context.Context, e eventstore.Event) error {
27	cmd/setup/54.go	Normal file
@@ -0,0 +1,27 @@
+package setup
+
+import (
+	"context"
+	_ "embed"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+var (
+	//go:embed 54.sql
+	instancePositionIndex string
+)
+
+type InstancePositionIndex struct {
+	dbClient *database.DB
+}
+
+func (mig *InstancePositionIndex) Execute(ctx context.Context, _ eventstore.Event) error {
+	_, err := mig.dbClient.ExecContext(ctx, instancePositionIndex)
+	return err
+}
+
+func (mig *InstancePositionIndex) String() string {
+	return "54_instance_position_index_again"
+}
1	cmd/setup/54.sql	Normal file
@@ -0,0 +1 @@
+CREATE INDEX CONCURRENTLY IF NOT EXISTS es_instance_position ON eventstore.events2 (instance_id, position);
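A hypothetical query shape this composite index is meant to serve (not part of the diff): reading an instance's events after a known position, as projection catch-up does per instance. The column list and limit are illustrative only:

SELECT * -- illustrative column list
FROM eventstore.events2
WHERE instance_id = $1
  AND position > $2
ORDER BY position
LIMIT 1000;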
27	cmd/setup/55.go	Normal file
@@ -0,0 +1,27 @@
+package setup
+
+import (
+	"context"
+	_ "embed"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+var (
+	//go:embed 55.sql
+	executionHandlerCurrentState string
+)
+
+type ExecutionHandlerStart struct {
+	dbClient *database.DB
+}
+
+func (mig *ExecutionHandlerStart) Execute(ctx context.Context, e eventstore.Event) error {
+	_, err := mig.dbClient.ExecContext(ctx, executionHandlerCurrentState, e.Sequence(), e.CreatedAt(), e.Position())
+	return err
+}
+
+func (mig *ExecutionHandlerStart) String() string {
+	return "55_execution_handler_start"
+}
22	cmd/setup/55.sql	Normal file
@@ -0,0 +1,22 @@
+INSERT INTO projections.current_states AS cs ( instance_id
+                                             , projection_name
+                                             , last_updated
+                                             , sequence
+                                             , event_date
+                                             , position
+                                             , filter_offset)
+SELECT instance_id
+     , 'projections.execution_handler'
+     , now()
+     , $1
+     , $2
+     , $3
+     , 0
+FROM eventstore.events2 AS e
+WHERE aggregate_type = 'instance'
+  AND event_type = 'instance.added'
+ON CONFLICT (instance_id, projection_name) DO UPDATE SET last_updated = EXCLUDED.last_updated,
+                                                         sequence = EXCLUDED.sequence,
+                                                         event_date = EXCLUDED.event_date,
+                                                         position = EXCLUDED.position,
+                                                         filter_offset = EXCLUDED.filter_offset;
27	cmd/setup/56.go	Normal file
@@ -0,0 +1,27 @@
+package setup
+
+import (
+	"context"
+	_ "embed"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+var (
+	//go:embed 56.sql
+	addSAMLFederatedLogout string
+)
+
+type IDPTemplate6SAMLFederatedLogout struct {
+	dbClient *database.DB
+}
+
+func (mig *IDPTemplate6SAMLFederatedLogout) Execute(ctx context.Context, _ eventstore.Event) error {
+	_, err := mig.dbClient.ExecContext(ctx, addSAMLFederatedLogout)
+	return err
+}
+
+func (mig *IDPTemplate6SAMLFederatedLogout) String() string {
+	return "56_idp_templates6_add_saml_federated_logout"
+}
1	cmd/setup/56.sql	Normal file
@@ -0,0 +1 @@
+ALTER TABLE IF EXISTS projections.idp_templates6_saml ADD COLUMN IF NOT EXISTS federated_logout_enabled BOOLEAN DEFAULT FALSE;
27	cmd/setup/57.go	Normal file
@@ -0,0 +1,27 @@
+package setup
+
+import (
+	"context"
+	_ "embed"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+var (
+	//go:embed 57.sql
+	createResourceCounts string
+)
+
+type CreateResourceCounts struct {
+	dbClient *database.DB
+}
+
+func (mig *CreateResourceCounts) Execute(ctx context.Context, _ eventstore.Event) error {
+	_, err := mig.dbClient.ExecContext(ctx, createResourceCounts)
+	return err
+}
+
+func (mig *CreateResourceCounts) String() string {
+	return "57_create_resource_counts"
+}
106	cmd/setup/57.sql	Normal file
@@ -0,0 +1,106 @@
+CREATE TABLE IF NOT EXISTS projections.resource_counts
+(
+    id SERIAL PRIMARY KEY, -- allows for easy pagination
+    instance_id TEXT NOT NULL,
+    table_name TEXT NOT NULL, -- needed for trigger matching, not in reports
+    parent_type TEXT NOT NULL,
+    parent_id TEXT NOT NULL,
+    resource_name TEXT NOT NULL, -- friendly name for reporting
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+    amount INTEGER NOT NULL DEFAULT 1 CHECK (amount >= 0),
+
+    UNIQUE (instance_id, parent_type, parent_id, table_name)
+);
+
+-- count_resource is a trigger function which increases or decreases the count of a resource.
+-- When creating the trigger the following required arguments (TG_ARGV) can be passed:
+-- 1. The type of the parent
+-- 2. The column name of the instance id
+-- 3. The column name of the owner id
+-- 4. The name of the resource
+CREATE OR REPLACE FUNCTION projections.count_resource()
+    RETURNS trigger
+    LANGUAGE 'plpgsql' VOLATILE
+AS $$
+DECLARE
+    -- trigger variables
+    tg_table_name TEXT := TG_TABLE_SCHEMA || '.' || TG_TABLE_NAME;
+    tg_parent_type TEXT := TG_ARGV[0];
+    tg_instance_id_column TEXT := TG_ARGV[1];
+    tg_parent_id_column TEXT := TG_ARGV[2];
+    tg_resource_name TEXT := TG_ARGV[3];
+
+    tg_instance_id TEXT;
+    tg_parent_id TEXT;
+
+    select_ids TEXT := format('SELECT ($1).%I, ($1).%I', tg_instance_id_column, tg_parent_id_column);
+BEGIN
+    IF (TG_OP = 'INSERT') THEN
+        EXECUTE select_ids INTO tg_instance_id, tg_parent_id USING NEW;
+
+        INSERT INTO projections.resource_counts(instance_id, table_name, parent_type, parent_id, resource_name)
+        VALUES (tg_instance_id, tg_table_name, tg_parent_type, tg_parent_id, tg_resource_name)
+        ON CONFLICT (instance_id, table_name, parent_type, parent_id) DO
+            UPDATE SET updated_at = now(), amount = projections.resource_counts.amount + 1;
+
+        RETURN NEW;
+    ELSEIF (TG_OP = 'DELETE') THEN
+        EXECUTE select_ids INTO tg_instance_id, tg_parent_id USING OLD;
+
+        UPDATE projections.resource_counts
+        SET updated_at = now(), amount = amount - 1
+        WHERE instance_id = tg_instance_id
+          AND table_name = tg_table_name
+          AND parent_type = tg_parent_type
+          AND parent_id = tg_parent_id
+          AND resource_name = tg_resource_name
+          AND amount > 0; -- prevent check failure on negative amount.
+
+        RETURN OLD;
+    END IF;
+END
+$$;
+
+-- delete_table_counts removes all resource counts for a TRUNCATED table.
+CREATE OR REPLACE FUNCTION projections.delete_table_counts()
+    RETURNS trigger
+    LANGUAGE 'plpgsql'
+AS $$
+DECLARE
+    -- trigger variables
+    tg_table_name TEXT := TG_TABLE_SCHEMA || '.' || TG_TABLE_NAME;
+BEGIN
+    DELETE FROM projections.resource_counts
+    WHERE table_name = tg_table_name;
+END
+$$;
+
+-- delete_parent_counts removes all resource counts for a deleted parent.
+-- 1. The type of the parent
+-- 2. The column name of the instance id
+-- 3. The column name of the owner id
+CREATE OR REPLACE FUNCTION projections.delete_parent_counts()
+    RETURNS trigger
+    LANGUAGE 'plpgsql'
+AS $$
+DECLARE
+    -- trigger variables
+    tg_parent_type TEXT := TG_ARGV[0];
+    tg_instance_id_column TEXT := TG_ARGV[1];
+    tg_parent_id_column TEXT := TG_ARGV[2];
+
+    tg_instance_id TEXT;
+    tg_parent_id TEXT;
+
+    select_ids TEXT := format('SELECT ($1).%I, ($1).%I', tg_instance_id_column, tg_parent_id_column);
+BEGIN
+    EXECUTE select_ids INTO tg_instance_id, tg_parent_id USING OLD;
+
+    DELETE FROM projections.resource_counts
+    WHERE instance_id = tg_instance_id
+      AND parent_type = tg_parent_type
+      AND parent_id = tg_parent_id;
+
+    RETURN OLD;
+END
+$$;
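Illustrative only: how the TG_ARGV arguments documented in count_resource might be passed when attaching the function to a projection table. Trigger and table names here are hypothetical; the actual statements are generated by migration.CountTrigger and migration.DeleteParentCountsTrigger (see cmd/setup/trigger_steps.go below):

CREATE TRIGGER count_resource_example
    AFTER INSERT OR DELETE ON projections.example_table -- hypothetical table
    FOR EACH ROW
    EXECUTE FUNCTION projections.count_resource(
        'organization',   -- TG_ARGV[0]: type of the parent
        'instance_id',    -- TG_ARGV[1]: column holding the instance id
        'resource_owner', -- TG_ARGV[2]: column holding the owner id
        'example'         -- TG_ARGV[3]: resource name used in reports
    );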
49	cmd/setup/58.go	Normal file
@@ -0,0 +1,49 @@
+package setup
+
+import (
+	"context"
+	"database/sql"
+	"embed"
+	"fmt"
+
+	"github.com/zitadel/logging"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+var (
+	//go:embed 58/*.sql
+	replaceLoginNames3View embed.FS
+)
+
+type ReplaceLoginNames3View struct {
+	dbClient *database.DB
+}
+
+func (mig *ReplaceLoginNames3View) Execute(ctx context.Context, _ eventstore.Event) error {
+	var exists bool
+	err := mig.dbClient.QueryRowContext(ctx, func(r *sql.Row) error {
+		return r.Scan(&exists)
+	}, "SELECT exists(SELECT 1 from information_schema.views WHERE table_schema = 'projections' AND table_name = 'login_names3')")
+
+	if err != nil || !exists {
+		return err
+	}
+
+	statements, err := readStatements(replaceLoginNames3View, "58")
+	if err != nil {
+		return err
+	}
+	for _, stmt := range statements {
+		logging.WithFields("file", stmt.file, "migration", mig.String()).Info("execute statement")
+		if _, err := mig.dbClient.ExecContext(ctx, stmt.query); err != nil {
+			return fmt.Errorf("%s %s: %w", mig.String(), stmt.file, err)
+		}
+	}
+	return nil
+}
+
+func (mig *ReplaceLoginNames3View) String() string {
+	return "58_replace_login_names3_view"
+}
36	cmd/setup/58/01_update_login_names3_view.sql	Normal file
@@ -0,0 +1,36 @@
+CREATE OR REPLACE VIEW projections.login_names3 AS
+SELECT
+    u.id AS user_id
+    , CASE
+        WHEN p.must_be_domain THEN CONCAT(u.user_name, '@', d.name)
+        ELSE u.user_name
+      END AS login_name
+    , COALESCE(d.is_primary, TRUE) AS is_primary
+    , u.instance_id
+FROM
+    projections.login_names3_users AS u
+LEFT JOIN LATERAL (
+    SELECT
+        must_be_domain
+        , is_default
+    FROM
+        projections.login_names3_policies AS p
+    WHERE
+        (
+            p.instance_id = u.instance_id
+            AND NOT p.is_default
+            AND p.resource_owner = u.resource_owner
+        ) OR (
+            p.instance_id = u.instance_id
+            AND p.is_default
+        )
+    ORDER BY
+        p.is_default -- custom first
+    LIMIT 1
+) AS p ON TRUE
+LEFT JOIN
+    projections.login_names3_domains d
+    ON
+        p.must_be_domain
+        AND u.resource_owner = d.resource_owner
+        AND u.instance_id = d.instance_id
1	cmd/setup/58/02_create_index.sql	Normal file
@@ -0,0 +1 @@
+CREATE INDEX CONCURRENTLY IF NOT EXISTS login_names3_policies_is_default_owner_idx ON projections.login_names3_policies (instance_id, is_default, resource_owner) INCLUDE (must_be_domain)
@@ -150,6 +150,11 @@ type Steps struct {
 	s51IDPTemplate6RootCA              *IDPTemplate6RootCA
 	s52IDPTemplate6LDAP2               *IDPTemplate6LDAP2
 	s53InitPermittedOrgsFunction       *InitPermittedOrgsFunction53
+	s54InstancePositionIndex           *InstancePositionIndex
+	s55ExecutionHandlerStart           *ExecutionHandlerStart
+	s56IDPTemplate6SAMLFederatedLogout *IDPTemplate6SAMLFederatedLogout
+	s57CreateResourceCounts            *CreateResourceCounts
+	s58ReplaceLoginNames3View          *ReplaceLoginNames3View
 }

 func MustNewSteps(v *viper.Viper) *Steps {
@@ -198,7 +198,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
 	steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: dbClient}
 	steps.s36FillV2Milestones = &FillV3Milestones{dbClient: dbClient, eventstore: eventstoreClient}
 	steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: dbClient}
-	steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: dbClient, esClient: eventstoreClient}
+	steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: dbClient}
 	steps.s40InitPushFunc = &InitPushFunc{dbClient: dbClient}
 	steps.s42Apps7OIDCConfigsLoginVersion = &Apps7OIDCConfigsLoginVersion{dbClient: dbClient}
 	steps.s43CreateFieldsDomainIndex = &CreateFieldsDomainIndex{dbClient: dbClient}
@@ -212,6 +212,11 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
 	steps.s51IDPTemplate6RootCA = &IDPTemplate6RootCA{dbClient: dbClient}
 	steps.s52IDPTemplate6LDAP2 = &IDPTemplate6LDAP2{dbClient: dbClient}
 	steps.s53InitPermittedOrgsFunction = &InitPermittedOrgsFunction53{dbClient: dbClient}
+	steps.s54InstancePositionIndex = &InstancePositionIndex{dbClient: dbClient}
+	steps.s55ExecutionHandlerStart = &ExecutionHandlerStart{dbClient: dbClient}
+	steps.s56IDPTemplate6SAMLFederatedLogout = &IDPTemplate6SAMLFederatedLogout{dbClient: dbClient}
+	steps.s57CreateResourceCounts = &CreateResourceCounts{dbClient: dbClient}
+	steps.s58ReplaceLoginNames3View = &ReplaceLoginNames3View{dbClient: dbClient}

 	err = projection.Create(ctx, dbClient, eventstoreClient, config.Projections, nil, nil, nil)
 	logging.OnError(err).Fatal("unable to start projections")
@@ -254,6 +259,11 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
 		steps.s51IDPTemplate6RootCA,
 		steps.s52IDPTemplate6LDAP2,
 		steps.s53InitPermittedOrgsFunction,
+		steps.s54InstancePositionIndex,
+		steps.s55ExecutionHandlerStart,
+		steps.s56IDPTemplate6SAMLFederatedLogout,
+		steps.s57CreateResourceCounts,
+		steps.s58ReplaceLoginNames3View,
 	} {
 		setupErr = executeMigration(ctx, eventstoreClient, step, "migration failed")
 		if setupErr != nil {
@@ -293,6 +303,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
 			dbClient: dbClient,
 		},
 	}
+	repeatableSteps = append(repeatableSteps, triggerSteps(dbClient)...)

 	for _, repeatableStep := range repeatableSteps {
 		setupErr = executeMigration(ctx, eventstoreClient, repeatableStep, "unable to migrate repeatable step")
125	cmd/setup/trigger_steps.go	Normal file
@@ -0,0 +1,125 @@
+package setup
+
+import (
+	"fmt"
+
+	"github.com/zitadel/zitadel/internal/database"
+	"github.com/zitadel/zitadel/internal/domain"
+	"github.com/zitadel/zitadel/internal/migration"
+	"github.com/zitadel/zitadel/internal/query/projection"
+)
+
+// triggerSteps defines the repeatable migrations that set up triggers
+// for counting resources in the database.
+func triggerSteps(db *database.DB) []migration.RepeatableMigration {
+	return []migration.RepeatableMigration{
+		// Delete parent count triggers for instances and organizations
+		migration.DeleteParentCountsTrigger(db,
+			projection.InstanceProjectionTable,
+			domain.CountParentTypeInstance,
+			projection.InstanceColumnID,
+			projection.InstanceColumnID,
+			"instance",
+		),
+		migration.DeleteParentCountsTrigger(db,
+			projection.OrgProjectionTable,
+			domain.CountParentTypeOrganization,
+			projection.OrgColumnInstanceID,
+			projection.OrgColumnID,
+			"organization",
+		),
+
+		// Count triggers for all the resources
+		migration.CountTrigger(db,
+			projection.OrgProjectionTable,
+			domain.CountParentTypeInstance,
+			projection.OrgColumnInstanceID,
+			projection.OrgColumnInstanceID,
+			"organization",
+		),
+		migration.CountTrigger(db,
+			projection.ProjectProjectionTable,
+			domain.CountParentTypeOrganization,
+			projection.ProjectColumnInstanceID,
+			projection.ProjectColumnResourceOwner,
+			"project",
+		),
+		migration.CountTrigger(db,
+			projection.UserTable,
+			domain.CountParentTypeOrganization,
+			projection.UserInstanceIDCol,
+			projection.UserResourceOwnerCol,
+			"user",
+		),
+		migration.CountTrigger(db,
+			projection.InstanceMemberProjectionTable,
+			domain.CountParentTypeInstance,
+			projection.MemberInstanceID,
+			projection.MemberResourceOwner,
+			"iam_admin",
+		),
+		migration.CountTrigger(db,
+			projection.IDPTable,
+			domain.CountParentTypeInstance,
+			projection.IDPInstanceIDCol,
+			projection.IDPInstanceIDCol,
+			"identity_provider",
+		),
+		migration.CountTrigger(db,
+			projection.IDPTemplateLDAPTable,
+			domain.CountParentTypeInstance,
+			projection.LDAPInstanceIDCol,
+			projection.LDAPInstanceIDCol,
+			"identity_provider_ldap",
+		),
+		migration.CountTrigger(db,
+			projection.ActionTable,
+			domain.CountParentTypeInstance,
+			projection.ActionInstanceIDCol,
+			projection.ActionInstanceIDCol,
+			"action_v1",
+		),
+		migration.CountTrigger(db,
+			projection.ExecutionTable,
+			domain.CountParentTypeInstance,
+			projection.ExecutionInstanceIDCol,
+			projection.ExecutionInstanceIDCol,
+			"execution",
+		),
+		migration.CountTrigger(db,
+			fmt.Sprintf("%s_%s", projection.ExecutionTable, projection.ExecutionTargetSuffix),
+			domain.CountParentTypeInstance,
+			projection.ExecutionTargetInstanceIDCol,
+			projection.ExecutionTargetInstanceIDCol,
+			"execution_target",
+		),
+		migration.CountTrigger(db,
+			projection.LoginPolicyTable,
+			domain.CountParentTypeInstance,
+			projection.LoginPolicyInstanceIDCol,
+			projection.LoginPolicyInstanceIDCol,
+			"login_policy",
+		),
+		migration.CountTrigger(db,
+			projection.PasswordComplexityTable,
+			domain.CountParentTypeInstance,
+			projection.ComplexityPolicyInstanceIDCol,
+			projection.ComplexityPolicyInstanceIDCol,
+			"password_complexity_policy",
+		),
+		migration.CountTrigger(db,
+			projection.PasswordAgeTable,
+			domain.CountParentTypeInstance,
+			projection.AgePolicyInstanceIDCol,
+			projection.AgePolicyInstanceIDCol,
+			"password_expiry_policy",
+		),
+		migration.CountTrigger(db,
+			projection.LockoutPolicyTable,
+			domain.CountParentTypeInstance,
+			projection.LockoutPolicyInstanceIDCol,
+			projection.LockoutPolicyInstanceIDCol,
+			"lockout_policy",
+		),
+	}
+}
@@ -40,11 +40,13 @@ import (
 	feature_v2 "github.com/zitadel/zitadel/internal/api/grpc/feature/v2"
 	feature_v2beta "github.com/zitadel/zitadel/internal/api/grpc/feature/v2beta"
 	idp_v2 "github.com/zitadel/zitadel/internal/api/grpc/idp/v2"
+	instance "github.com/zitadel/zitadel/internal/api/grpc/instance/v2beta"
 	"github.com/zitadel/zitadel/internal/api/grpc/management"
 	oidc_v2 "github.com/zitadel/zitadel/internal/api/grpc/oidc/v2"
 	oidc_v2beta "github.com/zitadel/zitadel/internal/api/grpc/oidc/v2beta"
 	org_v2 "github.com/zitadel/zitadel/internal/api/grpc/org/v2"
 	org_v2beta "github.com/zitadel/zitadel/internal/api/grpc/org/v2beta"
+	project_v2beta "github.com/zitadel/zitadel/internal/api/grpc/project/v2beta"
 	"github.com/zitadel/zitadel/internal/api/grpc/resources/debug_events/debug_events"
 	user_v3_alpha "github.com/zitadel/zitadel/internal/api/grpc/resources/user/v3alpha"
 	userschema_v3_alpha "github.com/zitadel/zitadel/internal/api/grpc/resources/userschema/v3alpha"
@@ -72,12 +74,14 @@ import (
 	"github.com/zitadel/zitadel/internal/authz"
 	authz_repo "github.com/zitadel/zitadel/internal/authz/repository"
 	authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
+	"github.com/zitadel/zitadel/internal/cache"
 	"github.com/zitadel/zitadel/internal/cache/connector"
 	"github.com/zitadel/zitadel/internal/command"
 	"github.com/zitadel/zitadel/internal/crypto"
 	cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
 	"github.com/zitadel/zitadel/internal/database"
 	"github.com/zitadel/zitadel/internal/domain"
+	"github.com/zitadel/zitadel/internal/domain/federatedlogout"
 	"github.com/zitadel/zitadel/internal/eventstore"
 	old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
 	new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
@@ -304,7 +308,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server

 	execution.Register(
 		ctx,
-		config.Projections.Customizations["executions"],
+		config.Projections.Customizations["execution_handler"],
 		config.Executions,
 		queries,
 		eventstoreClient.EventTypes(),
@@ -442,6 +446,9 @@ func startAPIs(
 	if err := apis.RegisterServer(ctx, system.CreateServer(commands, queries, config.Database.DatabaseName(), config.DefaultInstance, config.ExternalDomain), tlsConfig); err != nil {
 		return nil, err
 	}
+	if err := apis.RegisterService(ctx, instance.CreateServer(commands, queries, config.Database.DatabaseName(), config.DefaultInstance, config.ExternalDomain)); err != nil {
+		return nil, err
+	}
 	if err := apis.RegisterServer(ctx, admin.CreateServer(config.Database.DatabaseName(), commands, queries, keys.User, config.AuditLogRetention), tlsConfig); err != nil {
 		return nil, err
 	}
@@ -454,7 +461,7 @@ func startAPIs(
 	if err := apis.RegisterService(ctx, user_v2beta.CreateServer(commands, queries, keys.User, keys.IDPConfig, idp.CallbackURL(), idp.SAMLRootURL(), assets.AssetAPI(), permissionCheck)); err != nil {
 		return nil, err
 	}
-	if err := apis.RegisterService(ctx, user_v2.CreateServer(commands, queries, keys.User, keys.IDPConfig, idp.CallbackURL(), idp.SAMLRootURL(), assets.AssetAPI(), permissionCheck)); err != nil {
+	if err := apis.RegisterService(ctx, user_v2.CreateServer(config.SystemDefaults, commands, queries, keys.User, keys.IDPConfig, idp.CallbackURL(), idp.SAMLRootURL(), assets.AssetAPI(), permissionCheck)); err != nil {
 		return nil, err
 	}
 	if err := apis.RegisterService(ctx, session_v2beta.CreateServer(commands, queries, permissionCheck)); err != nil {
@@ -463,7 +470,7 @@ func startAPIs(
 	if err := apis.RegisterService(ctx, settings_v2beta.CreateServer(commands, queries)); err != nil {
 		return nil, err
 	}
-	if err := apis.RegisterService(ctx, org_v2beta.CreateServer(commands, queries, permissionCheck)); err != nil {
+	if err := apis.RegisterService(ctx, org_v2beta.CreateServer(config.SystemDefaults, commands, queries, permissionCheck)); err != nil {
 		return nil, err
 	}
 	if err := apis.RegisterService(ctx, feature_v2beta.CreateServer(commands, queries)); err != nil {
@@ -487,6 +494,9 @@ func startAPIs(
 	if err := apis.RegisterService(ctx, action_v2_beta.CreateServer(config.SystemDefaults, commands, queries, domain.AllActionFunctions, apis.ListGrpcMethods, apis.ListGrpcServices)); err != nil {
 		return nil, err
 	}
+	if err := apis.RegisterService(ctx, project_v2beta.CreateServer(config.SystemDefaults, commands, queries, permissionCheck)); err != nil {
+		return nil, err
+	}
 	if err := apis.RegisterService(ctx, userschema_v3_alpha.CreateServer(config.SystemDefaults, commands, queries)); err != nil {
 		return nil, err
 	}
@@ -503,7 +513,12 @@ func startAPIs(
 	assetsCache := middleware.AssetsCacheInterceptor(config.AssetStorage.Cache.MaxAge, config.AssetStorage.Cache.SharedMaxAge)
 	apis.RegisterHandlerOnPrefix(assets.HandlerPrefix, assets.NewHandler(commands, verifier, config.SystemAuthZ, config.InternalAuthZ, id.SonyFlakeGenerator(), store, queries, middleware.CallDurationHandler, instanceInterceptor.Handler, assetsCache.Handler, limitingAccessInterceptor.Handle))

-	apis.RegisterHandlerOnPrefix(idp.HandlerPrefix, idp.NewHandler(commands, queries, keys.IDPConfig, instanceInterceptor.Handler))
+	federatedLogoutsCache, err := connector.StartCache[federatedlogout.Index, string, *federatedlogout.FederatedLogout](ctx, []federatedlogout.Index{federatedlogout.IndexRequestID}, cache.PurposeFederatedLogout, cacheConnectors.Config.FederatedLogouts, cacheConnectors)
+	if err != nil {
+		return nil, err
+	}
+
+	apis.RegisterHandlerOnPrefix(idp.HandlerPrefix, idp.NewHandler(commands, queries, keys.IDPConfig, instanceInterceptor.Handler, federatedLogoutsCache))

 	userAgentInterceptor, err := middleware.NewUserAgentHandler(config.UserAgentCookie, keys.UserAgentCookieKey, id.SonyFlakeGenerator(), config.ExternalSecure, login.EndpointResources, login.EndpointExternalLoginCallbackFormPost, login.EndpointSAMLACS)
 	if err != nil {
@@ -524,7 +539,25 @@ func startAPIs(
 	}
 	apis.RegisterHandlerOnPrefix(openapi.HandlerPrefix, openAPIHandler)

-	oidcServer, err := oidc.NewServer(ctx, config.OIDC, login.DefaultLoggedOutPath, config.ExternalSecure, commands, queries, authRepo, keys.OIDC, keys.OIDCKey, eventstore, dbClient, userAgentInterceptor, instanceInterceptor.Handler, limitingAccessInterceptor, config.Log.Slog(), config.SystemDefaults.SecretHasher)
+	oidcServer, err := oidc.NewServer(
+		ctx,
+		config.OIDC,
+		login.DefaultLoggedOutPath,
+		config.ExternalSecure,
+		commands,
+		queries,
+		authRepo,
+		keys.OIDC,
+		keys.OIDCKey,
+		eventstore,
+		dbClient,
+		userAgentInterceptor,
+		instanceInterceptor.Handler,
+		limitingAccessInterceptor,
+		config.Log.Slog(),
+		config.SystemDefaults.SecretHasher,
+		federatedLogoutsCache,
+	)
 	if err != nil {
 		return nil, fmt.Errorf("unable to start oidc provider: %w", err)
 	}
@@ -573,6 +606,7 @@ func startAPIs(
 		keys.IDPConfig,
 		keys.CSRFCookieKey,
 		cacheConnectors,
+		federatedLogoutsCache,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("unable to start login: %w", err)
@@ -1,6 +1,8 @@
 package start

 import (
+	"context"
+
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"github.com/zitadel/logging"
@@ -29,14 +31,19 @@ Requirements:
 			masterKey, err := key.MasterKey(cmd)
 			logging.OnError(err).Panic("No master key provided")

-			initialise.InitAll(cmd.Context(), initialise.MustNewConfig(viper.GetViper()))
+			initCtx, cancel := context.WithCancel(cmd.Context())
+			initialise.InitAll(initCtx, initialise.MustNewConfig(viper.GetViper()))
+			cancel()

 			err = setup.BindInitProjections(cmd)
 			logging.OnError(err).Fatal("unable to bind \"init-projections\" flag")

 			setupConfig := setup.MustNewConfig(viper.GetViper())
 			setupSteps := setup.MustNewSteps(viper.New())
-			setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)
+
+			setupCtx, cancel := context.WithCancel(cmd.Context())
+			setup.Setup(setupCtx, setupConfig, setupSteps, masterKey)
+			cancel()

 			startConfig := MustNewConfig(viper.GetViper())

@@ -1,6 +1,8 @@
 package start

 import (
+	"context"
+
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"github.com/zitadel/logging"
@@ -34,7 +36,10 @@ Requirements:

 			setupConfig := setup.MustNewConfig(viper.GetViper())
 			setupSteps := setup.MustNewSteps(viper.New())
-			setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey)
+
+			setupCtx, cancel := context.WithCancel(cmd.Context())
+			setup.Setup(setupCtx, setupConfig, setupSteps, masterKey)
+			cancel()

 			startConfig := MustNewConfig(viper.GetViper())
