feat(eventstore): increase parallel write capabilities (#5940)

This implementation increases parallel write capabilities of the eventstore.
Please have a look at the technical advisories: [05](https://zitadel.com/docs/support/advisory/a10005) and  [06](https://zitadel.com/docs/support/advisory/a10006).
The implementation of eventstore.push is rewritten and stored events are migrated to a new table `eventstore.events2`.
If you are using cockroach: make sure that the database user of ZITADEL has `VIEWACTIVITY` grant. This is used to query events.
This commit is contained in:
Silvan 2023-10-19 12:19:10 +02:00 committed by GitHub
parent 259faba3f0
commit b5564572bc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
791 changed files with 30326 additions and 43202 deletions

3
.gitignore vendored
View File

@ -16,7 +16,7 @@ profile.cov
*.out *.out
#Debug #Debug
__debug_bin __debug_bin*
debug debug
sandbox.go sandbox.go
/cmd/dev/ /cmd/dev/
@ -48,6 +48,7 @@ cmd/zitadel/zitadel
tmp/ tmp/
console/src/app/proto/generated/ console/src/app/proto/generated/
**.pb.go **.pb.go
!pkg/grpc/protoc/v2/options.pb.go
**.proto.mock.go **.proto.mock.go
**.pb.*.go **.pb.*.go
**.gen.go **.gen.go

View File

@ -78,13 +78,17 @@ HTTP1HostHeader: "host" # ZITADEL_HTTP1HOSTHEADER
WebAuthNName: ZITADEL # ZITADEL_WEBAUTHN_NAME WebAuthNName: ZITADEL # ZITADEL_WEBAUTHN_NAME
Database: Database:
# This setting defines the ratio of how many connections defined below
# are used to push events. ZITADEL manages two database connection pools
# one to push events and one for the remaining queries.
EventPushConnRatio: 0.2 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO
# CockroachDB is the default database of ZITADEL # CockroachDB is the default database of ZITADEL
cockroach: cockroach:
Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST
Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT
Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE
MaxOpenConns: 20 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS MaxOpenConns: 40 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS
MaxIdleConns: 10 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS MaxIdleConns: 20 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS
MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME
MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME
Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS
@ -177,14 +181,16 @@ AssetStorage:
# The Projections section defines the behavior for the scheduled and synchronous events projections. # The Projections section defines the behavior for the scheduled and synchronous events projections.
Projections: Projections:
# The maximum duration a transaction remains open
# before it stops folding additional events
# and updates the table.
TransactionDuration: 500ms # ZITADEL_PROJECTIONS_TRANSACTIONDURATION
# Time interval between scheduled projections # Time interval between scheduled projections
RequeueEvery: 60s # ZITADEL_PROJECTIONS_REQUEUEEVERY RequeueEvery: 60s # ZITADEL_PROJECTIONS_REQUEUEEVERY
# Time between retried database statements resulting from projected events # Time between retried database statements resulting from projected events
RetryFailedAfter: 1s # ZITADEL_PROJECTIONS_RETRYFAILED RetryFailedAfter: 1s # ZITADEL_PROJECTIONS_RETRYFAILED
# Retried execution number of database statements resulting from projected events # Retried execution number of database statements resulting from projected events
MaxFailureCount: 5 # ZITADEL_PROJECTIONS_MAXFAILURECOUNT MaxFailureCount: 5 # ZITADEL_PROJECTIONS_MAXFAILURECOUNT
# Number of concurrent projection routines. Values of 0 and below are overwritten to 1
ConcurrentInstances: 1 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES
# Limit of returned events per query # Limit of returned events per query
BulkLimit: 200 # ZITADEL_PROJECTIONS_BULKLIMIT BulkLimit: 200 # ZITADEL_PROJECTIONS_BULKLIMIT
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe # Only instances are projected, for which at least a projection-relevant event exists within the timeframe
@ -194,11 +200,17 @@ Projections:
# In the Customizations section, all settings from above can be overwritten for each specific projection # In the Customizations section, all settings from above can be overwritten for each specific projection
Customizations: Customizations:
Projects: Projects:
BulkLimit: 2000 # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_PROJECTS_BULKLIMIT TransactionDuration: 2s
# The Notifications projection is used for sending emails and SMS to users # The Notifications projection is used for sending emails and SMS to users
Notifications: Notifications:
# As notification projections don't result in database statements, retries don't have an effect # As notification projections don't result in database statements, retries don't have an effect
MaxFailureCount: 0 # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONS_MAXFAILURECOUNT MaxFailureCount: 10 # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONS_MAXFAILURECOUNT
# Sending emails can take longer than 500ms
TransactionDuration: 5s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONS_TRANSACTIONDURATION
password_complexities:
TransactionDuration: 2s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_PASSWORD_COMPLEXITIES_TRANSACTIONDURATION
lockout_policy:
TransactionDuration: 2s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_LOCKOUT_POLICY_TRANSACTIONDURATION
# The NotificationsQuotas projection is used for calling quota webhooks # The NotificationsQuotas projection is used for calling quota webhooks
NotificationsQuotas: NotificationsQuotas:
# In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances. # In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances.
@ -207,9 +219,13 @@ Projections:
# Defaults to 45 days # Defaults to 45 days
HandleActiveInstances: 1080h # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_HANDLEACTIVEINSTANCES HandleActiveInstances: 1080h # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_HANDLEACTIVEINSTANCES
# As quota notification projections don't result in database statements, retries don't have an effect # As quota notification projections don't result in database statements, retries don't have an effect
MaxFailureCount: 0 # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_MAXFAILURECOUNT MaxFailureCount: 10 # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_MAXFAILURECOUNT
# Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the db too much. # Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the db too much.
RequeueEvery: 300s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_REQUEUEEVERY RequeueEvery: 300s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_REQUEUEEVERY
# Calling quota webhooks can take longer than 500ms
TransactionDuration: 5s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_NOTIFICATIONSQUOTAS_TRANSACTIONDURATION
milestones:
BulkLimit: 50
# The Telemetry projection is used for calling telemetry webhooks # The Telemetry projection is used for calling telemetry webhooks
Telemetry: Telemetry:
# In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances. # In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances.
@ -223,20 +239,34 @@ Projections:
RequeueEvery: 3300s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_TELEMETRY_REQUEUEEVERY RequeueEvery: 3300s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_TELEMETRY_REQUEUEEVERY
Auth: Auth:
# See Projections.BulkLimit
SearchLimit: 1000 # ZITADEL_AUTH_SEARCHLIMIT SearchLimit: 1000 # ZITADEL_AUTH_SEARCHLIMIT
Spooler: Spooler:
ConcurrentWorkers: 1 # ZITADEL_AUTH_SPOOLER_CONCURRENTWORKERS # See Projections.TransactionDuration
ConcurrentInstances: 1 # ZITADEL_AUTH_SPOOLER_CONCURRENTINSTANCES TransactionDuration: 10s #ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION
BulkLimit: 10000 # ZITADEL_AUTH_SPOOLER_BULKLIMIT # See Projections.BulkLimit
FailureCountUntilSkip: 5 # ZITADEL_AUTH_SPOOLER_FAILURECOUNTUNTILSKIP BulkLimit: 100 #ZITADEL_AUTH_SPOOLER_BULKLIMIT
# See Projections.MaxFailureCount
FailureCountUntilSkip: 5 #ZITADEL_AUTH_SPOOLER_FAILURECOUNTUNTILSKIP
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe
# from HandleActiveInstances duration in the past until the projections current time
# Defaults to twice the RequeueEvery duration
HandleActiveInstances: 120s #ZITADEL_AUTH_SPOOLER_HANDLEACTIVEINSTANCES
Admin: Admin:
# See Projections.BulkLimit
SearchLimit: 1000 # ZITADEL_ADMIN_SEARCHLIMIT SearchLimit: 1000 # ZITADEL_ADMIN_SEARCHLIMIT
Spooler: Spooler:
ConcurrentWorkers: 1 # ZITADEL_ADMIN_SPOOLER_CONCURRENTWORKERS # See Projections.TransactionDuration
ConcurrentInstances: 1 # ZITADEL_ADMIN_SPOOLER_CONCURRENTINSTANCES TransactionDuration: 10s
BulkLimit: 10000 # ZITADEL_ADMIN_SPOOLER_BULKLIMIT # See Projections.BulkLimit
FailureCountUntilSkip: 5 # ZITADEL_ADMIN_SPOOLER_FAILURECOUNTUNTILSKIP BulkLimit: 200
# See Projections.MaxFailureCount
FailureCountUntilSkip: 5
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe
# from HandleActiveInstances duration in the past until the projections current time
# Defaults to twice the RequeueEvery duration
HandleActiveInstances: 120s
UserAgentCookie: UserAgentCookie:
Name: zitadel.useragent # ZITADEL_USERAGENTCOOKIE_NAME Name: zitadel.useragent # ZITADEL_USERAGENTCOOKIE_NAME
@ -322,10 +352,12 @@ Console:
Notification: Notification:
Repository: Repository:
Spooler: Spooler:
ConcurrentWorkers: 1 # ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_CONCURRENTWORKERS # See Projections.TransactionDuration
ConcurrentInstances: 10 # ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_CONCURRENTINSTANCES TransactionDuration: 10s #ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_TRANSACTIONDURATION
BulkLimit: 10000 # ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_BULKLIMIT # See Projections.BulkLimit
FailureCountUntilSkip: 5 # ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_FAILURECOUNTUNTILSKIP BulkLimit: 200 #ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_BULKLIMIT
# See Projections.MaxFailureCount
FailureCountUntilSkip: 5 #ZITADEL_NOTIFICATION_REPOSITORY_SPOOLER_FAILURECOUNTUNTILSKIP
Handlers: Handlers:
EncryptionKeys: EncryptionKeys:
@ -477,8 +509,8 @@ Quotas:
MaxBulkSize: 0 # ZITADEL_QUOTAS_EXECUTION_DEBOUNCE_MAXBULKSIZE MaxBulkSize: 0 # ZITADEL_QUOTAS_EXECUTION_DEBOUNCE_MAXBULKSIZE
Eventstore: Eventstore:
PushTimeout: 15s # ZITADEL_EVENTSTORE_PUSHTIMEOUT # Sets the maximum duration of transactions pushing events
AllowOrderByCreationDate: false # ZITADEL_EVENTSTORE_ALLOWORDERBYCREATIONDATE PushTimeout: 15s #ZITADEL_EVENTSTORE_PUSHTIMEOUT
DefaultInstance: DefaultInstance:
InstanceName: ZITADEL # ZITADEL_DEFAULTINSTANCE_INSTANCENAME InstanceName: ZITADEL # ZITADEL_DEFAULTINSTANCE_INSTANCENAME

View File

@ -1,13 +1,13 @@
package initialise package initialise
import ( import (
"database/sql"
"errors" "errors"
"github.com/jackc/pgconn" "github.com/jackc/pgconn"
"github.com/zitadel/zitadel/internal/database"
) )
func exec(db *sql.DB, stmt string, possibleErrCodes []string, args ...interface{}) error { func exec(db *database.DB, stmt string, possibleErrCodes []string, args ...interface{}) error {
_, err := db.Exec(stmt, args...) _, err := db.Exec(stmt, args...)
pgErr := new(pgconn.PgError) pgErr := new(pgconn.PgError)
if errors.As(err, &pgErr) { if errors.As(err, &pgErr) {

View File

@ -1,7 +1,6 @@
package initialise package initialise
import ( import (
"database/sql"
"embed" "embed"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -68,7 +67,7 @@ func InitAll(config *Config) {
logging.OnError(err).Fatal("unable to initialize ZITADEL") logging.OnError(err).Fatal("unable to initialize ZITADEL")
} }
func initialise(config database.Config, steps ...func(*sql.DB) error) error { func initialise(config database.Config, steps ...func(*database.DB) error) error {
logging.Info("initialization started") logging.Info("initialization started")
err := ReadStmts(config.Type()) err := ReadStmts(config.Type())
@ -76,16 +75,16 @@ func initialise(config database.Config, steps ...func(*sql.DB) error) error {
return err return err
} }
db, err := database.Connect(config, true) db, err := database.Connect(config, true, false)
if err != nil { if err != nil {
return err return err
} }
defer db.Close() defer db.Close()
return Init(db.DB, steps...) return Init(db, steps...)
} }
func Init(db *sql.DB, steps ...func(*sql.DB) error) error { func Init(db *database.DB, steps ...func(*database.DB) error) error {
for _, step := range steps { for _, step := range steps {
if err := step(db); err != nil { if err := step(db); err != nil {
return err return err

View File

@ -1,17 +1,17 @@
package initialise package initialise
import ( import (
"database/sql"
"database/sql/driver" "database/sql/driver"
"regexp" "regexp"
"testing" "testing"
"github.com/DATA-DOG/go-sqlmock" "github.com/DATA-DOG/go-sqlmock"
"github.com/zitadel/zitadel/internal/database"
) )
type db struct { type db struct {
mock sqlmock.Sqlmock mock sqlmock.Sqlmock
db *sql.DB db *database.DB
} }
func prepareDB(t *testing.T, expectations ...expectation) db { func prepareDB(t *testing.T, expectations ...expectation) db {
@ -25,7 +25,7 @@ func prepareDB(t *testing.T, expectations ...expectation) db {
} }
return db{ return db{
mock: mock, mock: mock,
db: client, db: &database.DB{DB: client},
} }
} }
@ -42,6 +42,20 @@ func expectExec(stmt string, err error, args ...driver.Value) expectation {
} }
} }
func expectQuery(stmt string, err error, columns []string, rows [][]driver.Value, args ...driver.Value) expectation {
return func(m sqlmock.Sqlmock) {
res := sqlmock.NewRows(columns)
for _, row := range rows {
res.AddRow(row...)
}
query := m.ExpectQuery(regexp.QuoteMeta(stmt)).WithArgs(args...).WillReturnRows(res)
if err != nil {
query.WillReturnError(err)
return
}
}
}
func expectBegin(err error) expectation { func expectBegin(err error) expectation {
return func(m sqlmock.Sqlmock) { return func(m sqlmock.Sqlmock) {
query := m.ExpectBegin() query := m.ExpectBegin()

View File

@ -1,3 +1,4 @@
-- replace the first %[1]s with the database -- replace the first %[1]s with the database
-- replace the second \%[2]s with the user -- replace the second \%[2]s with the user
GRANT ALL ON DATABASE %[1]s TO %[2]s GRANT ALL ON DATABASE %[1]s TO %[2]s;
GRANT SYSTEM VIEWACTIVITY TO %[2]s;

View File

@ -1,27 +1,21 @@
SET experimental_enable_hash_sharded_indexes = on; CREATE TABLE IF NOT EXISTS eventstore.events2 (
instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_type TEXT NOT NULL
, "sequence" BIGINT NOT NULL
, revision SMALLINT NOT NULL
, created_at TIMESTAMPTZ NOT NULL
, payload JSONB
, creator TEXT NOT NULL
, "owner" TEXT NOT NULL
, "position" DECIMAL NOT NULL
, in_tx_order INTEGER NOT NULL
CREATE TABLE IF NOT EXISTS eventstore.events ( , PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
id UUID DEFAULT gen_random_uuid() , INDEX es_active_instances (created_at DESC) STORING ("position")
, event_type TEXT NOT NULL , INDEX es_wm (aggregate_id, instance_id, aggregate_type, event_type)
, aggregate_type TEXT NOT NULL , INDEX es_projection (instance_id, aggregate_type, event_type, "position" DESC)
, aggregate_id TEXT NOT NULL );
, aggregate_version TEXT NOT NULL
, event_sequence BIGINT NOT NULL
, previous_aggregate_sequence BIGINT
, previous_aggregate_type_sequence INT8
, creation_date TIMESTAMPTZ NOT NULL DEFAULT now()
, event_data JSONB
, editor_user TEXT NOT NULL
, editor_service TEXT NOT NULL
, resource_owner TEXT NOT NULL
, instance_id TEXT NOT NULL
, PRIMARY KEY (event_sequence DESC, instance_id) USING HASH WITH BUCKET_COUNT = 10
, INDEX agg_type_agg_id (aggregate_type, aggregate_id, instance_id)
, INDEX agg_type (aggregate_type, instance_id)
, INDEX agg_type_seq (aggregate_type, event_sequence DESC, instance_id)
STORING (id, event_type, aggregate_id, aggregate_version, previous_aggregate_sequence, creation_date, event_data, editor_user, editor_service, resource_owner, previous_aggregate_type_sequence)
, INDEX max_sequence (aggregate_type, aggregate_id, event_sequence DESC, instance_id)
, CONSTRAINT previous_sequence_unique UNIQUE (previous_aggregate_sequence DESC, instance_id)
, CONSTRAINT prev_agg_type_seq_unique UNIQUE(previous_aggregate_type_sequence, instance_id)
);

View File

@ -1,25 +1,22 @@
CREATE TABLE IF NOT EXISTS eventstore.events ( CREATE TABLE IF NOT EXISTS eventstore.events2 (
id UUID DEFAULT gen_random_uuid() instance_id TEXT NOT NULL
, event_type TEXT NOT NULL , aggregate_type TEXT NOT NULL
, aggregate_type TEXT NOT NULL , aggregate_id TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, aggregate_version TEXT NOT NULL , event_type TEXT NOT NULL
, event_sequence BIGINT NOT NULL , "sequence" BIGINT NOT NULL
, previous_aggregate_sequence BIGINT , revision SMALLINT NOT NULL
, previous_aggregate_type_sequence INT8 , created_at TIMESTAMPTZ NOT NULL
, creation_date TIMESTAMPTZ NOT NULL DEFAULT now() , payload JSONB
, event_data JSONB , creator TEXT NOT NULL
, editor_user TEXT NOT NULL , "owner" TEXT NOT NULL
, editor_service TEXT NOT NULL
, resource_owner TEXT NOT NULL , "position" DECIMAL NOT NULL
, instance_id TEXT NOT NULL , in_tx_order INTEGER NOT NULL
, PRIMARY KEY (event_sequence, instance_id) , PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
, CONSTRAINT previous_sequence_unique UNIQUE(previous_aggregate_sequence, instance_id)
, CONSTRAINT prev_agg_type_seq_unique UNIQUE(previous_aggregate_type_sequence, instance_id)
); );
CREATE INDEX IF NOT EXISTS agg_type_agg_id ON eventstore.events (aggregate_type, aggregate_id, instance_id); CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC, instance_id);
CREATE INDEX IF NOT EXISTS agg_type ON eventstore.events (aggregate_type, instance_id); CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS agg_type_seq ON eventstore.events (aggregate_type, event_sequence DESC, instance_id); CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");
CREATE INDEX IF NOT EXISTS max_sequence ON eventstore.events (aggregate_type, aggregate_id, event_sequence DESC, instance_id);

View File

@ -1,13 +1,14 @@
package initialise package initialise
import ( import (
"database/sql"
_ "embed" _ "embed"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/zitadel/logging" "github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
) )
func newDatabase() *cobra.Command { func newDatabase() *cobra.Command {
@ -33,8 +34,8 @@ The user provided by flags needs priviledge to
} }
} }
func VerifyDatabase(databaseName string) func(*sql.DB) error { func VerifyDatabase(databaseName string) func(*database.DB) error {
return func(db *sql.DB) error { return func(db *database.DB) error {
logging.WithFields("database", databaseName).Info("verify database") logging.WithFields("database", databaseName).Info("verify database")
return exec(db, fmt.Sprintf(string(databaseStmt), databaseName), []string{dbAlreadyExistsCode}) return exec(db, fmt.Sprintf(string(databaseStmt), databaseName), []string{dbAlreadyExistsCode})

View File

@ -1,13 +1,14 @@
package initialise package initialise
import ( import (
"database/sql"
_ "embed" _ "embed"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/zitadel/logging" "github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
) )
func newGrant() *cobra.Command { func newGrant() *cobra.Command {
@ -28,8 +29,8 @@ Prereqesits:
} }
} }
func VerifyGrant(databaseName, username string) func(*sql.DB) error { func VerifyGrant(databaseName, username string) func(*database.DB) error {
return func(db *sql.DB) error { return func(db *database.DB) error {
logging.WithFields("user", username, "database", databaseName).Info("verify grant") logging.WithFields("user", username, "database", databaseName).Info("verify grant")
return exec(db, fmt.Sprintf(grantStmt, databaseName, username), nil) return exec(db, fmt.Sprintf(grantStmt, databaseName, username), nil)

View File

@ -1,13 +1,14 @@
package initialise package initialise
import ( import (
"database/sql"
_ "embed" _ "embed"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/zitadel/logging" "github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
) )
func newUser() *cobra.Command { func newUser() *cobra.Command {
@ -33,8 +34,8 @@ The user provided by flags needs priviledge to
} }
} }
func VerifyUser(username, password string) func(*sql.DB) error { func VerifyUser(username, password string) func(*database.DB) error {
return func(db *sql.DB) error { return func(db *database.DB) error {
logging.WithFields("username", username).Info("verify user") logging.WithFields("username", username).Info("verify user")
if password != "" { if password != "" {

View File

@ -1,14 +1,13 @@
package initialise package initialise
import ( import (
"database/sql"
_ "embed" _ "embed"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/zitadel/logging" "github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
) )
@ -29,58 +28,66 @@ Prereqesits:
} }
} }
func VerifyZitadel(db *sql.DB, config database.Config) error { func VerifyZitadel(db *database.DB, config database.Config) error {
err := ReadStmts(config.Type()) err := ReadStmts(config.Type())
if err != nil { if err != nil {
return err return err
} }
logging.WithFields().Info("verify system")
if err := exec(db, fmt.Sprintf(createSystemStmt, config.Username()), nil); err != nil { if err := exec(db, fmt.Sprintf(createSystemStmt, config.Username()), nil); err != nil {
return err return err
} }
logging.WithFields().Info("verify encryption keys")
if err := createEncryptionKeys(db); err != nil { if err := createEncryptionKeys(db); err != nil {
return err return err
} }
logging.WithFields().Info("verify projections")
if err := exec(db, fmt.Sprintf(createProjectionsStmt, config.Username()), nil); err != nil { if err := exec(db, fmt.Sprintf(createProjectionsStmt, config.Username()), nil); err != nil {
return err return err
} }
logging.WithFields().Info("verify eventstore")
if err := exec(db, fmt.Sprintf(createEventstoreStmt, config.Username()), nil); err != nil { if err := exec(db, fmt.Sprintf(createEventstoreStmt, config.Username()), nil); err != nil {
return err return err
} }
logging.WithFields().Info("verify events tables")
if err := createEvents(db); err != nil { if err := createEvents(db); err != nil {
return err return err
} }
logging.WithFields().Info("verify system sequence")
if err := exec(db, createSystemSequenceStmt, nil); err != nil { if err := exec(db, createSystemSequenceStmt, nil); err != nil {
return err return err
} }
logging.WithFields().Info("verify unique constraints")
if err := exec(db, createUniqueConstraints, nil); err != nil { if err := exec(db, createUniqueConstraints, nil); err != nil {
return err return err
} }
return nil return nil
} }
func verifyZitadel(config database.Config) error { func verifyZitadel(config database.Config) error {
logging.WithFields("database", config.DatabaseName()).Info("verify zitadel") logging.WithFields("database", config.DatabaseName()).Info("verify zitadel")
db, err := database.Connect(config, false) db, err := database.Connect(config, false, false)
if err != nil { if err != nil {
return err return err
} }
if err := VerifyZitadel(db.DB, config); err != nil { if err := VerifyZitadel(db, config); err != nil {
return err return err
} }
return db.Close() return db.Close()
} }
func createEncryptionKeys(db *sql.DB) error { func createEncryptionKeys(db *database.DB) error {
tx, err := db.Begin() tx, err := db.Begin()
if err != nil { if err != nil {
return err return err
@ -93,16 +100,29 @@ func createEncryptionKeys(db *sql.DB) error {
return tx.Commit() return tx.Commit()
} }
func createEvents(db *sql.DB) error { func createEvents(db *database.DB) (err error) {
tx, err := db.Begin() tx, err := db.Begin()
if err != nil { if err != nil {
return err return err
} }
defer func() {
if err != nil {
rollbackErr := tx.Rollback()
logging.OnError(rollbackErr).Debug("rollback failed")
return
}
err = tx.Commit()
}()
if _, err = tx.Exec(createEventsStmt); err != nil { // if events already exists events2 is created during a setup job
tx.Rollback() var count int
row := tx.QueryRow("SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'")
if err = row.Scan(&count); err != nil {
return err return err
} }
if row.Err() != nil || count >= 1 {
return tx.Commit() return row.Err()
}
_, err = tx.Exec(createEventsStmt)
return err
} }

View File

@ -2,6 +2,7 @@ package initialise
import ( import (
"database/sql" "database/sql"
"database/sql/driver"
"errors" "errors"
"testing" "testing"
) )
@ -30,11 +31,53 @@ func Test_verifyEvents(t *testing.T) {
}, },
targetErr: sql.ErrConnDone, targetErr: sql.ErrConnDone,
}, },
{
name: "events already exists",
args: args{
db: prepareDB(t,
expectBegin(nil),
expectQuery(
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
nil,
[]string{"count"},
[][]driver.Value{
{1},
},
),
expectCommit(nil),
),
},
},
{
name: "events and events2 already exists",
args: args{
db: prepareDB(t,
expectBegin(nil),
expectQuery(
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
nil,
[]string{"count"},
[][]driver.Value{
{2},
},
),
expectCommit(nil),
),
},
},
{ {
name: "create table fails", name: "create table fails",
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectBegin(nil), expectBegin(nil),
expectQuery(
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
nil,
[]string{"count"},
[][]driver.Value{
{0},
},
),
expectExec(createEventsStmt, sql.ErrNoRows), expectExec(createEventsStmt, sql.ErrNoRows),
expectRollback(nil), expectRollback(nil),
), ),
@ -46,6 +89,14 @@ func Test_verifyEvents(t *testing.T) {
args: args{ args: args{
db: prepareDB(t, db: prepareDB(t,
expectBegin(nil), expectBegin(nil),
expectQuery(
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events%'",
nil,
[]string{"count"},
[][]driver.Value{
{0},
},
),
expectExec(createEventsStmt, nil), expectExec(createEventsStmt, nil),
expectCommit(nil), expectCommit(nil),
), ),

View File

@ -124,7 +124,7 @@ func openFile(fileName string) (io.Reader, error) {
} }
func keyStorage(config database.Config, masterKey string) (crypto.KeyStorage, error) { func keyStorage(config database.Config, masterKey string) (crypto.KeyStorage, error) {
db, err := database.Connect(config, false) db, err := database.Connect(config, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -12,7 +12,7 @@ CREATE TABLE adminapi.locks (
CREATE TABLE adminapi.current_sequences ( CREATE TABLE adminapi.current_sequences (
view_name TEXT, view_name TEXT,
current_sequence BIGINT, current_sequence BIGINT,
event_timestamp TIMESTAMPTZ, event_date TIMESTAMPTZ,
last_successful_spooler_run TIMESTAMPTZ, last_successful_spooler_run TIMESTAMPTZ,
instance_id TEXT NOT NULL, instance_id TEXT NOT NULL,

View File

@ -12,7 +12,7 @@ CREATE TABLE auth.locks (
CREATE TABLE auth.current_sequences ( CREATE TABLE auth.current_sequences (
view_name TEXT, view_name TEXT,
current_sequence BIGINT, current_sequence BIGINT,
event_timestamp TIMESTAMPTZ, event_date TIMESTAMPTZ,
last_successful_spooler_run TIMESTAMPTZ, last_successful_spooler_run TIMESTAMPTZ,
instance_id TEXT NOT NULL, instance_id TEXT NOT NULL,

View File

@ -1,23 +0,0 @@
package setup
import (
"embed"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 04/cockroach/index.sql
//go:embed 04/postgres/index.sql
stmts04 embed.FS
)
func New04(db *database.DB) *EventstoreIndexesNew {
return &EventstoreIndexesNew{
dbClient: db,
name: "04_eventstore_indexes",
step: "04",
fileName: "index.sql",
stmts: stmts04,
}
}

View File

@ -1,4 +0,0 @@
CREATE INDEX IF NOT EXISTS write_model ON eventstore.events (instance_id, aggregate_type, aggregate_id, event_type, resource_owner)
STORING (id, aggregate_version, previous_aggregate_sequence, creation_date, event_data, editor_user, editor_service, previous_aggregate_type_sequence);
CREATE INDEX IF NOT EXISTS active_instances ON eventstore.events (creation_date desc, instance_id) USING HASH;

View File

@ -1,3 +0,0 @@
CREATE INDEX IF NOT EXISTS write_model ON eventstore.events (instance_id, aggregate_type, aggregate_id, event_type, resource_owner);
CREATE INDEX IF NOT EXISTS active_instances ON eventstore.events (creation_date, instance_id);

View File

@ -1,23 +0,0 @@
package setup
import (
"embed"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 09/cockroach/index.sql
//go:embed 09/postgres/index.sql
stmts09 embed.FS
)
func New09(db *database.DB) *EventstoreIndexesNew {
return &EventstoreIndexesNew{
dbClient: db,
name: "09_optimise_indexes",
step: "09",
fileName: "index.sql",
stmts: stmts09,
}
}

View File

@ -1,51 +0,0 @@
-- replace agg_type_agg_id
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type_agg_id;
COMMIT;
BEGIN;
CREATE INDEX agg_type_agg_id ON eventstore.events (
instance_id
, aggregate_type
, aggregate_id
) STORING (
event_type
, aggregate_version
, previous_aggregate_sequence
, previous_aggregate_type_sequence
, creation_date
, event_data
, editor_user
, editor_service
, resource_owner
);
COMMIT;
-- replace agg_type
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type;
COMMIT;
BEGIN;
CREATE INDEX agg_type ON eventstore.events (
instance_id
, aggregate_type
, event_sequence
) STORING (
event_type
, aggregate_id
, aggregate_version
, previous_aggregate_sequence
, previous_aggregate_type_sequence
, creation_date
, event_data
, editor_user
, editor_service
, resource_owner
);
COMMIT;
-- drop unused index
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type_seq;
COMMIT;

View File

@ -1,30 +0,0 @@
-- replace agg_type_agg_id
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type_agg_id;
COMMIT;
BEGIN;
CREATE INDEX agg_type_agg_id ON eventstore.events (
instance_id
, aggregate_type
, aggregate_id
);
COMMIT;
-- replace agg_type
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type;
COMMIT;
BEGIN;
CREATE INDEX agg_type ON eventstore.events (
instance_id
, aggregate_type
, event_sequence
);
COMMIT;
-- drop unused index
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type_seq;
COMMIT;

View File

@ -3,7 +3,7 @@ package setup
import ( import (
"context" "context"
"database/sql" "database/sql"
_ "embed" "embed"
"time" "time"
"github.com/cockroachdb/cockroach-go/v2/crdb" "github.com/cockroachdb/cockroach-go/v2/crdb"
@ -17,8 +17,9 @@ var (
correctCreationDate10CreateTable string correctCreationDate10CreateTable string
//go:embed 10/10_fill_table.sql //go:embed 10/10_fill_table.sql
correctCreationDate10FillTable string correctCreationDate10FillTable string
//go:embed 10/10_update.sql //go:embed 10/cockroach/10_update.sql
correctCreationDate10Update string //go:embed 10/postgres/10_update.sql
correctCreationDate10Update embed.FS
//go:embed 10/10_count_wrong_events.sql //go:embed 10/10_count_wrong_events.sql
correctCreationDate10CountWrongEvents string correctCreationDate10CountWrongEvents string
//go:embed 10/10_empty_table.sql //go:embed 10/10_empty_table.sql
@ -34,7 +35,8 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
ctx, cancel := context.WithTimeout(ctx, mig.FailAfter) ctx, cancel := context.WithTimeout(ctx, mig.FailAfter)
defer cancel() defer cancel()
for { for i := 0; ; i++ {
logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration")
var affected int64 var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error { err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
if mig.dbClient.Type() == "cockroach" { if mig.dbClient.Type() == "cockroach" {
@ -46,6 +48,7 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
if err != nil { if err != nil {
return err return err
} }
logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table created")
_, err = tx.ExecContext(ctx, correctCreationDate10Truncate) _, err = tx.ExecContext(ctx, correctCreationDate10Truncate)
if err != nil { if err != nil {
@ -55,19 +58,25 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
if err != nil { if err != nil {
return err return err
} }
logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table filled")
res := tx.QueryRowContext(ctx, correctCreationDate10CountWrongEvents) res := tx.QueryRowContext(ctx, correctCreationDate10CountWrongEvents)
if err := res.Scan(&affected); err != nil || affected == 0 { if err := res.Scan(&affected); err != nil || affected == 0 {
return err return err
} }
_, err = tx.ExecContext(ctx, correctCreationDate10Update) updateStmt, err := readStmt(correctCreationDate10Update, "10", mig.dbClient.Type(), "10_update.sql")
if err != nil { if err != nil {
return err return err
} }
logging.WithFields("count", affected).Info("creation dates changed") _, err = tx.ExecContext(ctx, updateStmt)
if err != nil {
return err
}
logging.WithFields("mig", mig.String(), "iteration", i, "count", affected).Debug("creation dates updated")
return nil return nil
}) })
logging.WithFields("mig", mig.String(), "iteration", i).Debug("end iteration")
if affected == 0 || err != nil { if affected == 0 || err != nil {
return err return err
} }

View File

@ -1,4 +1,4 @@
CREATE temporary TABLE IF NOT EXISTS wrong_events ( CREATE TEMPORARY TABLE IF NOT EXISTS wrong_events (
instance_id TEXT instance_id TEXT
, event_sequence BIGINT , event_sequence BIGINT
, current_cd TIMESTAMPTZ , current_cd TIMESTAMPTZ

View File

@ -10,6 +10,8 @@ INSERT INTO wrong_events (
) AS next_cd ) AS next_cd
FROM FROM
eventstore.events eventstore.events
WHERE
"position" IS NULL
) sub WHERE ) sub WHERE
current_cd < next_cd current_cd < next_cd
ORDER BY ORDER BY

View File

@ -1 +0,0 @@
UPDATE eventstore.events e SET creation_date = we.next_cd FROM wrong_events we WHERE e.event_sequence = we.event_sequence and e.instance_id = we.instance_id;

View File

@ -0,0 +1 @@
UPDATE eventstore.events e SET (creation_date, "position") = (we.next_cd, we.next_cd::DECIMAL) FROM wrong_events we WHERE e.event_sequence = we.event_sequence AND e.instance_id = we.instance_id;

View File

@ -0,0 +1,10 @@
-- Correct events whose creation_date is older than a later event's
-- (rows collected into the wrong_events temp table where current_cd < next_cd):
-- set creation_date to the corrected next_cd and keep "position" in sync by
-- storing the same timestamp as seconds since the Unix epoch (PostgreSQL variant).
UPDATE
eventstore.events e
SET
creation_date = we.next_cd
, "position" = (EXTRACT(EPOCH FROM we.next_cd))
FROM
wrong_events we
WHERE
e.event_sequence = we.event_sequence
AND e.instance_id = we.instance_id;

View File

@ -1,92 +0,0 @@
package setup
import (
"context"
"database/sql"
"embed"
"github.com/cockroachdb/cockroach-go/v2/crdb"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
// Embedded SQL for setup step 11. The go:embed lines are build directives:
// plain strings for dialect-independent statements, and an embed.FS holding
// both the cockroach and postgres variants of the index statements.
var (
//go:embed 11/11_add_column.sql
addEventCreatedAt string
//go:embed 11/11_update_events.sql
setCreatedAt string
//go:embed 11/11_set_column.sql
setCreatedAtDetails string
//go:embed 11/postgres/create_index.sql
//go:embed 11/cockroach/create_index.sql
createdAtIndexCreateStmt embed.FS
//go:embed 11/postgres/drop_index.sql
//go:embed 11/cockroach/drop_index.sql
createdAtIndexDropStmt embed.FS
)
// AddEventCreatedAt is setup step 11: it backfills the created_at column
// on eventstore.events from creation_date.
type AddEventCreatedAt struct {
// BulkAmount is the number of rows updated per transaction in the backfill loop.
BulkAmount int
// step10 is re-executed first to fix creation-date ordering (see Execute).
step10 *CorrectCreationDate
dbClient *database.DB
}
// Execute backfills created_at on eventstore.events in five stages:
// re-run step 10, add the column (11_add_column.sql sets a clock_timestamp()
// default for new rows), create a temporary partial index to speed up the
// backfill, copy creation_date into created_at in bulks, then tighten the
// column (11_set_column.sql marks it NOT NULL) and drop the helper index.
func (mig *AddEventCreatedAt) Execute(ctx context.Context) error {
// execute step 10 again because events created after the first execution of step 10
// could still have the wrong ordering of sequences and creation date
if err := mig.step10.Execute(ctx); err != nil {
return err
}
_, err := mig.dbClient.ExecContext(ctx, addEventCreatedAt)
if err != nil {
return err
}
// The helper index is dialect specific (partial index on created_at IS NULL);
// pick the variant matching the connected database.
createIndex, err := readStmt(createdAtIndexCreateStmt, "11", mig.dbClient.Type(), "create_index.sql")
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, createIndex)
if err != nil {
return err
}
// Backfill in bulks of BulkAmount rows, each bulk in its own transaction
// (crdb.ExecuteTx adds client-side retries on cockroach).
for i := 0; ; i++ {
var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
res, err := tx.Exec(setCreatedAt, mig.BulkAmount)
if err != nil {
return err
}
// RowsAffected error is deliberately ignored; affected stays 0 and
// the loop terminates below.
affected, _ = res.RowsAffected()
return nil
})
if err != nil {
return err
}
logging.WithFields("step", "11", "iteration", i, "affected", affected).Info("set created_at iteration done")
// A partially filled bulk means no rows with created_at IS NULL remain.
if affected < int64(mig.BulkAmount) {
break
}
}
logging.Info("set details")
_, err = mig.dbClient.ExecContext(ctx, setCreatedAtDetails)
if err != nil {
return err
}
// The backfill helper index is no longer needed once every row is filled.
dropIndex, err := readStmt(createdAtIndexDropStmt, "11", mig.dbClient.Type(), "drop_index.sql")
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, dropIndex)
return err
}
// String returns the unique name of this setup step (used e.g. in log fields).
func (mig *AddEventCreatedAt) String() string {
return "11_event_created_at"
}

View File

@ -1,6 +0,0 @@
BEGIN;
-- create table with empty created_at
ALTER TABLE eventstore.events ADD COLUMN IF NOT EXISTS created_at TIMESTAMPTZ DEFAULT NULL;
-- set column rules
ALTER TABLE eventstore.events ALTER COLUMN created_at SET DEFAULT clock_timestamp();
COMMIT;

View File

@ -1,3 +0,0 @@
BEGIN;
ALTER TABLE eventstore.events ALTER COLUMN created_at SET NOT NULL;
COMMIT;

View File

@ -1,21 +0,0 @@
UPDATE eventstore.events SET
created_at = creation_date
FROM (
SELECT
e.event_sequence as seq
, e.instance_id as i_id
, e.creation_date as cd
FROM
eventstore.events e
WHERE
created_at IS NULL
ORDER BY
event_sequence ASC
, instance_id
LIMIT $1
) AS e
WHERE
e.seq = eventstore.events.event_sequence
AND e.i_id = eventstore.events.instance_id
AND e.cd = eventstore.events.creation_date
;

View File

@ -1,8 +0,0 @@
CREATE INDEX IF NOT EXISTS ca_fill_idx ON eventstore.events (
event_sequence DESC
, instance_id
) STORING (
id
, creation_date
, created_at
) WHERE created_at IS NULL;

View File

@ -1 +0,0 @@
DROP INDEX IF EXISTS eventstore.events@ca_fill_idx;

View File

@ -1,4 +0,0 @@
CREATE INDEX IF NOT EXISTS ca_fill_idx ON eventstore.events (
event_sequence DESC
, instance_id
) WHERE created_at IS NULL;

View File

@ -1 +0,0 @@
DROP INDEX IF EXISTS eventstore.ca_fill_idx;

72
cmd/setup/14.go Normal file
View File

@ -0,0 +1,72 @@
package setup
import (
"context"
"database/sql"
"embed"
"errors"
"strings"
"github.com/jackc/pgconn"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
// newEventsTable embeds the per-dialect SQL files for setup step 14.
// The go:embed lines are build directives, not plain comments: they bundle
// both the cockroach and postgres variants into a single embed.FS.
var (
//go:embed 14/cockroach/*.sql
//go:embed 14/postgres/*.sql
newEventsTable embed.FS
)
// NewEventsTable is setup step 14: it creates eventstore.events2 and
// migrates the stored events from the legacy eventstore.events table.
type NewEventsTable struct {
// dbClient is the connection the migration statements run on.
dbClient *database.DB
}
// Execute creates eventstore.events2 and copies the existing events into it.
// It runs the embedded SQL files for the connected database dialect in
// lexical filename order (fs.ReadDir returns sorted entries), so the files
// are numbered to encode their execution order.
func (mig *NewEventsTable) Execute(ctx context.Context) error {
migrations, err := newEventsTable.ReadDir("14/" + mig.dbClient.Type())
if err != nil {
return err
}
// Idempotency guard: if events2 already exists this migration already ran
// as part of a setup job, so there is nothing left to do.
var count int
err = mig.dbClient.QueryRow(
func(row *sql.Row) error {
if err = row.Scan(&count); err != nil {
return err
}
return row.Err()
},
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events2'",
)
if err != nil || count == 1 {
return err
}
for _, migration := range migrations {
stmt, err := readStmt(newEventsTable, "14", mig.dbClient.Type(), migration.Name())
if err != nil {
return err
}
// Statements may contain a {{.username}} placeholder which is replaced
// with the configured database user before execution.
stmt = strings.ReplaceAll(stmt, "{{.username}}", mig.dbClient.Username())
logging.WithFields("migration", mig.String(), "file", migration.Name()).Debug("execute statement")
_, err = mig.dbClient.ExecContext(ctx, stmt)
if err != nil {
return err
}
}
return nil
}
// String returns the unique name of this setup step (used e.g. in log fields).
func (mig *NewEventsTable) String() string {
return "14_events_push"
}
// ContinueOnErr reports whether the setup may proceed despite err.
// The step tolerates SQLSTATE 42P01 (undefined_table): the legacy events
// table is already gone, meaning the migration completed previously.
func (mig *NewEventsTable) ContinueOnErr(err error) bool {
	var pgErr *pgconn.PgError
	return errors.As(err, &pgErr) && pgErr.Code == "42P01"
}

View File

@ -0,0 +1 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@ -0,0 +1,33 @@
-- Build eventstore.events2 from the renamed legacy table in a single
-- CREATE TABLE ... AS SELECT (CockroachDB allows declaring the primary key
-- directly in the CTAS). Column mapping (legacy -> new):
--   event_sequence               -> "sequence" (and in_tx_order)
--   substr(aggregate_version, 2) -> revision   (strips the leading 'v': 'v2' -> 2)
--   creation_date                -> created_at (and "position" cast to DECIMAL)
--   event_data                   -> payload
--   editor_user                  -> creator
--   resource_owner               -> "owner"
-- editor_service of the legacy table is dropped.
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order,
PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
creation_date::DECIMAL,
event_sequence
FROM eventstore.events_old;

View File

@ -0,0 +1,7 @@
-- Columns created via CREATE TABLE ... AS SELECT are nullable by default;
-- tighten the schema by marking every always-set column NOT NULL.
-- (instance_id, aggregate_type, aggregate_id and "sequence" are already
-- NOT NULL as part of the primary key; payload stays nullable.)
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN revision SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN created_at SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN creator SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "owner" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "position" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN in_tx_order SET NOT NULL;

View File

@ -0,0 +1,3 @@
-- Secondary indexes on events2 (CockroachDB variant; STORING makes
-- es_active_instances covering for "position" without widening the key).
-- NOTE(review): names suggest the consumers — active-instance queries,
-- write models (wm), and projection catch-up — confirm against the query code.
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC) STORING ("position");
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

View File

@ -0,0 +1 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@ -0,0 +1,31 @@
-- Build eventstore.events2 from the renamed legacy table (PostgreSQL variant).
-- "position" is derived as EXTRACT(EPOCH FROM creation_date), i.e. seconds
-- since the Unix epoch as a numeric value. PostgreSQL does not allow
-- declaring constraints inside CREATE TABLE ... AS SELECT, so the primary
-- key is added by a follow-up script. Column mapping (legacy -> new):
--   event_sequence               -> "sequence" (and in_tx_order)
--   substr(aggregate_version, 2) -> revision   (strips the leading 'v')
--   creation_date                -> created_at (and "position")
--   event_data                   -> payload
--   editor_user                  -> creator
--   resource_owner               -> "owner"
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
EXTRACT(EPOCH FROM creation_date),
event_sequence
FROM eventstore.events_old;

View File

@ -0,0 +1,4 @@
-- Add the primary key after the CREATE TABLE ... AS SELECT (PostgreSQL cannot
-- declare constraints inside a CTAS). The DROP CONSTRAINT IF EXISTS guard
-- makes the script safe to re-run.
BEGIN;
ALTER TABLE eventstore.events2 DROP CONSTRAINT IF EXISTS events2_pkey;
ALTER TABLE eventstore.events2 ADD PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence");
COMMIT;

View File

@ -0,0 +1,7 @@
-- Tighten the CTAS-created columns in one ALTER TABLE with multiple actions
-- (single table rewrite/validation pass; payload stays nullable, the primary
-- key columns are already NOT NULL).
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL,
ALTER COLUMN revision SET NOT NULL,
ALTER COLUMN created_at SET NOT NULL,
ALTER COLUMN creator SET NOT NULL,
ALTER COLUMN "owner" SET NOT NULL,
ALTER COLUMN "position" SET NOT NULL,
ALTER COLUMN in_tx_order SET NOT NULL;

View File

@ -0,0 +1,3 @@
-- Secondary indexes on events2 (PostgreSQL variant). PostgreSQL has no
-- STORING clause, so es_active_instances adds instance_id as a key column
-- instead of the cockroach variant's STORING ("position").
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC, instance_id);
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

45
cmd/setup/15.go Normal file
View File

@ -0,0 +1,45 @@
package setup
import (
"context"
"embed"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
// currentProjectionState embeds the per-dialect SQL files for setup step 15.
// The go:embed lines are build directives bundling both the cockroach and
// postgres variants into a single embed.FS.
var (
//go:embed 15/cockroach/*.sql
//go:embed 15/postgres/*.sql
currentProjectionState embed.FS
)
// CurrentProjectionState is setup step 15: it creates the
// projections.current_states and projections.failed_events2 tables and
// seeds them from the legacy sequence/failed-event tables.
type CurrentProjectionState struct {
// dbClient is the connection the migration statements run on.
dbClient *database.DB
}
// Execute runs the embedded step-15 SQL files for the connected database
// dialect. Files are executed in lexical filename order (fs.ReadDir returns
// sorted entries), which encodes their required execution order.
func (mig *CurrentProjectionState) Execute(ctx context.Context) error {
	dialect := mig.dbClient.Type()
	files, err := currentProjectionState.ReadDir("15/" + dialect)
	if err != nil {
		return err
	}
	for _, file := range files {
		name := file.Name()
		stmt, err := readStmt(currentProjectionState, "15", dialect, name)
		if err != nil {
			return err
		}
		logging.WithFields("file", name, "migration", mig.String()).Info("execute statement")
		if _, err = mig.dbClient.ExecContext(ctx, stmt); err != nil {
			return err
		}
	}
	return nil
}
// String returns the unique name of this setup step (used e.g. in log fields).
func (mig *CurrentProjectionState) String() string {
return "15_current_projection_state"
}

View File

@ -0,0 +1,16 @@
-- failed_events2 replaces the legacy failed-event tables: a failed event is
-- identified by its aggregate (type + id) and per-aggregate failed_sequence
-- (matching events2's key) instead of the old globally ordered sequence,
-- and additionally records the event's creation date.
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
-- Supports instance-scoped lookups and instance removal.
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,15 @@
-- current_states replaces the legacy per-aggregate-type current_sequences:
-- one row per (projection, instance) tracking the watermark of the last
-- processed event — its "position", date, sequence and aggregate.
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
-- Supports instance-scoped lookups and instance removal.
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@ -0,0 +1,29 @@
-- Seed projections.current_states from the legacy per-aggregate-type
-- projections.current_sequences: for each (projection, instance) the subquery
-- picks the row with the highest current_sequence, and the join to events2
-- translates that sequence into the new created_at / "position" watermark.
-- ON CONFLICT DO NOTHING keeps rows that were already migrated untouched.
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,16 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@ -9,6 +9,8 @@ import (
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/migration" "github.com/zitadel/zitadel/internal/migration"
) )
@ -29,11 +31,14 @@ func Cleanup(config *Config) {
logging.Info("cleanup started") logging.Info("cleanup started")
dbClient, err := database.Connect(config.Database, false) zitadelDBClient, err := database.Connect(config.Database, false, false)
logging.OnError(err).Fatal("unable to connect to database")
esPusherDBClient, err := database.Connect(config.Database, false, true)
logging.OnError(err).Fatal("unable to connect to database") logging.OnError(err).Fatal("unable to connect to database")
es, err := eventstore.Start(&eventstore.Config{Client: dbClient}) config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
logging.OnError(err).Fatal("unable to start eventstore") config.Eventstore.Querier = old_es.NewCRDB(zitadelDBClient)
es := eventstore.NewEventstore(config.Eventstore)
migration.RegisterMappers(es) migration.RegisterMappers(es)
step, err := migration.LatestStep(ctx, es) step, err := migration.LatestStep(ctx, es)

View File

@ -15,6 +15,7 @@ import (
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/crypto"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/id" "github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/query/projection" "github.com/zitadel/zitadel/internal/query/projection"
) )
@ -31,6 +32,7 @@ type Config struct {
DefaultInstance command.InstanceSetup DefaultInstance command.InstanceSetup
Machine *id.Config Machine *id.Config
Projections projection.Config Projections projection.Config
Eventstore *eventstore.Config
} }
func MustNewConfig(v *viper.Viper) *Config { func MustNewConfig(v *viper.Viper) *Config {
@ -60,16 +62,15 @@ type Steps struct {
s1ProjectionTable *ProjectionTable s1ProjectionTable *ProjectionTable
s2AssetsTable *AssetTable s2AssetsTable *AssetTable
FirstInstance *FirstInstance FirstInstance *FirstInstance
s4EventstoreIndexes *EventstoreIndexesNew
s5LastFailed *LastFailed s5LastFailed *LastFailed
s6OwnerRemoveColumns *OwnerRemoveColumns s6OwnerRemoveColumns *OwnerRemoveColumns
s7LogstoreTables *LogstoreTables s7LogstoreTables *LogstoreTables
s8AuthTokens *AuthTokenIndexes s8AuthTokens *AuthTokenIndexes
s9EventstoreIndexes2 *EventstoreIndexesNew
CorrectCreationDate *CorrectCreationDate CorrectCreationDate *CorrectCreationDate
AddEventCreatedAt *AddEventCreatedAt
s12AddOTPColumns *AddOTPColumns s12AddOTPColumns *AddOTPColumns
s13FixQuotaProjection *FixQuotaConstraints s13FixQuotaProjection *FixQuotaConstraints
s14NewEventsTable *NewEventsTable
s15CurrentStates *CurrentProjectionState
} }
type encryptionKeyConfig struct { type encryptionKeyConfig struct {

View File

@ -1,29 +0,0 @@
package setup
import (
"context"
"embed"
"github.com/zitadel/zitadel/internal/database"
)
// EventstoreIndexesNew is a reusable migration that executes a single
// embedded, dialect-specific SQL file.
type EventstoreIndexesNew struct {
dbClient *database.DB
// name is the unique migration name returned by String.
name string
// step and fileName select the embedded file: <step>/<dialect>/<fileName>.
step string
fileName string
stmts embed.FS
}
// Execute reads the SQL file matching the connected database's dialect from
// the embedded FS and runs it.
func (mig *EventstoreIndexesNew) Execute(ctx context.Context) error {
stmt, err := readStmt(mig.stmts, mig.step, mig.dbClient.Type(), mig.fileName)
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, stmt)
return err
}
// String returns the migration's unique name.
func (mig *EventstoreIndexesNew) String() string {
return mig.name
}

View File

@ -14,6 +14,8 @@ import (
"github.com/zitadel/zitadel/cmd/tls" "github.com/zitadel/zitadel/cmd/tls"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/migration" "github.com/zitadel/zitadel/internal/migration"
"github.com/zitadel/zitadel/internal/query/projection" "github.com/zitadel/zitadel/internal/query/projection"
) )
@ -62,22 +64,26 @@ func Setup(config *Config, steps *Steps, masterKey string) {
ctx := context.Background() ctx := context.Background()
logging.Info("setup started") logging.Info("setup started")
dbClient, err := database.Connect(config.Database, false) zitadelDBClient, err := database.Connect(config.Database, false, false)
logging.OnError(err).Fatal("unable to connect to database")
esPusherDBClient, err := database.Connect(config.Database, false, true)
logging.OnError(err).Fatal("unable to connect to database") logging.OnError(err).Fatal("unable to connect to database")
eventstoreClient, err := eventstore.Start(&eventstore.Config{Client: dbClient}) config.Eventstore.Querier = old_es.NewCRDB(zitadelDBClient)
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
eventstoreClient := eventstore.NewEventstore(config.Eventstore)
logging.OnError(err).Fatal("unable to start eventstore") logging.OnError(err).Fatal("unable to start eventstore")
migration.RegisterMappers(eventstoreClient) migration.RegisterMappers(eventstoreClient)
steps.s1ProjectionTable = &ProjectionTable{dbClient: dbClient.DB} steps.s1ProjectionTable = &ProjectionTable{dbClient: zitadelDBClient.DB}
steps.s2AssetsTable = &AssetTable{dbClient: dbClient.DB} steps.s2AssetsTable = &AssetTable{dbClient: zitadelDBClient.DB}
steps.FirstInstance.instanceSetup = config.DefaultInstance steps.FirstInstance.instanceSetup = config.DefaultInstance
steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User
steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP
steps.FirstInstance.oidcEncryptionKey = config.EncryptionKeys.OIDC steps.FirstInstance.oidcEncryptionKey = config.EncryptionKeys.OIDC
steps.FirstInstance.masterKey = masterKey steps.FirstInstance.masterKey = masterKey
steps.FirstInstance.db = dbClient steps.FirstInstance.db = zitadelDBClient
steps.FirstInstance.es = eventstoreClient steps.FirstInstance.es = eventstoreClient
steps.FirstInstance.defaults = config.SystemDefaults steps.FirstInstance.defaults = config.SystemDefaults
steps.FirstInstance.zitadelRoles = config.InternalAuthZ.RolePermissionMappings steps.FirstInstance.zitadelRoles = config.InternalAuthZ.RolePermissionMappings
@ -85,19 +91,17 @@ func Setup(config *Config, steps *Steps, masterKey string) {
steps.FirstInstance.externalSecure = config.ExternalSecure steps.FirstInstance.externalSecure = config.ExternalSecure
steps.FirstInstance.externalPort = config.ExternalPort steps.FirstInstance.externalPort = config.ExternalPort
steps.s4EventstoreIndexes = New04(dbClient) steps.s5LastFailed = &LastFailed{dbClient: zitadelDBClient.DB}
steps.s5LastFailed = &LastFailed{dbClient: dbClient.DB} steps.s6OwnerRemoveColumns = &OwnerRemoveColumns{dbClient: zitadelDBClient.DB}
steps.s6OwnerRemoveColumns = &OwnerRemoveColumns{dbClient: dbClient.DB} steps.s7LogstoreTables = &LogstoreTables{dbClient: zitadelDBClient.DB, username: config.Database.Username(), dbType: config.Database.Type()}
steps.s7LogstoreTables = &LogstoreTables{dbClient: dbClient.DB, username: config.Database.Username(), dbType: config.Database.Type()} steps.s8AuthTokens = &AuthTokenIndexes{dbClient: zitadelDBClient}
steps.s8AuthTokens = &AuthTokenIndexes{dbClient: dbClient} steps.CorrectCreationDate.dbClient = esPusherDBClient
steps.s9EventstoreIndexes2 = New09(dbClient) steps.s12AddOTPColumns = &AddOTPColumns{dbClient: zitadelDBClient}
steps.CorrectCreationDate.dbClient = dbClient steps.s13FixQuotaProjection = &FixQuotaConstraints{dbClient: zitadelDBClient}
steps.AddEventCreatedAt.dbClient = dbClient steps.s14NewEventsTable = &NewEventsTable{dbClient: esPusherDBClient}
steps.AddEventCreatedAt.step10 = steps.CorrectCreationDate steps.s15CurrentStates = &CurrentProjectionState{dbClient: zitadelDBClient}
steps.s12AddOTPColumns = &AddOTPColumns{dbClient: dbClient}
steps.s13FixQuotaProjection = &FixQuotaConstraints{dbClient: dbClient}
err = projection.Create(ctx, dbClient, eventstoreClient, config.Projections, nil, nil) err = projection.Create(ctx, zitadelDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections") logging.OnError(err).Fatal("unable to start projections")
repeatableSteps := []migration.RepeatableMigration{ repeatableSteps := []migration.RepeatableMigration{
@ -114,32 +118,28 @@ func Setup(config *Config, steps *Steps, masterKey string) {
}, },
} }
err = migration.Migrate(ctx, eventstoreClient, steps.s14NewEventsTable)
logging.WithFields("name", steps.s14NewEventsTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s1ProjectionTable) err = migration.Migrate(ctx, eventstoreClient, steps.s1ProjectionTable)
logging.OnError(err).Fatal("unable to migrate step 1") logging.WithFields("name", steps.s1ProjectionTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s2AssetsTable) err = migration.Migrate(ctx, eventstoreClient, steps.s2AssetsTable)
logging.OnError(err).Fatal("unable to migrate step 2") logging.WithFields("name", steps.s2AssetsTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.FirstInstance) err = migration.Migrate(ctx, eventstoreClient, steps.FirstInstance)
logging.OnError(err).Fatal("unable to migrate step 3") logging.WithFields("name", steps.FirstInstance.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s4EventstoreIndexes)
logging.OnError(err).Fatal("unable to migrate step 4")
err = migration.Migrate(ctx, eventstoreClient, steps.s5LastFailed) err = migration.Migrate(ctx, eventstoreClient, steps.s5LastFailed)
logging.OnError(err).Fatal("unable to migrate step 5") logging.WithFields("name", steps.s5LastFailed.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s6OwnerRemoveColumns) err = migration.Migrate(ctx, eventstoreClient, steps.s6OwnerRemoveColumns)
logging.OnError(err).Fatal("unable to migrate step 6") logging.WithFields("name", steps.s6OwnerRemoveColumns.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s7LogstoreTables) err = migration.Migrate(ctx, eventstoreClient, steps.s7LogstoreTables)
logging.OnError(err).Fatal("unable to migrate step 7") logging.WithFields("name", steps.s7LogstoreTables.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s8AuthTokens) err = migration.Migrate(ctx, eventstoreClient, steps.s8AuthTokens)
logging.OnError(err).Fatal("unable to migrate step 8") logging.WithFields("name", steps.s8AuthTokens.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s9EventstoreIndexes2)
logging.OnError(err).Fatal("unable to migrate step 9")
err = migration.Migrate(ctx, eventstoreClient, steps.CorrectCreationDate)
logging.OnError(err).Fatal("unable to migrate step 10")
err = migration.Migrate(ctx, eventstoreClient, steps.AddEventCreatedAt)
logging.OnError(err).Fatal("unable to migrate step 11")
err = migration.Migrate(ctx, eventstoreClient, steps.s12AddOTPColumns) err = migration.Migrate(ctx, eventstoreClient, steps.s12AddOTPColumns)
logging.OnError(err).Fatal("unable to migrate step 12") logging.WithFields("name", steps.s12AddOTPColumns.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s13FixQuotaProjection) err = migration.Migrate(ctx, eventstoreClient, steps.s13FixQuotaProjection)
logging.OnError(err).Fatal("unable to migrate step 13") logging.WithFields("name", steps.s13FixQuotaProjection.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s15CurrentStates)
logging.WithFields("name", steps.s15CurrentStates.String()).OnError(err).Fatal("migration failed")
for _, repeatableStep := range repeatableSteps { for _, repeatableStep := range repeatableSteps {
err = migration.Migrate(ctx, eventstoreClient, repeatableStep) err = migration.Migrate(ctx, eventstoreClient, repeatableStep)

View File

@ -60,6 +60,8 @@ import (
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/id" "github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/logstore" "github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/logstore/emitters/access" "github.com/zitadel/zitadel/internal/logstore/emitters/access"
@ -121,12 +123,16 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
ctx := context.Background() ctx := context.Background()
dbClient, err := database.Connect(config.Database, false) zitadelDBClient, err := database.Connect(config.Database, false, false)
if err != nil {
return fmt.Errorf("cannot start client for projection: %w", err)
}
esPusherDBClient, err := database.Connect(config.Database, false, true)
if err != nil { if err != nil {
return fmt.Errorf("cannot start client for projection: %w", err) return fmt.Errorf("cannot start client for projection: %w", err)
} }
keyStorage, err := cryptoDB.NewKeyStorage(dbClient, masterKey) keyStorage, err := cryptoDB.NewKeyStorage(zitadelDBClient, masterKey)
if err != nil { if err != nil {
return fmt.Errorf("cannot start key storage: %w", err) return fmt.Errorf("cannot start key storage: %w", err)
} }
@ -135,18 +141,16 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
return err return err
} }
config.Eventstore.Client = dbClient config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
eventstoreClient, err := eventstore.Start(config.Eventstore) config.Eventstore.Querier = old_es.NewCRDB(zitadelDBClient)
if err != nil { eventstoreClient := eventstore.NewEventstore(config.Eventstore)
return fmt.Errorf("cannot start eventstore for queries: %w", err)
}
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
queries, err := query.StartQueries( queries, err := query.StartQueries(
ctx, ctx,
eventstoreClient, eventstoreClient,
dbClient, zitadelDBClient,
config.Projections, config.Projections,
config.SystemDefaults, config.SystemDefaults,
keys.IDPConfig, keys.IDPConfig,
@ -160,12 +164,13 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
} }
}, },
config.SystemAPIUsers,
) )
if err != nil { if err != nil {
return fmt.Errorf("cannot start queries: %w", err) return fmt.Errorf("cannot start queries: %w", err)
} }
authZRepo, err := authz.Start(queries, dbClient, keys.OIDC, config.ExternalSecure, config.Eventstore.AllowOrderByCreationDate) authZRepo, err := authz.Start(queries, eventstoreClient, zitadelDBClient, keys.OIDC, config.ExternalSecure)
if err != nil { if err != nil {
return fmt.Errorf("error starting authz repo: %w", err) return fmt.Errorf("error starting authz repo: %w", err)
} }
@ -173,7 +178,7 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
} }
storage, err := config.AssetStorage.NewStorage(dbClient.DB) storage, err := config.AssetStorage.NewStorage(zitadelDBClient.DB)
if err != nil { if err != nil {
return fmt.Errorf("cannot start asset storage client: %w", err) return fmt.Errorf("cannot start asset storage client: %w", err)
} }
@ -215,7 +220,7 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
if err != nil { if err != nil {
return err return err
} }
actionsExecutionDBEmitter, err := logstore.NewEmitter[*record.ExecutionLog](ctx, clock, config.Quotas.Execution, execution.NewDatabaseLogStorage(dbClient, commands, queries)) actionsExecutionDBEmitter, err := logstore.NewEmitter[*record.ExecutionLog](ctx, clock, config.Quotas.Execution, execution.NewDatabaseLogStorage(zitadelDBClient, commands, queries))
if err != nil { if err != nil {
return err return err
} }
@ -254,7 +259,7 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
commands, commands,
queries, queries,
eventstoreClient, eventstoreClient,
dbClient, zitadelDBClient,
config, config,
storage, storage,
authZRepo, authZRepo,
@ -271,7 +276,7 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
if server != nil { if server != nil {
server <- &Server{ server <- &Server{
Config: config, Config: config,
DB: dbClient, DB: zitadelDBClient,
KeyStorage: keyStorage, KeyStorage: keyStorage,
Keys: keys, Keys: keys,
Eventstore: eventstoreClient, Eventstore: eventstoreClient,
@ -338,18 +343,25 @@ func startAPIs(
if err != nil { if err != nil {
return fmt.Errorf("error creating api %w", err) return fmt.Errorf("error creating api %w", err)
} }
authRepo, err := auth_es.Start(ctx, config.Auth, config.SystemDefaults, commands, queries, dbClient, eventstore, keys.OIDC, keys.User, config.Eventstore.AllowOrderByCreationDate)
config.Auth.Spooler.Client = dbClient
config.Auth.Spooler.Eventstore = eventstore
authRepo, err := auth_es.Start(ctx, config.Auth, config.SystemDefaults, commands, queries, dbClient, eventstore, keys.OIDC, keys.User)
if err != nil { if err != nil {
return fmt.Errorf("error starting auth repo: %w", err) return fmt.Errorf("error starting auth repo: %w", err)
} }
adminRepo, err := admin_es.Start(ctx, config.Admin, store, dbClient, eventstore, config.Eventstore.AllowOrderByCreationDate)
config.Admin.Spooler.Client = dbClient
config.Admin.Spooler.Eventstore = eventstore
err = admin_es.Start(ctx, config.Admin, store, dbClient)
if err != nil { if err != nil {
return fmt.Errorf("error starting admin repo: %w", err) return fmt.Errorf("error starting admin repo: %w", err)
} }
if err := apis.RegisterServer(ctx, system.CreateServer(commands, queries, adminRepo, config.Database.DatabaseName(), config.DefaultInstance, config.ExternalDomain)); err != nil {
if err := apis.RegisterServer(ctx, system.CreateServer(commands, queries, config.Database.DatabaseName(), config.DefaultInstance, config.ExternalDomain)); err != nil {
return err return err
} }
if err := apis.RegisterServer(ctx, admin.CreateServer(config.Database.DatabaseName(), commands, queries, config.SystemDefaults, adminRepo, config.ExternalSecure, keys.User, config.AuditLogRetention)); err != nil { if err := apis.RegisterServer(ctx, admin.CreateServer(config.Database.DatabaseName(), commands, queries, config.SystemDefaults, config.ExternalSecure, keys.User, config.AuditLogRetention)); err != nil {
return err return err
} }
if err := apis.RegisterServer(ctx, management.CreateServer(commands, queries, config.SystemDefaults, keys.User, config.ExternalSecure, config.AuditLogRetention)); err != nil { if err := apis.RegisterServer(ctx, management.CreateServer(commands, queries, config.SystemDefaults, keys.User, config.ExternalSecure, config.AuditLogRetention)); err != nil {

View File

@ -306,6 +306,7 @@ export interface OnboardingActions {
iconClasses?: string; iconClasses?: string;
darkcolor: string; darkcolor: string;
lightcolor: string; lightcolor: string;
aggregateType: string;
} }
type OnboardingEvent = { type OnboardingEvent = {
@ -330,7 +331,12 @@ export class AdminService {
tap(() => this.onboardingLoading.next(true)), tap(() => this.onboardingLoading.next(true)),
switchMap((actions) => { switchMap((actions) => {
const searchForTypes = actions.map((oe) => oe.oneof).flat(); const searchForTypes = actions.map((oe) => oe.oneof).flat();
const eventsReq = new ListEventsRequest().setAsc(true).setEventTypesList(searchForTypes).setAsc(false); const aggregateTypes = actions.map((oe) => oe.aggregateType);
const eventsReq = new ListEventsRequest()
.setAsc(true)
.setEventTypesList(searchForTypes)
.setAggregateTypesList(aggregateTypes)
.setAsc(false);
return from(this.listEvents(eventsReq)).pipe( return from(this.listEvents(eventsReq)).pipe(
map((events) => { map((events) => {
const el = events.toObject().eventsList.filter((e) => e.editor?.service !== 'System-API' && e.editor?.userId); const el = events.toObject().eventsList.filter((e) => e.editor?.service !== 'System-API' && e.editor?.userId);

View File

@ -28,6 +28,7 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'las la-database', iconClasses: 'las la-database',
darkcolor: greendark, darkcolor: greendark,
lightcolor: greenlight, lightcolor: greenlight,
aggregateType: 'project',
}, },
{ {
order: 1, order: 1,
@ -37,6 +38,7 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'lab la-openid', iconClasses: 'lab la-openid',
darkcolor: purpledark, darkcolor: purpledark,
lightcolor: purplelight, lightcolor: purplelight,
aggregateType: 'project',
}, },
{ {
order: 2, order: 2,
@ -46,6 +48,7 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'las la-user', iconClasses: 'las la-user',
darkcolor: bluedark, darkcolor: bluedark,
lightcolor: bluelight, lightcolor: bluelight,
aggregateType: 'user',
}, },
{ {
order: 3, order: 3,
@ -55,6 +58,7 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'las la-shield-alt', iconClasses: 'las la-shield-alt',
darkcolor: reddark, darkcolor: reddark,
lightcolor: redlight, lightcolor: redlight,
aggregateType: 'user_grant',
}, },
{ {
order: 4, order: 4,
@ -65,6 +69,7 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'las la-swatchbook', iconClasses: 'las la-swatchbook',
darkcolor: pinkdark, darkcolor: pinkdark,
lightcolor: pinklight, lightcolor: pinklight,
aggregateType: 'instance',
}, },
{ {
order: 5, order: 5,
@ -75,5 +80,6 @@ export const ONBOARDING_EVENTS: OnboardingActions[] = [
iconClasses: 'las la-envelope', iconClasses: 'las la-envelope',
darkcolor: yellowdark, darkcolor: yellowdark,
lightcolor: yellowlight, lightcolor: yellowlight,
aggregateType: 'instance',
}, },
]; ];

View File

@ -8,6 +8,9 @@ Database:
cockroach: cockroach:
# This makes the e2e config reusable with an out-of-docker zitadel process and an /etc/hosts entry # This makes the e2e config reusable with an out-of-docker zitadel process and an /etc/hosts entry
Host: host.docker.internal Host: host.docker.internal
EventPushConnRatio: 0.2
MaxOpenConns: 40
MaxIdleConns: 20
TLS: TLS:
Enabled: false Enabled: false

View File

@ -32,7 +32,7 @@ services:
db: db:
restart: 'always' restart: 'always'
image: 'cockroachdb/cockroach:v22.2.2' image: 'cockroachdb/cockroach:v22.2.10'
command: 'start-single-node --insecure --http-addr :9090' command: 'start-single-node --insecure --http-addr :9090'
healthcheck: healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9090/health?ready=1'] test: ['CMD', 'curl', '-f', 'http://localhost:9090/health?ready=1']

View File

@ -8,6 +8,9 @@ Database:
cockroach: cockroach:
# This makes the e2e config reusable with an out-of-docker zitadel process and an /etc/hosts entry # This makes the e2e config reusable with an out-of-docker zitadel process and an /etc/hosts entry
Host: host.docker.internal Host: host.docker.internal
EventPushConnRatio: 0.2
MaxOpenConns: 40
MaxIdleConns: 20
TLS: TLS:
Enabled: false Enabled: false

View File

@ -16,7 +16,7 @@ func UserMetadataListFromQuery(c *actions.FieldConfig, metadata *query.UserMetad
result := &userMetadataList{ result := &userMetadataList{
Count: metadata.Count, Count: metadata.Count,
Sequence: metadata.Sequence, Sequence: metadata.Sequence,
Timestamp: metadata.Timestamp, Timestamp: metadata.LastRun,
Metadata: make([]*userMetadata, len(metadata.Metadata)), Metadata: make([]*userMetadata, len(metadata.Metadata)),
} }

View File

@ -143,7 +143,7 @@ type humanUser struct {
Sequence uint64 Sequence uint64
State domain.UserState State domain.UserState
Username string Username string
LoginNames database.StringArray LoginNames database.TextArray[string]
PreferredLoginName string PreferredLoginName string
Human human Human human
} }
@ -170,7 +170,7 @@ type machineUser struct {
Sequence uint64 Sequence uint64
State domain.UserState State domain.UserState
Username string Username string
LoginNames database.StringArray LoginNames database.TextArray[string]
PreferredLoginName string PreferredLoginName string
Machine machine Machine machine
} }

View File

@ -65,7 +65,7 @@ func UserGrantsFromQuery(c *actions.FieldConfig, userGrants *query.UserGrants) g
grantList := &userGrantList{ grantList := &userGrantList{
Count: userGrants.Count, Count: userGrants.Count,
Sequence: userGrants.Sequence, Sequence: userGrants.Sequence,
Timestamp: userGrants.Timestamp, Timestamp: userGrants.LastRun,
Grants: make([]*userGrant, len(userGrants.UserGrants)), Grants: make([]*userGrant, len(userGrants.UserGrants)),
} }

View File

@ -1,14 +0,0 @@
package repository
import (
"context"
"github.com/zitadel/zitadel/internal/view/model"
)
type AdministratorRepository interface {
GetFailedEvents(ctx context.Context, instanceID string) ([]*model.FailedEvent, error)
RemoveFailedEvent(context.Context, *model.FailedEvent) error
GetViews(instanceID string) ([]*model.View, error)
ClearView(ctx context.Context, db, viewName string) error
}

View File

@ -1,51 +0,0 @@
package eventstore
import (
"context"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
view_model "github.com/zitadel/zitadel/internal/view/model"
"github.com/zitadel/zitadel/internal/view/repository"
)
var dbList = []string{"auth", "adminapi"}
type AdministratorRepo struct {
View *view.View
}
func (repo *AdministratorRepo) GetFailedEvents(ctx context.Context, instanceID string) ([]*view_model.FailedEvent, error) {
allFailedEvents := make([]*view_model.FailedEvent, 0)
for _, db := range dbList {
failedEvents, err := repo.View.AllFailedEvents(db, instanceID)
if err != nil {
return nil, err
}
for _, failedEvent := range failedEvents {
allFailedEvents = append(allFailedEvents, repository.FailedEventToModel(failedEvent))
}
}
return allFailedEvents, nil
}
func (repo *AdministratorRepo) RemoveFailedEvent(ctx context.Context, failedEvent *view_model.FailedEvent) error {
return repo.View.RemoveFailedEvent(failedEvent.Database, repository.FailedEventFromModel(failedEvent))
}
func (repo *AdministratorRepo) GetViews(instanceID string) ([]*view_model.View, error) {
views := make([]*view_model.View, 0)
for _, db := range dbList {
sequences, err := repo.View.AllCurrentSequences(db, instanceID)
if err != nil {
return nil, err
}
for _, sequence := range sequences {
views = append(views, repository.CurrentSequenceToModel(sequence))
}
}
return views, nil
}
func (repo *AdministratorRepo) ClearView(ctx context.Context, database, view string) error {
return repo.View.ClearView(database, view)
}

View File

@ -5,56 +5,55 @@ import (
"time" "time"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view" "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
v1 "github.com/zitadel/zitadel/internal/eventstore/v1" "github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore/v1/query" "github.com/zitadel/zitadel/internal/eventstore"
handler2 "github.com/zitadel/zitadel/internal/eventstore/handler/v2"
"github.com/zitadel/zitadel/internal/static" "github.com/zitadel/zitadel/internal/static"
) )
type Configs map[string]*Config
type Config struct { type Config struct {
Client *database.DB
Eventstore *eventstore.Eventstore
BulkLimit uint64
FailureCountUntilSkip uint64
HandleActiveInstances time.Duration
TransactionDuration time.Duration
Handlers map[string]*ConfigOverwrites
}
type ConfigOverwrites struct {
MinimumCycleDuration time.Duration MinimumCycleDuration time.Duration
} }
type handler struct { func Register(ctx context.Context, config Config, view *view.View, static static.Storage) {
view *view.View if static == nil {
bulkLimit uint64 return
cycleDuration time.Duration
errorCountUntilSkip uint64
es v1.Eventstore
}
func (h *handler) Eventstore() v1.Eventstore {
return h.es
}
func Register(ctx context.Context, configs Configs, bulkLimit, errorCount uint64, view *view.View, es v1.Eventstore, static static.Storage) []query.Handler {
handlers := []query.Handler{}
if static != nil {
handlers = append(handlers, newStyling(ctx,
handler{view, bulkLimit, configs.cycleDuration("Styling"), errorCount, es},
static))
} }
return handlers
newStyling(ctx,
config.overwrite("Styling"),
static,
view,
).Start(ctx)
} }
func (configs Configs) cycleDuration(viewModel string) time.Duration { func (config Config) overwrite(viewModel string) handler2.Config {
c, ok := configs[viewModel] c := handler2.Config{
Client: config.Client,
Eventstore: config.Eventstore,
BulkLimit: uint16(config.BulkLimit),
RequeueEvery: 3 * time.Minute,
HandleActiveInstances: config.HandleActiveInstances,
MaxFailureCount: uint8(config.FailureCountUntilSkip),
TransactionDuration: config.TransactionDuration,
}
overwrite, ok := config.Handlers[viewModel]
if !ok { if !ok {
return 3 * time.Minute return c
} }
return c.MinimumCycleDuration if overwrite.MinimumCycleDuration > 0 {
} c.RequeueEvery = overwrite.MinimumCycleDuration
}
func (h *handler) MinimumCycleDuration() time.Duration { return c
return h.cycleDuration
}
func (h *handler) LockDuration() time.Duration {
return h.cycleDuration / 3
}
func (h *handler) QueryLimit() uint64 {
return h.bulkLimit
} }

View File

@ -9,15 +9,12 @@ import (
"github.com/lucasb-eyer/go-colorful" "github.com/lucasb-eyer/go-colorful"
"github.com/muesli/gamut" "github.com/muesli/gamut"
"github.com/zitadel/logging"
admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/api/ui/login" "github.com/zitadel/zitadel/internal/api/ui/login"
"github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
v1 "github.com/zitadel/zitadel/internal/eventstore/v1" "github.com/zitadel/zitadel/internal/eventstore/handler/v2"
"github.com/zitadel/zitadel/internal/eventstore/v1/models"
"github.com/zitadel/zitadel/internal/eventstore/v1/query"
"github.com/zitadel/zitadel/internal/eventstore/v1/spooler"
iam_model "github.com/zitadel/zitadel/internal/iam/repository/view/model" iam_model "github.com/zitadel/zitadel/internal/iam/repository/view/model"
"github.com/zitadel/zitadel/internal/repository/instance" "github.com/zitadel/zitadel/internal/repository/instance"
"github.com/zitadel/zitadel/internal/repository/org" "github.com/zitadel/zitadel/internal/repository/org"
@ -28,152 +25,231 @@ const (
stylingTable = "adminapi.styling2" stylingTable = "adminapi.styling2"
) )
var _ handler.Projection = (*Styling)(nil)
type Styling struct { type Styling struct {
handler static static.Storage
static static.Storage view *admin_view.View
subscription *v1.Subscription
} }
func newStyling(ctx context.Context, handler handler, static static.Storage) *Styling { func newStyling(ctx context.Context, config handler.Config, static static.Storage, view *admin_view.View) *handler.Handler {
h := &Styling{ return handler.NewHandler(
handler: handler, ctx,
static: static, &config,
} &Styling{
h.subscribe(ctx) static: static,
view: view,
return h },
)
} }
func (m *Styling) subscribe(ctx context.Context) { // Name implements [handler.Projection]
m.subscription = m.es.Subscribe(m.AggregateTypes()...) func (*Styling) Name() string {
go func() {
for event := range m.subscription.Events {
query.ReduceEvent(ctx, m, event)
}
}()
}
func (m *Styling) ViewModel() string {
return stylingTable return stylingTable
} }
func (m *Styling) Subscription() *v1.Subscription { // Reducers implements [handler.Projection]
return m.subscription func (s *Styling) Reducers() []handler.AggregateReducer {
} return []handler.AggregateReducer{
{
func (_ *Styling) AggregateTypes() []models.AggregateType { Aggregate: org.AggregateType,
return []models.AggregateType{org.AggregateType, instance.AggregateType} EventReducers: []handler.EventReducer{
} {
Event: org.LabelPolicyAddedEventType,
func (m *Styling) CurrentSequence(ctx context.Context, instanceID string) (uint64, error) { Reduce: s.processLabelPolicy,
sequence, err := m.view.GetLatestStylingSequence(ctx, instanceID) },
if err != nil { {
return 0, err Event: org.LabelPolicyChangedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyLogoAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyLogoRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyIconAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyIconRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyLogoDarkAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyLogoDarkRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyIconDarkAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyIconDarkRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyFontAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyFontRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyAssetsRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.LabelPolicyActivatedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: org.OrgRemovedEventType,
Reduce: s.processLabelPolicy,
},
},
},
{
Aggregate: instance.AggregateType,
EventReducers: []handler.EventReducer{
{
Event: instance.LabelPolicyAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyChangedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyLogoAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyLogoRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyIconAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyIconRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyLogoDarkAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyLogoDarkRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyIconDarkAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyIconDarkRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyFontAddedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyFontRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyAssetsRemovedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.LabelPolicyActivatedEventType,
Reduce: s.processLabelPolicy,
},
{
Event: instance.InstanceRemovedEventType,
Reduce: s.processLabelPolicy,
},
},
},
} }
return sequence.CurrentSequence, nil
} }
func (m *Styling) EventQuery(ctx context.Context, instanceIDs []string) (*models.SearchQuery, error) { func (m *Styling) processLabelPolicy(event eventstore.Event) (_ *handler.Statement, err error) {
sequences, err := m.view.GetLatestStylingSequences(ctx, instanceIDs) return handler.NewStatement(event, func(ex handler.Executer, projectionName string) error {
if err != nil { policy := new(iam_model.LabelPolicyView)
return nil, err switch event.Type() {
} case instance.LabelPolicyAddedEventType,
searchQuery := models.NewSearchQuery() org.LabelPolicyAddedEventType:
for _, instanceID := range instanceIDs { err = policy.AppendEvent(event)
var seq uint64 case instance.LabelPolicyChangedEventType,
for _, sequence := range sequences { org.LabelPolicyChangedEventType,
if sequence.InstanceID == instanceID { instance.LabelPolicyLogoAddedEventType,
seq = sequence.CurrentSequence org.LabelPolicyLogoAddedEventType,
break instance.LabelPolicyLogoRemovedEventType,
org.LabelPolicyLogoRemovedEventType,
instance.LabelPolicyIconAddedEventType,
org.LabelPolicyIconAddedEventType,
instance.LabelPolicyIconRemovedEventType,
org.LabelPolicyIconRemovedEventType,
instance.LabelPolicyLogoDarkAddedEventType,
org.LabelPolicyLogoDarkAddedEventType,
instance.LabelPolicyLogoDarkRemovedEventType,
org.LabelPolicyLogoDarkRemovedEventType,
instance.LabelPolicyIconDarkAddedEventType,
org.LabelPolicyIconDarkAddedEventType,
instance.LabelPolicyIconDarkRemovedEventType,
org.LabelPolicyIconDarkRemovedEventType,
instance.LabelPolicyFontAddedEventType,
org.LabelPolicyFontAddedEventType,
instance.LabelPolicyFontRemovedEventType,
org.LabelPolicyFontRemovedEventType,
instance.LabelPolicyAssetsRemovedEventType,
org.LabelPolicyAssetsRemovedEventType:
policy, err = m.view.StylingByAggregateIDAndState(event.Aggregate().ID, event.Aggregate().InstanceID, int32(domain.LabelPolicyStatePreview))
if err != nil {
return err
} }
err = policy.AppendEvent(event)
case instance.LabelPolicyActivatedEventType,
org.LabelPolicyActivatedEventType:
policy, err = m.view.StylingByAggregateIDAndState(event.Aggregate().ID, event.Aggregate().InstanceID, int32(domain.LabelPolicyStatePreview))
if err != nil {
return err
}
err = policy.AppendEvent(event)
if err != nil {
return err
}
err = m.generateStylingFile(policy)
case instance.InstanceRemovedEventType:
err = m.deleteInstanceFilesFromStorage(event.Aggregate().InstanceID)
if err != nil {
return err
}
return m.view.DeleteInstanceStyling(event)
case org.OrgRemovedEventType:
return m.view.UpdateOrgOwnerRemovedStyling(event)
default:
return nil
} }
searchQuery.AddQuery().
AggregateTypeFilter(m.AggregateTypes()...).
LatestSequenceFilter(seq).
InstanceIDFilter(instanceID)
}
return searchQuery, nil
}
func (m *Styling) Reduce(event *models.Event) (err error) {
switch event.AggregateType {
case org.AggregateType, instance.AggregateType:
err = m.processLabelPolicy(event)
}
return err
}
func (m *Styling) processLabelPolicy(event *models.Event) (err error) {
policy := new(iam_model.LabelPolicyView)
switch eventstore.EventType(event.Type) {
case instance.LabelPolicyAddedEventType,
org.LabelPolicyAddedEventType:
err = policy.AppendEvent(event)
case instance.LabelPolicyChangedEventType,
org.LabelPolicyChangedEventType,
instance.LabelPolicyLogoAddedEventType,
org.LabelPolicyLogoAddedEventType,
instance.LabelPolicyLogoRemovedEventType,
org.LabelPolicyLogoRemovedEventType,
instance.LabelPolicyIconAddedEventType,
org.LabelPolicyIconAddedEventType,
instance.LabelPolicyIconRemovedEventType,
org.LabelPolicyIconRemovedEventType,
instance.LabelPolicyLogoDarkAddedEventType,
org.LabelPolicyLogoDarkAddedEventType,
instance.LabelPolicyLogoDarkRemovedEventType,
org.LabelPolicyLogoDarkRemovedEventType,
instance.LabelPolicyIconDarkAddedEventType,
org.LabelPolicyIconDarkAddedEventType,
instance.LabelPolicyIconDarkRemovedEventType,
org.LabelPolicyIconDarkRemovedEventType,
instance.LabelPolicyFontAddedEventType,
org.LabelPolicyFontAddedEventType,
instance.LabelPolicyFontRemovedEventType,
org.LabelPolicyFontRemovedEventType,
instance.LabelPolicyAssetsRemovedEventType,
org.LabelPolicyAssetsRemovedEventType:
policy, err = m.view.StylingByAggregateIDAndState(event.AggregateID, event.InstanceID, int32(domain.LabelPolicyStatePreview))
if err != nil { if err != nil {
return err return err
} }
err = policy.AppendEvent(event)
case instance.LabelPolicyActivatedEventType, return m.view.PutStyling(policy, event)
org.LabelPolicyActivatedEventType: }), nil
policy, err = m.view.StylingByAggregateIDAndState(event.AggregateID, event.InstanceID, int32(domain.LabelPolicyStatePreview))
if err != nil {
return err
}
err = policy.AppendEvent(event)
if err != nil {
return err
}
err = m.generateStylingFile(policy)
case instance.InstanceRemovedEventType:
err = m.deleteInstanceFilesFromStorage(event.InstanceID)
if err != nil {
return err
}
return m.view.DeleteInstanceStyling(event)
case org.OrgRemovedEventType:
return m.view.UpdateOrgOwnerRemovedStyling(event)
default:
return m.view.ProcessedStylingSequence(event)
}
if err != nil {
return err
}
return m.view.PutStyling(policy, event)
}
func (m *Styling) OnError(event *models.Event, err error) error {
logging.WithFields("id", event.AggregateID).WithError(err).Warn("something went wrong in label policy handler")
return spooler.HandleError(event, err, m.view.GetLatestStylingFailedEvent, m.view.ProcessedStylingFailedEvent, m.view.ProcessedStylingSequence, m.errorCountUntilSkip)
}
func (m *Styling) OnSuccess(instanceIDs []string) error {
return spooler.HandleSuccess(m.view.UpdateStylingSpoolerRunTimestamp, instanceIDs)
} }
func (m *Styling) generateStylingFile(policy *iam_model.LabelPolicyView) error { func (m *Styling) generateStylingFile(policy *iam_model.LabelPolicyView) error {

View File

@ -3,46 +3,23 @@ package eventsourcing
import ( import (
"context" "context"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/eventstore" admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/spooler"
admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view" admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
eventstore2 "github.com/zitadel/zitadel/internal/eventstore"
v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
es_spol "github.com/zitadel/zitadel/internal/eventstore/v1/spooler"
"github.com/zitadel/zitadel/internal/static" "github.com/zitadel/zitadel/internal/static"
) )
type Config struct { type Config struct {
SearchLimit uint64 Spooler admin_handler.Config
Spooler spooler.SpoolerConfig
} }
type EsRepository struct { func Start(ctx context.Context, conf Config, static static.Storage, dbClient *database.DB) error {
spooler *es_spol.Spooler
eventstore.AdministratorRepo
}
func Start(ctx context.Context, conf Config, static static.Storage, dbClient *database.DB, esV2 *eventstore2.Eventstore, allowOrderByCreationDate bool) (*EsRepository, error) {
es, err := v1.Start(dbClient, allowOrderByCreationDate)
if err != nil {
return nil, err
}
view, err := admin_view.StartView(dbClient) view, err := admin_view.StartView(dbClient)
if err != nil { if err != nil {
return nil, err return err
} }
spool := spooler.StartSpooler(ctx, conf.Spooler, es, esV2, view, dbClient, static) admin_handler.Register(ctx, conf.Spooler, view, static)
return &EsRepository{
spooler: spool,
AdministratorRepo: eventstore.AdministratorRepo{
View: view,
},
}, nil
}
func (repo *EsRepository) Health(ctx context.Context) error {
return nil return nil
} }

View File

@ -1,20 +0,0 @@
package spooler
import (
"database/sql"
"time"
es_locker "github.com/zitadel/zitadel/internal/eventstore/v1/locker"
)
const (
lockTable = "adminapi.locks"
)
type locker struct {
dbClient *sql.DB
}
func (l *locker) Renew(lockerID, viewModel, instanceID string, waitTime time.Duration) error {
return es_locker.Renew(l.dbClient, lockTable, lockerID, viewModel, instanceID, waitTime)
}

View File

@ -1,35 +0,0 @@
package spooler
import (
"context"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
"github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
"github.com/zitadel/zitadel/internal/eventstore/v1/spooler"
"github.com/zitadel/zitadel/internal/static"
)
type SpoolerConfig struct {
BulkLimit uint64
FailureCountUntilSkip uint64
ConcurrentWorkers int
ConcurrentInstances int
Handlers handler.Configs
}
func StartSpooler(ctx context.Context, c SpoolerConfig, es v1.Eventstore, esV2 *eventstore.Eventstore, view *view.View, sql *database.DB, static static.Storage) *spooler.Spooler {
spoolerConfig := spooler.Config{
Eventstore: es,
EventstoreV2: esV2,
Locker: &locker{dbClient: sql.DB},
ConcurrentWorkers: c.ConcurrentWorkers,
ConcurrentInstances: c.ConcurrentInstances,
ViewHandlers: handler.Register(ctx, c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, static),
}
spool := spoolerConfig.New()
spool.Start()
return spool
}

View File

@ -1,26 +0,0 @@
package view
import (
"github.com/zitadel/zitadel/internal/view/repository"
)
const (
errTable = "adminapi.failed_events"
errColumn = "failed_events"
)
func (v *View) saveFailedEvent(failedEvent *repository.FailedEvent) error {
return repository.SaveFailedEvent(v.Db, errTable, failedEvent)
}
func (v *View) RemoveFailedEvent(database string, failedEvent *repository.FailedEvent) error {
return repository.RemoveFailedEvent(v.Db, database+"."+errColumn, failedEvent)
}
func (v *View) latestFailedEvent(viewName, instanceID string, sequence uint64) (*repository.FailedEvent, error) {
return repository.LatestFailedEvent(v.Db, errTable, viewName, instanceID, sequence)
}
func (v *View) AllFailedEvents(db, instanceID string) ([]*repository.FailedEvent, error) {
return repository.AllFailedEvents(v.Db, db+"."+errColumn, instanceID)
}

View File

@ -1,49 +0,0 @@
package view
import (
"context"
"time"
"github.com/zitadel/zitadel/internal/eventstore/v1/models"
"github.com/zitadel/zitadel/internal/view/repository"
)
const (
sequencesTable = "adminapi.current_sequences"
)
func (v *View) saveCurrentSequence(viewName string, event *models.Event) error {
return repository.SaveCurrentSequence(v.Db, sequencesTable, viewName, event.InstanceID, event.Sequence, event.CreationDate)
}
func (v *View) latestSequence(ctx context.Context, viewName, instanceID string) (*repository.CurrentSequence, error) {
return repository.LatestSequence(v.Db, v.TimeTravel(ctx, sequencesTable), viewName, instanceID)
}
func (v *View) latestSequences(ctx context.Context, viewName string, instanceIDs []string) ([]*repository.CurrentSequence, error) {
return repository.LatestSequences(v.Db, v.TimeTravel(ctx, sequencesTable), viewName, instanceIDs)
}
func (v *View) AllCurrentSequences(db, instanceID string) ([]*repository.CurrentSequence, error) {
return repository.AllCurrentSequences(v.Db, db+".current_sequences", instanceID)
}
func (v *View) updateSpoolerRunSequence(viewName string, instanceIDs []string) error {
currentSequences, err := repository.LatestSequences(v.Db, sequencesTable, viewName, instanceIDs)
if err != nil {
return err
}
for _, currentSequence := range currentSequences {
if currentSequence.ViewName == "" {
currentSequence.ViewName = viewName
}
currentSequence.LastSuccessfulSpoolerRun = time.Now()
}
return repository.UpdateCurrentSequences(v.Db, sequencesTable, currentSequences)
}
func (v *View) ClearView(db, viewName string) error {
truncateView := db + "." + viewName
sequenceTable := db + ".current_sequences"
return repository.ClearView(v.Db, truncateView, sequenceTable)
}

View File

@ -1,12 +1,9 @@
package view package view
import ( import (
"context" "github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/eventstore/v1/models"
"github.com/zitadel/zitadel/internal/iam/repository/view" "github.com/zitadel/zitadel/internal/iam/repository/view"
"github.com/zitadel/zitadel/internal/iam/repository/view/model" "github.com/zitadel/zitadel/internal/iam/repository/view/model"
global_view "github.com/zitadel/zitadel/internal/view/repository"
) )
const ( const (
@ -17,50 +14,14 @@ func (v *View) StylingByAggregateIDAndState(aggregateID, instanceID string, stat
return view.GetStylingByAggregateIDAndState(v.Db, stylingTyble, aggregateID, instanceID, state) return view.GetStylingByAggregateIDAndState(v.Db, stylingTyble, aggregateID, instanceID, state)
} }
func (v *View) PutStyling(policy *model.LabelPolicyView, event *models.Event) error { func (v *View) PutStyling(policy *model.LabelPolicyView, event eventstore.Event) error {
err := view.PutStyling(v.Db, stylingTyble, policy) return view.PutStyling(v.Db, stylingTyble, policy)
if err != nil {
return err
}
return v.ProcessedStylingSequence(event)
} }
func (v *View) DeleteInstanceStyling(event *models.Event) error { func (v *View) DeleteInstanceStyling(event eventstore.Event) error {
err := view.DeleteInstanceStyling(v.Db, stylingTyble, event.InstanceID) return view.DeleteInstanceStyling(v.Db, stylingTyble, event.Aggregate().InstanceID)
if err != nil {
return err
}
return v.ProcessedStylingSequence(event)
} }
func (v *View) UpdateOrgOwnerRemovedStyling(event *models.Event) error { func (v *View) UpdateOrgOwnerRemovedStyling(event eventstore.Event) error {
err := view.UpdateOrgOwnerRemovedStyling(v.Db, stylingTyble, event.InstanceID, event.AggregateID) return view.UpdateOrgOwnerRemovedStyling(v.Db, stylingTyble, event.Aggregate().InstanceID, event.Aggregate().ID)
if err != nil {
return err
}
return v.ProcessedStylingSequence(event)
}
func (v *View) GetLatestStylingSequence(ctx context.Context, instanceID string) (*global_view.CurrentSequence, error) {
return v.latestSequence(ctx, stylingTyble, instanceID)
}
func (v *View) GetLatestStylingSequences(ctx context.Context, instanceIDs []string) ([]*global_view.CurrentSequence, error) {
return v.latestSequences(ctx, stylingTyble, instanceIDs)
}
func (v *View) ProcessedStylingSequence(event *models.Event) error {
return v.saveCurrentSequence(stylingTyble, event)
}
func (v *View) UpdateStylingSpoolerRunTimestamp(instanceIDs []string) error {
return v.updateSpoolerRunSequence(stylingTyble, instanceIDs)
}
func (v *View) GetLatestStylingFailedEvent(sequence uint64, instanceID string) (*global_view.FailedEvent, error) {
return v.latestFailedEvent(stylingTyble, instanceID, sequence)
}
func (v *View) ProcessedStylingFailedEvent(failedEvent *global_view.FailedEvent) error {
return v.saveFailedEvent(failedEvent)
} }

View File

@ -1,8 +0,0 @@
package repository
import "context"
type Repository interface {
Health(ctx context.Context) error
AdministratorRepository
}

View File

@ -91,13 +91,7 @@ func VerifyTokenAndCreateCtxData(ctx context.Context, token, orgID, orgDomain st
verifiedOrgID, err := t.ExistsOrg(ctx, orgID, orgDomain) verifiedOrgID, err := t.ExistsOrg(ctx, orgID, orgDomain)
if err != nil { if err != nil {
err = retry(func() error { return CtxData{}, errors.ThrowPermissionDenied(nil, "AUTH-Bs7Ds", "Organisation doesn't exist")
verifiedOrgID, err = t.ExistsOrg(ctx, orgID, orgDomain)
return err
})
if err != nil {
return CtxData{}, errors.ThrowPermissionDenied(nil, "AUTH-Bs7Ds", "Organisation doesn't exist")
}
} }
return CtxData{ return CtxData{

View File

@ -16,10 +16,8 @@ func CheckPermission(ctx context.Context, resolver MembershipsResolver, roleMapp
_, userPermissionSpan := tracing.NewNamedSpan(ctx, "checkUserPermissions") _, userPermissionSpan := tracing.NewNamedSpan(ctx, "checkUserPermissions")
err = checkUserResourcePermissions(requestedPermissions, resourceID) err = checkUserResourcePermissions(requestedPermissions, resourceID)
userPermissionSpan.EndWithError(err) userPermissionSpan.EndWithError(err)
if err != nil {
return err return err
}
return nil
} }
// getUserPermissions retrieves the memberships of the authenticated user (on instance and provided organisation level), // getUserPermissions retrieves the memberships of the authenticated user (on instance and provided organisation level),
@ -33,23 +31,17 @@ func getUserPermissions(ctx context.Context, resolver MembershipsResolver, requi
} }
ctx = context.WithValue(ctx, dataKey, ctxData) ctx = context.WithValue(ctx, dataKey, ctxData)
memberships, err := resolver.SearchMyMemberships(ctx, orgID) memberships, err := resolver.SearchMyMemberships(ctx, orgID, false)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if len(memberships) == 0 { if len(memberships) == 0 {
err = retry(func() error { memberships, err = resolver.SearchMyMemberships(ctx, orgID, true)
memberships, err = resolver.SearchMyMemberships(ctx, orgID) if len(memberships) == 0 {
if err != nil { return nil, nil, errors.ThrowNotFound(nil, "AUTHZ-cdgFk", "membership not found")
return err }
}
if len(memberships) == 0 {
return errors.ThrowNotFound(nil, "AUTHZ-cdgFk", "membership not found")
}
return nil
})
if err != nil { if err != nil {
return nil, nil, nil return nil, nil, err
} }
} }
requestedPermissions, allPermissions = mapMembershipsToPermissions(requiredPerm, memberships, roleMappings) requestedPermissions, allPermissions = mapMembershipsToPermissions(requiredPerm, memberships, roleMappings)

View File

@ -18,7 +18,7 @@ type testVerifier struct {
func (v *testVerifier) VerifyAccessToken(ctx context.Context, token, clientID, projectID string) (string, string, string, string, string, error) { func (v *testVerifier) VerifyAccessToken(ctx context.Context, token, clientID, projectID string) (string, string, string, string, string, error) {
return "userID", "agentID", "clientID", "de", "orgID", nil return "userID", "agentID", "clientID", "de", "orgID", nil
} }
func (v *testVerifier) SearchMyMemberships(ctx context.Context, orgID string) ([]*Membership, error) { func (v *testVerifier) SearchMyMemberships(ctx context.Context, orgID string, _ bool) ([]*Membership, error) {
return v.memberships, nil return v.memberships, nil
} }

View File

@ -1,15 +0,0 @@
package authz
import "time"
//TODO: workaround if org projection is not yet up-to-date
func retry(retriable func() error) (err error) {
for i := 0; i < 3; i++ {
time.Sleep(500 * time.Millisecond)
err = retriable()
if err == nil {
return nil
}
}
return err
}

View File

@ -1,88 +0,0 @@
package authz
import (
"errors"
"testing"
)
func Test_retry(t *testing.T) {
type args struct {
retriable func(*int) func() error
}
type want struct {
executions int
err bool
}
tests := []struct {
name string
args args
want want
}{
{
name: "1 execution",
args: args{
retriable: func(execs *int) func() error {
return func() error {
if *execs < 1 {
*execs++
return errors.New("not 1")
}
return nil
}
},
},
want: want{
err: false,
executions: 1,
},
},
{
name: "2 execution",
args: args{
retriable: func(execs *int) func() error {
return func() error {
if *execs < 2 {
*execs++
return errors.New("not 2")
}
return nil
}
},
},
want: want{
err: false,
executions: 2,
},
},
{
name: "too many execution",
args: args{
retriable: func(execs *int) func() error {
return func() error {
if *execs < 3 {
*execs++
return errors.New("not 3")
}
return nil
}
},
},
want: want{
err: true,
executions: 3,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var execs int
if err := retry(tt.args.retriable(&execs)); (err != nil) != tt.want.err {
t.Errorf("retry() error = %v, want.err %v", err, tt.want.err)
}
if execs != tt.want.executions {
t.Errorf("retry() executions: want: %d got: %d", tt.want.executions, execs)
}
})
}
}

View File

@ -32,13 +32,13 @@ type TokenVerifier struct {
} }
type MembershipsResolver interface { type MembershipsResolver interface {
SearchMyMemberships(ctx context.Context, orgID string) ([]*Membership, error) SearchMyMemberships(ctx context.Context, orgID string, shouldTriggerBulk bool) ([]*Membership, error)
} }
type authZRepo interface { type authZRepo interface {
MembershipsResolver
VerifyAccessToken(ctx context.Context, token, verifierClientID, projectID string) (userID, agentID, clientID, prefLang, resourceOwner string, err error) VerifyAccessToken(ctx context.Context, token, verifierClientID, projectID string) (userID, agentID, clientID, prefLang, resourceOwner string, err error)
VerifierClientID(ctx context.Context, name string) (clientID, projectID string, err error) VerifierClientID(ctx context.Context, name string) (clientID, projectID string, err error)
SearchMyMemberships(ctx context.Context, orgID string) ([]*Membership, error)
ProjectIDAndOriginsByClientID(ctx context.Context, clientID string) (projectID string, origins []string, err error) ProjectIDAndOriginsByClientID(ctx context.Context, clientID string) (projectID string, origins []string, err error)
ExistsOrg(ctx context.Context, id, domain string) (string, error) ExistsOrg(ctx context.Context, id, domain string) (string, error)
} }
@ -135,10 +135,10 @@ func (v *TokenVerifier) RegisterServer(appName, methodPrefix string, mappings Me
} }
} }
func (v *TokenVerifier) SearchMyMemberships(ctx context.Context, orgID string) (_ []*Membership, err error) { func (v *TokenVerifier) SearchMyMemberships(ctx context.Context, orgID string, shouldTriggerBulk bool) (_ []*Membership, err error) {
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
return v.authZRepo.SearchMyMemberships(ctx, orgID) return v.authZRepo.SearchMyMemberships(ctx, orgID, shouldTriggerBulk)
} }
func (v *TokenVerifier) ProjectIDAndOriginsByClientID(ctx context.Context, clientID string) (_ string, _ []string, err error) { func (v *TokenVerifier) ProjectIDAndOriginsByClientID(ctx context.Context, clientID string) (_ string, _ []string, err error) {

View File

@ -57,15 +57,19 @@ func eventRequestToFilter(ctx context.Context, req *admin_pb.ListEventsRequest)
OrderDesc(). OrderDesc().
InstanceID(authz.GetInstance(ctx).InstanceID()). InstanceID(authz.GetInstance(ctx).InstanceID()).
Limit(limit). Limit(limit).
AwaitOpenTransactions().
ResourceOwner(req.ResourceOwner). ResourceOwner(req.ResourceOwner).
EditorUser(req.EditorUserId). EditorUser(req.EditorUserId).
AddQuery().
AggregateIDs(aggregateIDs...).
AggregateTypes(aggregateTypes...).
EventTypes(eventTypes...).
CreationDateAfter(req.CreationDate.AsTime()). CreationDateAfter(req.CreationDate.AsTime()).
SequenceGreater(req.Sequence). SequenceGreater(req.Sequence)
Builder()
if len(aggregateIDs) > 0 || len(aggregateTypes) > 0 || len(eventTypes) > 0 {
builder.AddQuery().
AggregateIDs(aggregateIDs...).
AggregateTypes(aggregateTypes...).
EventTypes(eventTypes...).
Builder()
}
if req.Asc { if req.Asc {
builder.OrderAsc() builder.OrderAsc()

View File

@ -10,11 +10,6 @@ import (
func (s *Server) ListFailedEvents(ctx context.Context, _ *admin_pb.ListFailedEventsRequest) (*admin_pb.ListFailedEventsResponse, error) { func (s *Server) ListFailedEvents(ctx context.Context, _ *admin_pb.ListFailedEventsRequest) (*admin_pb.ListFailedEventsResponse, error) {
instanceID := authz.GetInstance(ctx).InstanceID() instanceID := authz.GetInstance(ctx).InstanceID()
failedEventsOld, err := s.administrator.GetFailedEvents(ctx, instanceID)
if err != nil {
return nil, err
}
convertedOld := FailedEventsViewToPb(failedEventsOld)
instanceIDQuery, err := query.NewFailedEventInstanceIDSearchQuery(instanceID) instanceIDQuery, err := query.NewFailedEventInstanceIDSearchQuery(instanceID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -25,17 +20,11 @@ func (s *Server) ListFailedEvents(ctx context.Context, _ *admin_pb.ListFailedEve
if err != nil { if err != nil {
return nil, err return nil, err
} }
convertedNew := FailedEventsToPb(s.database, failedEvents) return &admin_pb.ListFailedEventsResponse{Result: FailedEventsToPb(s.database, failedEvents)}, nil
return &admin_pb.ListFailedEventsResponse{Result: append(convertedOld, convertedNew...)}, nil
} }
func (s *Server) RemoveFailedEvent(ctx context.Context, req *admin_pb.RemoveFailedEventRequest) (*admin_pb.RemoveFailedEventResponse, error) { func (s *Server) RemoveFailedEvent(ctx context.Context, req *admin_pb.RemoveFailedEventRequest) (*admin_pb.RemoveFailedEventResponse, error) {
var err error err := s.query.RemoveFailedEvent(ctx, req.ViewName, authz.GetInstance(ctx).InstanceID(), req.FailedSequence)
if req.Database != s.database {
err = s.administrator.RemoveFailedEvent(ctx, RemoveFailedEventRequestToModel(ctx, req))
} else {
err = s.query.RemoveFailedEvent(ctx, req.ViewName, authz.GetInstance(ctx).InstanceID(), req.FailedSequence)
}
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,39 +1,12 @@
package admin package admin
import ( import (
"context"
"google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/timestamppb"
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/query" "github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/view/model"
admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin" admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin"
) )
func FailedEventsViewToPb(failedEvents []*model.FailedEvent) []*admin_pb.FailedEvent {
events := make([]*admin_pb.FailedEvent, len(failedEvents))
for i, failedEvent := range failedEvents {
events[i] = FailedEventViewToPb(failedEvent)
}
return events
}
func FailedEventViewToPb(failedEvent *model.FailedEvent) *admin_pb.FailedEvent {
var lastFailed *timestamppb.Timestamp
if !failedEvent.LastFailed.IsZero() {
lastFailed = timestamppb.New(failedEvent.LastFailed)
}
return &admin_pb.FailedEvent{
Database: failedEvent.Database,
ViewName: failedEvent.ViewName,
FailedSequence: failedEvent.FailedSequence,
FailureCount: failedEvent.FailureCount,
ErrorMessage: failedEvent.ErrMsg,
LastFailed: lastFailed,
}
}
func FailedEventsToPb(database string, failedEvents *query.FailedEvents) []*admin_pb.FailedEvent { func FailedEventsToPb(database string, failedEvents *query.FailedEvents) []*admin_pb.FailedEvent {
events := make([]*admin_pb.FailedEvent, len(failedEvents.FailedEvents)) events := make([]*admin_pb.FailedEvent, len(failedEvents.FailedEvents))
for i, failedEvent := range failedEvents.FailedEvents { for i, failedEvent := range failedEvents.FailedEvents {
@ -56,12 +29,3 @@ func FailedEventToPb(database string, failedEvent *query.FailedEvent) *admin_pb.
LastFailed: lastFailed, LastFailed: lastFailed,
} }
} }
func RemoveFailedEventRequestToModel(ctx context.Context, req *admin_pb.RemoveFailedEventRequest) *model.FailedEvent {
return &model.FailedEvent{
Database: req.Database,
ViewName: req.ViewName,
FailedSequence: req.FailedSequence,
InstanceID: authz.GetInstance(ctx).InstanceID(),
}
}

View File

@ -1,101 +0,0 @@
package admin
import (
"context"
"testing"
"time"
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/test"
"github.com/zitadel/zitadel/internal/view/model"
admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin"
)
func TestFailedEventsToPbFields(t *testing.T) {
type args struct {
failedEvents []*model.FailedEvent
}
tests := []struct {
name string
args args
}{
{
name: "all fields",
args: args{
failedEvents: []*model.FailedEvent{
{
Database: "admin",
ViewName: "users",
FailedSequence: 456,
FailureCount: 5,
LastFailed: time.Now(),
ErrMsg: "some error",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := FailedEventsViewToPb(tt.args.failedEvents)
for _, g := range got {
test.AssertFieldsMapped(t, g)
}
})
}
}
func TestFailedEventToPbFields(t *testing.T) {
type args struct {
failedEvent *model.FailedEvent
}
tests := []struct {
name string
args args
}{
{
"all fields",
args{
failedEvent: &model.FailedEvent{
Database: "admin",
ViewName: "users",
FailedSequence: 456,
FailureCount: 5,
LastFailed: time.Now(),
ErrMsg: "some error",
},
},
},
}
for _, tt := range tests {
converted := FailedEventViewToPb(tt.args.failedEvent)
test.AssertFieldsMapped(t, converted)
}
}
func TestRemoveFailedEventRequestToModelFields(t *testing.T) {
type args struct {
ctx context.Context
req *admin_pb.RemoveFailedEventRequest
}
tests := []struct {
name string
args args
}{
{
"all fields",
args{
ctx: authz.WithInstanceID(context.Background(), "instanceID"),
req: &admin_pb.RemoveFailedEventRequest{
Database: "admin",
ViewName: "users",
FailedSequence: 456,
},
},
},
}
for _, tt := range tests {
converted := RemoveFailedEventRequestToModel(tt.args.ctx, tt.args.req)
test.AssertFieldsMapped(t, converted, "FailureCount", "LastFailed", "ErrMsg")
}
}

View File

@ -27,7 +27,7 @@ func (s *Server) ListIAMMembers(ctx context.Context, req *admin_pb.ListIAMMember
return nil, err return nil, err
} }
return &admin_pb.ListIAMMembersResponse{ return &admin_pb.ListIAMMembersResponse{
Details: object.ToListDetails(res.Count, res.Sequence, res.Timestamp), Details: object.ToListDetails(res.Count, res.Sequence, res.LastRun),
//TODO: resource owner of user of the member instead of the membership resource owner //TODO: resource owner of user of the member instead of the membership resource owner
Result: member.MembersToPb("", res.Members), Result: member.MembersToPb("", res.Members),
}, nil }, nil

View File

@ -19,7 +19,7 @@ func (s *Server) ListSecretGenerators(ctx context.Context, req *admin_pb.ListSec
} }
return &admin_pb.ListSecretGeneratorsResponse{ return &admin_pb.ListSecretGeneratorsResponse{
Result: SecretGeneratorsToPb(result.SecretGenerators), Result: SecretGeneratorsToPb(result.SecretGenerators),
Details: object.ToListDetails(result.Count, result.Sequence, result.Timestamp), Details: object.ToListDetails(result.Count, result.Sequence, result.LastRun),
}, nil }, nil
} }

View File

@ -30,7 +30,7 @@ func (s *Server) ListIDPs(ctx context.Context, req *admin_pb.ListIDPsRequest) (*
} }
return &admin_pb.ListIDPsResponse{ return &admin_pb.ListIDPsResponse{
Result: idp_grpc.IDPViewsToPb(resp.IDPs), Result: idp_grpc.IDPViewsToPb(resp.IDPs),
Details: object_pb.ToListDetails(resp.Count, resp.Sequence, resp.Timestamp), Details: object_pb.ToListDetails(resp.Count, resp.Sequence, resp.LastRun),
}, nil }, nil
} }
@ -175,7 +175,7 @@ func (s *Server) ListProviders(ctx context.Context, req *admin_pb.ListProvidersR
} }
return &admin_pb.ListProvidersResponse{ return &admin_pb.ListProvidersResponse{
Result: idp_grpc.ProvidersToPb(resp.Templates), Result: idp_grpc.ProvidersToPb(resp.Templates),
Details: object_pb.ToListDetails(resp.Count, resp.Sequence, resp.Timestamp), Details: object_pb.ToListDetails(resp.Count, resp.Sequence, resp.LastRun),
}, nil }, nil
} }

Some files were not shown because too many files have changed in this diff Show More