feat(eventstore): increase parallel write capabilities (#5940)

This implementation increases the parallel write capabilities of the eventstore.
Please have a look at the technical advisories [05](https://zitadel.com/docs/support/advisory/a10005) and [06](https://zitadel.com/docs/support/advisory/a10006).
The implementation of `eventstore.push` has been rewritten, and stored events are migrated to a new table, `eventstore.events2`.
If you are using CockroachDB, make sure the database user of ZITADEL has the `VIEWACTIVITY` grant; it is used to query events.
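
On CockroachDB, `VIEWACTIVITY` is granted as a role option. A minimal sketch, assuming the ZITADEL database user is named `zitadel` (adjust to the user configured for your deployment):

```sql
-- Sketch only: give the ZITADEL database user the VIEWACTIVITY role option.
-- The user name "zitadel" is an assumption; use the user from your configuration.
ALTER ROLE zitadel WITH VIEWACTIVITY;
```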
Author: Silvan
Date: 2023-10-19 12:19:10 +02:00
Committed by: GitHub
Parent: 259faba3f0
Commit: b5564572bc
791 changed files with 30326 additions and 43202 deletions

View File

@@ -12,7 +12,7 @@ CREATE TABLE adminapi.locks (
CREATE TABLE adminapi.current_sequences (
view_name TEXT,
current_sequence BIGINT,
event_timestamp TIMESTAMPTZ,
event_date TIMESTAMPTZ,
last_successful_spooler_run TIMESTAMPTZ,
instance_id TEXT NOT NULL,

View File

@@ -12,7 +12,7 @@ CREATE TABLE auth.locks (
CREATE TABLE auth.current_sequences (
view_name TEXT,
current_sequence BIGINT,
event_timestamp TIMESTAMPTZ,
event_date TIMESTAMPTZ,
last_successful_spooler_run TIMESTAMPTZ,
instance_id TEXT NOT NULL,

View File

@@ -1,23 +0,0 @@
package setup
import (
"embed"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 04/cockroach/index.sql
//go:embed 04/postgres/index.sql
stmts04 embed.FS
)
func New04(db *database.DB) *EventstoreIndexesNew {
return &EventstoreIndexesNew{
dbClient: db,
name: "04_eventstore_indexes",
step: "04",
fileName: "index.sql",
stmts: stmts04,
}
}

View File

@@ -1,4 +0,0 @@
CREATE INDEX IF NOT EXISTS write_model ON eventstore.events (instance_id, aggregate_type, aggregate_id, event_type, resource_owner)
STORING (id, aggregate_version, previous_aggregate_sequence, creation_date, event_data, editor_user, editor_service, previous_aggregate_type_sequence);
CREATE INDEX IF NOT EXISTS active_instances ON eventstore.events (creation_date desc, instance_id) USING HASH;

View File

@@ -1,3 +0,0 @@
CREATE INDEX IF NOT EXISTS write_model ON eventstore.events (instance_id, aggregate_type, aggregate_id, event_type, resource_owner);
CREATE INDEX IF NOT EXISTS active_instances ON eventstore.events (creation_date, instance_id);

View File

@@ -1,23 +0,0 @@
package setup
import (
"embed"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 09/cockroach/index.sql
//go:embed 09/postgres/index.sql
stmts09 embed.FS
)
func New09(db *database.DB) *EventstoreIndexesNew {
return &EventstoreIndexesNew{
dbClient: db,
name: "09_optimise_indexes",
step: "09",
fileName: "index.sql",
stmts: stmts09,
}
}

View File

@@ -1,51 +0,0 @@
-- replace agg_type_agg_id
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type_agg_id;
COMMIT;
BEGIN;
CREATE INDEX agg_type_agg_id ON eventstore.events (
instance_id
, aggregate_type
, aggregate_id
) STORING (
event_type
, aggregate_version
, previous_aggregate_sequence
, previous_aggregate_type_sequence
, creation_date
, event_data
, editor_user
, editor_service
, resource_owner
);
COMMIT;
-- replace agg_type
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type;
COMMIT;
BEGIN;
CREATE INDEX agg_type ON eventstore.events (
instance_id
, aggregate_type
, event_sequence
) STORING (
event_type
, aggregate_id
, aggregate_version
, previous_aggregate_sequence
, previous_aggregate_type_sequence
, creation_date
, event_data
, editor_user
, editor_service
, resource_owner
);
COMMIT;
-- drop unused index
BEGIN;
DROP INDEX IF EXISTS eventstore.events@agg_type_seq;
COMMIT;

View File

@@ -1,30 +0,0 @@
-- replace agg_type_agg_id
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type_agg_id;
COMMIT;
BEGIN;
CREATE INDEX agg_type_agg_id ON eventstore.events (
instance_id
, aggregate_type
, aggregate_id
);
COMMIT;
-- replace agg_type
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type;
COMMIT;
BEGIN;
CREATE INDEX agg_type ON eventstore.events (
instance_id
, aggregate_type
, event_sequence
);
COMMIT;
-- drop unused index
BEGIN;
DROP INDEX IF EXISTS eventstore.agg_type_seq;
COMMIT;

View File

@@ -3,7 +3,7 @@ package setup
import (
"context"
"database/sql"
_ "embed"
"embed"
"time"
"github.com/cockroachdb/cockroach-go/v2/crdb"
@@ -17,8 +17,9 @@ var (
correctCreationDate10CreateTable string
//go:embed 10/10_fill_table.sql
correctCreationDate10FillTable string
//go:embed 10/10_update.sql
correctCreationDate10Update string
//go:embed 10/cockroach/10_update.sql
//go:embed 10/postgres/10_update.sql
correctCreationDate10Update embed.FS
//go:embed 10/10_count_wrong_events.sql
correctCreationDate10CountWrongEvents string
//go:embed 10/10_empty_table.sql
@@ -34,7 +35,8 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
ctx, cancel := context.WithTimeout(ctx, mig.FailAfter)
defer cancel()
for {
for i := 0; ; i++ {
logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration")
var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
if mig.dbClient.Type() == "cockroach" {
@@ -46,6 +48,7 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
if err != nil {
return err
}
logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table created")
_, err = tx.ExecContext(ctx, correctCreationDate10Truncate)
if err != nil {
@@ -55,19 +58,25 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context) (err error) {
if err != nil {
return err
}
logging.WithFields("mig", mig.String(), "iteration", i).Debug("temp table filled")
res := tx.QueryRowContext(ctx, correctCreationDate10CountWrongEvents)
if err := res.Scan(&affected); err != nil || affected == 0 {
return err
}
_, err = tx.ExecContext(ctx, correctCreationDate10Update)
updateStmt, err := readStmt(correctCreationDate10Update, "10", mig.dbClient.Type(), "10_update.sql")
if err != nil {
return err
}
logging.WithFields("count", affected).Info("creation dates changed")
_, err = tx.ExecContext(ctx, updateStmt)
if err != nil {
return err
}
logging.WithFields("mig", mig.String(), "iteration", i, "count", affected).Debug("creation dates updated")
return nil
})
logging.WithFields("mig", mig.String(), "iteration", i).Debug("end iteration")
if affected == 0 || err != nil {
return err
}

View File

@@ -1,4 +1,4 @@
CREATE temporary TABLE IF NOT EXISTS wrong_events (
CREATE TEMPORARY TABLE IF NOT EXISTS wrong_events (
instance_id TEXT
, event_sequence BIGINT
, current_cd TIMESTAMPTZ

View File

@@ -10,6 +10,8 @@ INSERT INTO wrong_events (
) AS next_cd
FROM
eventstore.events
WHERE
"position" IS NULL
) sub WHERE
current_cd < next_cd
ORDER BY

View File

@@ -1 +0,0 @@
UPDATE eventstore.events e SET creation_date = we.next_cd FROM wrong_events we WHERE e.event_sequence = we.event_sequence and e.instance_id = we.instance_id;

View File

@@ -0,0 +1 @@
UPDATE eventstore.events e SET (creation_date, "position") = (we.next_cd, we.next_cd::DECIMAL) FROM wrong_events we WHERE e.event_sequence = we.event_sequence AND e.instance_id = we.instance_id;

View File

@@ -0,0 +1,10 @@
UPDATE
eventstore.events e
SET
creation_date = we.next_cd
, "position" = (EXTRACT(EPOCH FROM we.next_cd))
FROM
wrong_events we
WHERE
e.event_sequence = we.event_sequence
AND e.instance_id = we.instance_id;

View File

@@ -1,92 +0,0 @@
package setup
import (
"context"
"database/sql"
"embed"
"github.com/cockroachdb/cockroach-go/v2/crdb"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 11/11_add_column.sql
addEventCreatedAt string
//go:embed 11/11_update_events.sql
setCreatedAt string
//go:embed 11/11_set_column.sql
setCreatedAtDetails string
//go:embed 11/postgres/create_index.sql
//go:embed 11/cockroach/create_index.sql
createdAtIndexCreateStmt embed.FS
//go:embed 11/postgres/drop_index.sql
//go:embed 11/cockroach/drop_index.sql
createdAtIndexDropStmt embed.FS
)
type AddEventCreatedAt struct {
BulkAmount int
step10 *CorrectCreationDate
dbClient *database.DB
}
func (mig *AddEventCreatedAt) Execute(ctx context.Context) error {
// execute step 10 again because events created after the first execution of step 10
// could still have the wrong ordering of sequences and creation date
if err := mig.step10.Execute(ctx); err != nil {
return err
}
_, err := mig.dbClient.ExecContext(ctx, addEventCreatedAt)
if err != nil {
return err
}
createIndex, err := readStmt(createdAtIndexCreateStmt, "11", mig.dbClient.Type(), "create_index.sql")
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, createIndex)
if err != nil {
return err
}
for i := 0; ; i++ {
var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
res, err := tx.Exec(setCreatedAt, mig.BulkAmount)
if err != nil {
return err
}
affected, _ = res.RowsAffected()
return nil
})
if err != nil {
return err
}
logging.WithFields("step", "11", "iteration", i, "affected", affected).Info("set created_at iteration done")
if affected < int64(mig.BulkAmount) {
break
}
}
logging.Info("set details")
_, err = mig.dbClient.ExecContext(ctx, setCreatedAtDetails)
if err != nil {
return err
}
dropIndex, err := readStmt(createdAtIndexDropStmt, "11", mig.dbClient.Type(), "drop_index.sql")
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, dropIndex)
return err
}
func (mig *AddEventCreatedAt) String() string {
return "11_event_created_at"
}

View File

@@ -1,6 +0,0 @@
BEGIN;
-- create table with empty created_at
ALTER TABLE eventstore.events ADD COLUMN IF NOT EXISTS created_at TIMESTAMPTZ DEFAULT NULL;
-- set column rules
ALTER TABLE eventstore.events ALTER COLUMN created_at SET DEFAULT clock_timestamp();
COMMIT;

View File

@@ -1,3 +0,0 @@
BEGIN;
ALTER TABLE eventstore.events ALTER COLUMN created_at SET NOT NULL;
COMMIT;

View File

@@ -1,21 +0,0 @@
UPDATE eventstore.events SET
created_at = creation_date
FROM (
SELECT
e.event_sequence as seq
, e.instance_id as i_id
, e.creation_date as cd
FROM
eventstore.events e
WHERE
created_at IS NULL
ORDER BY
event_sequence ASC
, instance_id
LIMIT $1
) AS e
WHERE
e.seq = eventstore.events.event_sequence
AND e.i_id = eventstore.events.instance_id
AND e.cd = eventstore.events.creation_date
;

View File

@@ -1,8 +0,0 @@
CREATE INDEX IF NOT EXISTS ca_fill_idx ON eventstore.events (
event_sequence DESC
, instance_id
) STORING (
id
, creation_date
, created_at
) WHERE created_at IS NULL;

View File

@@ -1 +0,0 @@
DROP INDEX IF EXISTS eventstore.events@ca_fill_idx;

View File

@@ -1,4 +0,0 @@
CREATE INDEX IF NOT EXISTS ca_fill_idx ON eventstore.events (
event_sequence DESC
, instance_id
) WHERE created_at IS NULL;

View File

@@ -1 +0,0 @@
DROP INDEX IF EXISTS eventstore.ca_fill_idx;

cmd/setup/14.go (new file, 72 lines)

View File

@@ -0,0 +1,72 @@
package setup
import (
"context"
"database/sql"
"embed"
"errors"
"strings"
"github.com/jackc/pgconn"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 14/cockroach/*.sql
//go:embed 14/postgres/*.sql
newEventsTable embed.FS
)
type NewEventsTable struct {
dbClient *database.DB
}
func (mig *NewEventsTable) Execute(ctx context.Context) error {
migrations, err := newEventsTable.ReadDir("14/" + mig.dbClient.Type())
if err != nil {
return err
}
// skip this migration if eventstore.events2 already exists (e.g. created during a previous setup job)
var count int
err = mig.dbClient.QueryRow(
func(row *sql.Row) error {
if err = row.Scan(&count); err != nil {
return err
}
return row.Err()
},
"SELECT count(*) FROM information_schema.tables WHERE table_schema = 'eventstore' AND table_name like 'events2'",
)
if err != nil || count == 1 {
return err
}
for _, migration := range migrations {
stmt, err := readStmt(newEventsTable, "14", mig.dbClient.Type(), migration.Name())
if err != nil {
return err
}
stmt = strings.ReplaceAll(stmt, "{{.username}}", mig.dbClient.Username())
logging.WithFields("migration", mig.String(), "file", migration.Name()).Debug("execute statement")
_, err = mig.dbClient.ExecContext(ctx, stmt)
if err != nil {
return err
}
}
return nil
}
func (mig *NewEventsTable) String() string {
return "14_events_push"
}
func (mig *NewEventsTable) ContinueOnErr(err error) bool {
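// 42P01 is the PostgreSQL error code for "undefined_table"; returning true here
// presumably lets setup continue when a table referenced by this migration is already gone.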
pgErr := new(pgconn.PgError)
if errors.As(err, &pgErr) {
return pgErr.Code == "42P01"
}
return false
}

View File

@@ -0,0 +1 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@@ -0,0 +1,33 @@
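-- "position" is filled from creation_date cast to DECIMAL (Unix epoch seconds on CockroachDB,
-- matching EXTRACT(EPOCH FROM creation_date) in the PostgreSQL variant); in_tx_order reuses the old event_sequence.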
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order,
PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
creation_date::DECIMAL,
event_sequence
FROM eventstore.events_old;

View File

@@ -0,0 +1,7 @@
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN revision SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN created_at SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN creator SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "owner" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "position" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN in_tx_order SET NOT NULL;

View File

@@ -0,0 +1,3 @@
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC) STORING ("position");
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

View File

@@ -0,0 +1 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@@ -0,0 +1,31 @@
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
EXTRACT(EPOCH FROM creation_date),
event_sequence
FROM eventstore.events_old;

View File

@@ -0,0 +1,4 @@
BEGIN;
ALTER TABLE eventstore.events2 DROP CONSTRAINT IF EXISTS events2_pkey;
ALTER TABLE eventstore.events2 ADD PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence");
COMMIT;

View File

@@ -0,0 +1,7 @@
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL,
ALTER COLUMN revision SET NOT NULL,
ALTER COLUMN created_at SET NOT NULL,
ALTER COLUMN creator SET NOT NULL,
ALTER COLUMN "owner" SET NOT NULL,
ALTER COLUMN "position" SET NOT NULL,
ALTER COLUMN in_tx_order SET NOT NULL;

View File

@@ -0,0 +1,3 @@
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC, instance_id);
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

cmd/setup/15.go (new file, 45 lines)

View File

@@ -0,0 +1,45 @@
package setup
import (
"context"
"embed"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
)
var (
//go:embed 15/cockroach/*.sql
//go:embed 15/postgres/*.sql
currentProjectionState embed.FS
)
type CurrentProjectionState struct {
dbClient *database.DB
}
func (mig *CurrentProjectionState) Execute(ctx context.Context) error {
migrations, err := currentProjectionState.ReadDir("15/" + mig.dbClient.Type())
if err != nil {
return err
}
for _, migration := range migrations {
stmt, err := readStmt(currentProjectionState, "15", mig.dbClient.Type(), migration.Name())
if err != nil {
return err
}
logging.WithFields("file", migration.Name(), "migration", mig.String()).Info("execute statement")
_, err = mig.dbClient.ExecContext(ctx, stmt)
if err != nil {
return err
}
}
return nil
}
func (mig *CurrentProjectionState) String() string {
return "15_current_projection_state"
}

View File

@@ -0,0 +1,16 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@@ -0,0 +1,29 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,16 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,26 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@@ -0,0 +1,28 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@@ -0,0 +1,27 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
ON CONFLICT DO NOTHING;

View File

@@ -9,6 +9,8 @@ import (
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/migration"
)
@@ -29,11 +31,14 @@ func Cleanup(config *Config) {
logging.Info("cleanup started")
dbClient, err := database.Connect(config.Database, false)
zitadelDBClient, err := database.Connect(config.Database, false, false)
logging.OnError(err).Fatal("unable to connect to database")
esPusherDBClient, err := database.Connect(config.Database, false, true)
logging.OnError(err).Fatal("unable to connect to database")
es, err := eventstore.Start(&eventstore.Config{Client: dbClient})
logging.OnError(err).Fatal("unable to start eventstore")
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
config.Eventstore.Querier = old_es.NewCRDB(zitadelDBClient)
es := eventstore.NewEventstore(config.Eventstore)
migration.RegisterMappers(es)
step, err := migration.LatestStep(ctx, es)

View File

@@ -15,6 +15,7 @@ import (
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/crypto"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/query/projection"
)
@@ -31,6 +32,7 @@ type Config struct {
DefaultInstance command.InstanceSetup
Machine *id.Config
Projections projection.Config
Eventstore *eventstore.Config
}
func MustNewConfig(v *viper.Viper) *Config {
@@ -60,16 +62,15 @@ type Steps struct {
s1ProjectionTable *ProjectionTable
s2AssetsTable *AssetTable
FirstInstance *FirstInstance
s4EventstoreIndexes *EventstoreIndexesNew
s5LastFailed *LastFailed
s6OwnerRemoveColumns *OwnerRemoveColumns
s7LogstoreTables *LogstoreTables
s8AuthTokens *AuthTokenIndexes
s9EventstoreIndexes2 *EventstoreIndexesNew
CorrectCreationDate *CorrectCreationDate
AddEventCreatedAt *AddEventCreatedAt
s12AddOTPColumns *AddOTPColumns
s13FixQuotaProjection *FixQuotaConstraints
s14NewEventsTable *NewEventsTable
s15CurrentStates *CurrentProjectionState
}
type encryptionKeyConfig struct {

View File

@@ -1,29 +0,0 @@
package setup
import (
"context"
"embed"
"github.com/zitadel/zitadel/internal/database"
)
type EventstoreIndexesNew struct {
dbClient *database.DB
name string
step string
fileName string
stmts embed.FS
}
func (mig *EventstoreIndexesNew) Execute(ctx context.Context) error {
stmt, err := readStmt(mig.stmts, mig.step, mig.dbClient.Type(), mig.fileName)
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, stmt)
return err
}
func (mig *EventstoreIndexesNew) String() string {
return mig.name
}

View File

@@ -14,6 +14,8 @@ import (
"github.com/zitadel/zitadel/cmd/tls"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/migration"
"github.com/zitadel/zitadel/internal/query/projection"
)
@@ -62,22 +64,26 @@ func Setup(config *Config, steps *Steps, masterKey string) {
ctx := context.Background()
logging.Info("setup started")
dbClient, err := database.Connect(config.Database, false)
zitadelDBClient, err := database.Connect(config.Database, false, false)
logging.OnError(err).Fatal("unable to connect to database")
esPusherDBClient, err := database.Connect(config.Database, false, true)
logging.OnError(err).Fatal("unable to connect to database")
eventstoreClient, err := eventstore.Start(&eventstore.Config{Client: dbClient})
config.Eventstore.Querier = old_es.NewCRDB(zitadelDBClient)
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
eventstoreClient := eventstore.NewEventstore(config.Eventstore)
logging.OnError(err).Fatal("unable to start eventstore")
migration.RegisterMappers(eventstoreClient)
steps.s1ProjectionTable = &ProjectionTable{dbClient: dbClient.DB}
steps.s2AssetsTable = &AssetTable{dbClient: dbClient.DB}
steps.s1ProjectionTable = &ProjectionTable{dbClient: zitadelDBClient.DB}
steps.s2AssetsTable = &AssetTable{dbClient: zitadelDBClient.DB}
steps.FirstInstance.instanceSetup = config.DefaultInstance
steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User
steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP
steps.FirstInstance.oidcEncryptionKey = config.EncryptionKeys.OIDC
steps.FirstInstance.masterKey = masterKey
steps.FirstInstance.db = dbClient
steps.FirstInstance.db = zitadelDBClient
steps.FirstInstance.es = eventstoreClient
steps.FirstInstance.defaults = config.SystemDefaults
steps.FirstInstance.zitadelRoles = config.InternalAuthZ.RolePermissionMappings
@@ -85,19 +91,17 @@ func Setup(config *Config, steps *Steps, masterKey string) {
steps.FirstInstance.externalSecure = config.ExternalSecure
steps.FirstInstance.externalPort = config.ExternalPort
steps.s4EventstoreIndexes = New04(dbClient)
steps.s5LastFailed = &LastFailed{dbClient: dbClient.DB}
steps.s6OwnerRemoveColumns = &OwnerRemoveColumns{dbClient: dbClient.DB}
steps.s7LogstoreTables = &LogstoreTables{dbClient: dbClient.DB, username: config.Database.Username(), dbType: config.Database.Type()}
steps.s8AuthTokens = &AuthTokenIndexes{dbClient: dbClient}
steps.s9EventstoreIndexes2 = New09(dbClient)
steps.CorrectCreationDate.dbClient = dbClient
steps.AddEventCreatedAt.dbClient = dbClient
steps.AddEventCreatedAt.step10 = steps.CorrectCreationDate
steps.s12AddOTPColumns = &AddOTPColumns{dbClient: dbClient}
steps.s13FixQuotaProjection = &FixQuotaConstraints{dbClient: dbClient}
steps.s5LastFailed = &LastFailed{dbClient: zitadelDBClient.DB}
steps.s6OwnerRemoveColumns = &OwnerRemoveColumns{dbClient: zitadelDBClient.DB}
steps.s7LogstoreTables = &LogstoreTables{dbClient: zitadelDBClient.DB, username: config.Database.Username(), dbType: config.Database.Type()}
steps.s8AuthTokens = &AuthTokenIndexes{dbClient: zitadelDBClient}
steps.CorrectCreationDate.dbClient = esPusherDBClient
steps.s12AddOTPColumns = &AddOTPColumns{dbClient: zitadelDBClient}
steps.s13FixQuotaProjection = &FixQuotaConstraints{dbClient: zitadelDBClient}
steps.s14NewEventsTable = &NewEventsTable{dbClient: esPusherDBClient}
steps.s15CurrentStates = &CurrentProjectionState{dbClient: zitadelDBClient}
err = projection.Create(ctx, dbClient, eventstoreClient, config.Projections, nil, nil)
err = projection.Create(ctx, zitadelDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
repeatableSteps := []migration.RepeatableMigration{
@@ -114,32 +118,28 @@ func Setup(config *Config, steps *Steps, masterKey string) {
},
}
err = migration.Migrate(ctx, eventstoreClient, steps.s14NewEventsTable)
logging.WithFields("name", steps.s14NewEventsTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s1ProjectionTable)
logging.OnError(err).Fatal("unable to migrate step 1")
logging.WithFields("name", steps.s1ProjectionTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s2AssetsTable)
logging.OnError(err).Fatal("unable to migrate step 2")
logging.WithFields("name", steps.s2AssetsTable.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.FirstInstance)
logging.OnError(err).Fatal("unable to migrate step 3")
err = migration.Migrate(ctx, eventstoreClient, steps.s4EventstoreIndexes)
logging.OnError(err).Fatal("unable to migrate step 4")
logging.WithFields("name", steps.FirstInstance.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s5LastFailed)
logging.OnError(err).Fatal("unable to migrate step 5")
logging.WithFields("name", steps.s5LastFailed.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s6OwnerRemoveColumns)
logging.OnError(err).Fatal("unable to migrate step 6")
logging.WithFields("name", steps.s6OwnerRemoveColumns.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s7LogstoreTables)
logging.OnError(err).Fatal("unable to migrate step 7")
logging.WithFields("name", steps.s7LogstoreTables.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s8AuthTokens)
logging.OnError(err).Fatal("unable to migrate step 8")
err = migration.Migrate(ctx, eventstoreClient, steps.s9EventstoreIndexes2)
logging.OnError(err).Fatal("unable to migrate step 9")
err = migration.Migrate(ctx, eventstoreClient, steps.CorrectCreationDate)
logging.OnError(err).Fatal("unable to migrate step 10")
err = migration.Migrate(ctx, eventstoreClient, steps.AddEventCreatedAt)
logging.OnError(err).Fatal("unable to migrate step 11")
logging.WithFields("name", steps.s8AuthTokens.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s12AddOTPColumns)
logging.OnError(err).Fatal("unable to migrate step 12")
logging.WithFields("name", steps.s12AddOTPColumns.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s13FixQuotaProjection)
logging.OnError(err).Fatal("unable to migrate step 13")
logging.WithFields("name", steps.s13FixQuotaProjection.String()).OnError(err).Fatal("migration failed")
err = migration.Migrate(ctx, eventstoreClient, steps.s15CurrentStates)
logging.WithFields("name", steps.s15CurrentStates.String()).OnError(err).Fatal("migration failed")
for _, repeatableStep := range repeatableSteps {
err = migration.Migrate(ctx, eventstoreClient, repeatableStep)