chore!: Introduce ZITADEL v3 (#9645)

This PR summarizes multiple changes that are only available with
ZITADEL v3:

- feat: Web Keys management
(https://github.com/zitadel/zitadel/pull/9526)
- fix(cmd): ensure proper working of mirror
(https://github.com/zitadel/zitadel/pull/9509)
- feat(Authz): system user support for permission check v2
(https://github.com/zitadel/zitadel/pull/9640)
- chore(license): change from Apache to AGPL
(https://github.com/zitadel/zitadel/pull/9597)
- feat(console): list v2 sessions
(https://github.com/zitadel/zitadel/pull/9539)
- fix(console): add loginV2 feature flag
(https://github.com/zitadel/zitadel/pull/9682)
- fix(feature flags): allow reading "own" flags
(https://github.com/zitadel/zitadel/pull/9649)
- feat(console): add Actions V2 UI
(https://github.com/zitadel/zitadel/pull/9591)

BREAKING CHANGE:
- feat(webkey): migrate to v2beta API
(https://github.com/zitadel/zitadel/pull/9445)
- chore!: remove CockroachDB Support
(https://github.com/zitadel/zitadel/pull/9444)
- feat(actions): migrate to v2beta API
(https://github.com/zitadel/zitadel/pull/9489)

---------

Co-authored-by: Livio Spring <livio.a@gmail.com>
Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Silvan <27845747+adlerhurst@users.noreply.github.com>
Co-authored-by: Ramon <mail@conblem.me>
Co-authored-by: Elio Bischof <elio@zitadel.com>
Co-authored-by: Kenta Yamaguchi <56732734+KEY60228@users.noreply.github.com>
Co-authored-by: Harsha Reddy <harsha.reddy@klaviyo.com>
Co-authored-by: Livio Spring <livio@zitadel.com>
Co-authored-by: Max Peintner <max@caos.ch>
Co-authored-by: Iraq <66622793+kkrime@users.noreply.github.com>
Co-authored-by: Florian Forster <florian@zitadel.com>
Co-authored-by: Tim Möhlmann <tim+github@zitadel.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Max Peintner <peintnerm@gmail.com>
Authored by Fabienne Bühler on 2025-04-02 16:53:06 +02:00, committed by GitHub
parent d14a23ae7e · commit 07ce3b6905
559 changed files with 14578 additions and 7622 deletions

View File

@@ -3,7 +3,7 @@ package setup
import (
"context"
"database/sql"
"embed"
_ "embed"
"strings"
"github.com/zitadel/zitadel/internal/eventstore"
@@ -12,31 +12,20 @@ import (
var (
//go:embed 07/logstore.sql
createLogstoreSchema07 string
//go:embed 07/cockroach/access.sql
//go:embed 07/postgres/access.sql
createAccessLogsTable07 embed.FS
//go:embed 07/cockroach/execution.sql
//go:embed 07/postgres/execution.sql
createExecutionLogsTable07 embed.FS
//go:embed 07/access.sql
createAccessLogsTable07 string
//go:embed 07/execution.sql
createExecutionLogsTable07 string
)
type LogstoreTables struct {
dbClient *sql.DB
username string
dbType string
}
func (mig *LogstoreTables) Execute(ctx context.Context, _ eventstore.Event) error {
accessStmt, err := readStmt(createAccessLogsTable07, "07", mig.dbType, "access.sql")
if err != nil {
return err
}
executionStmt, err := readStmt(createExecutionLogsTable07, "07", mig.dbType, "execution.sql")
if err != nil {
return err
}
stmt := strings.ReplaceAll(createLogstoreSchema07, "%[1]s", mig.username) + accessStmt + executionStmt
_, err = mig.dbClient.ExecContext(ctx, stmt)
stmt := strings.ReplaceAll(createLogstoreSchema07, "%[1]s", mig.username) + createAccessLogsTable07 + createExecutionLogsTable07
_, err := mig.dbClient.ExecContext(ctx, stmt)
return err
}
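
With CockroachDB gone, most migrations in this PR drop the per-dialect embed.FS folders and embed the single PostgreSQL file directly into a string. A minimal sketch of the new pattern, assuming the 07/access.sql file shown above; the helper name runAccessLogs is illustrative and not part of the PR:

package setup

import (
	"context"
	"database/sql"
	_ "embed" // blank import enables //go:embed into a plain string variable
)

// The statement is embedded at compile time; no runtime dialect lookup
// (formerly readStmt(fs, "07", dbType, "access.sql")) is needed anymore.
//go:embed 07/access.sql
var accessLogsStmt string

// runAccessLogs executes the embedded statement against the given database.
func runAccessLogs(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, accessLogsStmt)
	return err
}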

View File

@@ -1,14 +0,0 @@
CREATE TABLE IF NOT EXISTS logstore.access (
log_date TIMESTAMPTZ NOT NULL
, protocol INT NOT NULL
, request_url TEXT NOT NULL
, response_status INT NOT NULL
, request_headers JSONB
, response_headers JSONB
, instance_id TEXT NOT NULL
, project_id TEXT NOT NULL
, requested_domain TEXT
, requested_host TEXT
, INDEX protocol_date_desc (instance_id, protocol, log_date DESC) STORING (request_url, response_status, request_headers)
);

View File

@@ -1,11 +0,0 @@
CREATE TABLE IF NOT EXISTS logstore.execution (
log_date TIMESTAMPTZ NOT NULL
, took INTERVAL
, message TEXT NOT NULL
, loglevel INT NOT NULL
, instance_id TEXT NOT NULL
, action_id TEXT NOT NULL
, metadata JSONB
, INDEX log_date_desc (instance_id, log_date DESC) STORING (took)
);

View File

@@ -2,16 +2,15 @@ package setup
import (
"context"
"embed"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 08/cockroach/08.sql
//go:embed 08/postgres/08.sql
tokenIndexes08 embed.FS
//go:embed 08/08.sql
tokenIndexes08 string
)
type AuthTokenIndexes struct {
@@ -19,11 +18,7 @@ type AuthTokenIndexes struct {
}
func (mig *AuthTokenIndexes) Execute(ctx context.Context, _ eventstore.Event) error {
stmt, err := readStmt(tokenIndexes08, "08", mig.dbClient.Type(), "08.sql")
if err != nil {
return err
}
_, err = mig.dbClient.ExecContext(ctx, stmt)
_, err := mig.dbClient.ExecContext(ctx, tokenIndexes08)
return err
}

View File

@@ -1,5 +0,0 @@
CREATE INDEX IF NOT EXISTS inst_refresh_tkn_idx ON auth.tokens(instance_id, refresh_token_id);
CREATE INDEX IF NOT EXISTS inst_app_tkn_idx ON auth.tokens(instance_id, application_id);
CREATE INDEX IF NOT EXISTS inst_ro_tkn_idx ON auth.tokens(instance_id, resource_owner);
DROP INDEX IF EXISTS auth.tokens@user_user_agent_idx;
CREATE INDEX IF NOT EXISTS inst_usr_agnt_tkn_idx ON auth.tokens(instance_id, user_id, user_agent_id);

View File

@@ -3,7 +3,7 @@ package setup
import (
"context"
"database/sql"
"embed"
_ "embed"
"time"
"github.com/cockroachdb/cockroach-go/v2/crdb"
@@ -18,9 +18,8 @@ var (
correctCreationDate10CreateTable string
//go:embed 10/10_fill_table.sql
correctCreationDate10FillTable string
//go:embed 10/cockroach/10_update.sql
//go:embed 10/postgres/10_update.sql
correctCreationDate10Update embed.FS
//go:embed 10/10_update.sql
correctCreationDate10Update string
//go:embed 10/10_count_wrong_events.sql
correctCreationDate10CountWrongEvents string
//go:embed 10/10_empty_table.sql
@@ -40,11 +39,6 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context, _ eventstore.Event)
logging.WithFields("mig", mig.String(), "iteration", i).Debug("start iteration")
var affected int64
err = crdb.ExecuteTx(ctx, mig.dbClient.DB, nil, func(tx *sql.Tx) error {
if mig.dbClient.Type() == "cockroach" {
if _, err := tx.Exec("SET experimental_enable_temp_tables=on"); err != nil {
return err
}
}
_, err := tx.ExecContext(ctx, correctCreationDate10CreateTable)
if err != nil {
return err
@@ -66,11 +60,7 @@ func (mig *CorrectCreationDate) Execute(ctx context.Context, _ eventstore.Event)
return err
}
updateStmt, err := readStmt(correctCreationDate10Update, "10", mig.dbClient.Type(), "10_update.sql")
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, updateStmt)
_, err = tx.ExecContext(ctx, correctCreationDate10Update)
if err != nil {
return err
}

View File

@@ -1 +0,0 @@
UPDATE eventstore.events e SET (creation_date, "position") = (we.next_cd, we.next_cd::DECIMAL) FROM wrong_events we WHERE e.event_sequence = we.event_sequence AND e.instance_id = we.instance_id;

View File

@@ -15,8 +15,7 @@ import (
)
var (
//go:embed 14/cockroach/*.sql
//go:embed 14/postgres/*.sql
//go:embed 14/*.sql
newEventsTable embed.FS
)
@@ -40,7 +39,7 @@ func (mig *NewEventsTable) Execute(ctx context.Context, _ eventstore.Event) erro
return err
}
statements, err := readStatements(newEventsTable, "14", mig.dbClient.Type())
statements, err := readStatements(newEventsTable, "14")
if err != nil {
return err
}

View File

@@ -1,33 +0,0 @@
CREATE TABLE eventstore.events2 (
instance_id,
aggregate_type,
aggregate_id,
event_type,
"sequence",
revision,
created_at,
payload,
creator,
"owner",
"position",
in_tx_order,
PRIMARY KEY (instance_id, aggregate_type, aggregate_id, "sequence")
) AS SELECT
instance_id,
aggregate_type,
aggregate_id,
event_type,
event_sequence,
substr(aggregate_version, 2)::SMALLINT,
creation_date,
event_data,
editor_user,
resource_owner,
creation_date::DECIMAL,
event_sequence
FROM eventstore.events_old;

View File

@@ -1,7 +0,0 @@
ALTER TABLE eventstore.events2 ALTER COLUMN event_type SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN revision SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN created_at SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN creator SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "owner" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN "position" SET NOT NULL;
ALTER TABLE eventstore.events2 ALTER COLUMN in_tx_order SET NOT NULL;

View File

@@ -1,3 +0,0 @@
CREATE INDEX IF NOT EXISTS es_active_instances ON eventstore.events2 (created_at DESC) STORING ("position");
CREATE INDEX IF NOT EXISTS es_wm ON eventstore.events2 (aggregate_id, instance_id, aggregate_type, event_type);
CREATE INDEX IF NOT EXISTS es_projection ON eventstore.events2 (instance_id, aggregate_type, event_type, "position");

View File

@@ -1 +0,0 @@
ALTER TABLE eventstore.events RENAME TO events_old;

View File

@@ -11,8 +11,7 @@ import (
)
var (
//go:embed 15/cockroach/*.sql
//go:embed 15/postgres/*.sql
//go:embed 15/*.sql
currentProjectionState embed.FS
)
@@ -21,7 +20,7 @@ type CurrentProjectionState struct {
}
func (mig *CurrentProjectionState) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(currentProjectionState, "15", mig.dbClient.Type())
statements, err := readStatements(currentProjectionState, "15")
if err != nil {
return err
}

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.projection_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.error
, fe.last_failed
FROM
projections.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,29 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.projection_name
, cs.instance_id
, e.created_at
, e.position
, cs.timestamp
FROM
projections.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.aggregate_type = cs.aggregate_type
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
projections.current_sequences cs2
WHERE
cs.projection_name = cs2.projection_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,28 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
adminapi.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
adminapi.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,28 +0,0 @@
INSERT INTO projections.current_states (
projection_name
, instance_id
, event_date
, "position"
, last_updated
) (SELECT
cs.view_name
, cs.instance_id
, e.created_at
, e.position
, cs.last_successful_spooler_run
FROM
auth.current_sequences cs
JOIN eventstore.events2 e ON
e.instance_id = cs.instance_id
AND e.sequence = cs.current_sequence
AND cs.current_sequence = (
SELECT
MAX(cs2.current_sequence)
FROM
auth.current_sequences cs2
WHERE
cs.view_name = cs2.view_name
AND cs.instance_id = cs2.instance_id
)
)
ON CONFLICT DO NOTHING;

View File

@@ -1,16 +0,0 @@
CREATE TABLE IF NOT EXISTS projections.failed_events2 (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, aggregate_type TEXT NOT NULL
, aggregate_id TEXT NOT NULL
, event_creation_date TIMESTAMPTZ NOT NULL
, failed_sequence INT8 NOT NULL
, failure_count INT2 NULL DEFAULT 0
, error TEXT
, last_failed TIMESTAMPTZ
, PRIMARY KEY (projection_name, instance_id, aggregate_type, aggregate_id, failed_sequence)
);
CREATE INDEX IF NOT EXISTS fe2_instance_id_idx on projections.failed_events2 (instance_id);

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
adminapi.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,26 +0,0 @@
INSERT INTO projections.failed_events2 (
projection_name
, instance_id
, aggregate_type
, aggregate_id
, event_creation_date
, failed_sequence
, failure_count
, error
, last_failed
) SELECT
fe.view_name
, fe.instance_id
, e.aggregate_type
, e.aggregate_id
, e.created_at
, e.sequence
, fe.failure_count
, fe.err_msg
, fe.last_failed
FROM
auth.failed_events fe
JOIN eventstore.events2 e ON
e.instance_id = fe.instance_id
AND e.sequence = fe.failed_sequence
ON CONFLICT DO NOTHING;

View File

@@ -1,15 +0,0 @@
CREATE TABLE IF NOT EXISTS projections.current_states (
projection_name TEXT NOT NULL
, instance_id TEXT NOT NULL
, last_updated TIMESTAMPTZ
, aggregate_id TEXT
, aggregate_type TEXT
, "sequence" INT8
, event_date TIMESTAMPTZ
, "position" DECIMAL
, PRIMARY KEY (projection_name, instance_id)
);
CREATE INDEX IF NOT EXISTS cs_instance_id_idx ON projections.current_states (instance_id);

View File

@@ -3,17 +3,14 @@ package setup
import (
"context"
_ "embed"
"fmt"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 34/cockroach/34_cache_schema.sql
addCacheSchemaCockroach string
//go:embed 34/postgres/34_cache_schema.sql
addCacheSchemaPostgres string
//go:embed 34/34_cache_schema.sql
addCacheSchema string
)
type AddCacheSchema struct {
@@ -21,14 +18,7 @@ type AddCacheSchema struct {
}
func (mig *AddCacheSchema) Execute(ctx context.Context, _ eventstore.Event) (err error) {
switch mig.dbClient.Type() {
case "cockroach":
_, err = mig.dbClient.ExecContext(ctx, addCacheSchemaCockroach)
case "postgres":
_, err = mig.dbClient.ExecContext(ctx, addCacheSchemaPostgres)
default:
err = fmt.Errorf("add cache schema: unsupported db type %q", mig.dbClient.Type())
}
_, err = mig.dbClient.ExecContext(ctx, addCacheSchema)
return err
}

View File

@@ -1,27 +0,0 @@
create schema if not exists cache;
create table if not exists cache.objects (
cache_name varchar not null,
id uuid not null default gen_random_uuid(),
created_at timestamptz not null default now(),
last_used_at timestamptz not null default now(),
payload jsonb not null,
primary key(cache_name, id)
);
create table if not exists cache.string_keys(
cache_name varchar not null check (cache_name <> ''),
index_id integer not null check (index_id > 0),
index_key varchar not null check (index_key <> ''),
object_id uuid not null,
primary key (cache_name, index_id, index_key),
constraint fk_object
foreign key(cache_name, object_id)
references cache.objects(cache_name, id)
on delete cascade
);
create index if not exists string_keys_object_id_idx
on cache.string_keys (cache_name, object_id); -- for delete cascade

View File

@@ -21,7 +21,7 @@ type AddPositionToIndexEsWm struct {
}
func (mig *AddPositionToIndexEsWm) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(addPositionToEsWmIndex, "35", "")
statements, err := readStatements(addPositionToEsWmIndex, "35")
if err != nil {
return err
}

View File

@@ -24,8 +24,7 @@ const (
)
var (
//go:embed 40/cockroach/*.sql
//go:embed 40/postgres/*.sql
//go:embed 40/*.sql
initPushFunc embed.FS
)
@@ -112,5 +111,5 @@ func (mig *InitPushFunc) inTxOrderType(ctx context.Context) (typeName string, er
}
func (mig *InitPushFunc) filePath(fileName string) string {
return path.Join("40", mig.dbClient.Type(), fileName)
return path.Join("40", fileName)
}

View File

@@ -1,10 +0,0 @@
CREATE TYPE IF NOT EXISTS eventstore.command AS (
instance_id TEXT
, aggregate_type TEXT
, aggregate_id TEXT
, command_type TEXT
, revision INT2
, payload JSONB
, creator TEXT
, owner TEXT
);

View File

@@ -1,137 +0,0 @@
CREATE OR REPLACE FUNCTION eventstore.latest_aggregate_state(
instance_id TEXT
, aggregate_type TEXT
, aggregate_id TEXT
, sequence OUT BIGINT
, owner OUT TEXT
)
LANGUAGE 'plpgsql'
AS $$
BEGIN
SELECT
COALESCE(e.sequence, 0) AS sequence
, e.owner
INTO
sequence
, owner
FROM
eventstore.events2 e
WHERE
e.instance_id = $1
AND e.aggregate_type = $2
AND e.aggregate_id = $3
ORDER BY
e.sequence DESC
LIMIT 1;
RETURN;
END;
$$;
CREATE OR REPLACE FUNCTION eventstore.commands_to_events2(commands eventstore.command[])
RETURNS eventstore.events2[]
LANGUAGE 'plpgsql'
AS $$
DECLARE
current_sequence BIGINT;
current_owner TEXT;
instance_id TEXT;
aggregate_type TEXT;
aggregate_id TEXT;
_events eventstore.events2[];
_aggregates CURSOR FOR
select
DISTINCT ("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
FROM
UNNEST(commands) AS c;
BEGIN
OPEN _aggregates;
LOOP
FETCH NEXT IN _aggregates INTO instance_id, aggregate_type, aggregate_id;
-- crdb does not support EXIT WHEN NOT FOUND
EXIT WHEN instance_id IS NULL;
SELECT
*
INTO
current_sequence
, current_owner
FROM eventstore.latest_aggregate_state(
instance_id
, aggregate_type
, aggregate_id
);
-- RETURN QUERY is not supported by crdb: https://github.com/cockroachdb/cockroach/issues/105240
SELECT
ARRAY_CAT(_events, ARRAY_AGG(e))
INTO
_events
FROM (
SELECT
("c").instance_id
, ("c").aggregate_type
, ("c").aggregate_id
, ("c").command_type -- AS event_type
, COALESCE(current_sequence, 0) + ROW_NUMBER() OVER () -- AS sequence
, ("c").revision
, NOW() -- AS created_at
, ("c").payload
, ("c").creator
, COALESCE(current_owner, ("c").owner) -- AS owner
, cluster_logical_timestamp() -- AS position
, ordinality::{{ .InTxOrderType }} -- AS in_tx_order
FROM
UNNEST(commands) WITH ORDINALITY AS c
WHERE
("c").instance_id = instance_id
AND ("c").aggregate_type = aggregate_type
AND ("c").aggregate_id = aggregate_id
) AS e;
END LOOP;
CLOSE _aggregates;
RETURN _events;
END;
$$;
CREATE OR REPLACE FUNCTION eventstore.push(commands eventstore.command[]) RETURNS SETOF eventstore.events2 AS $$
INSERT INTO eventstore.events2
SELECT
("e").instance_id
, ("e").aggregate_type
, ("e").aggregate_id
, ("e").event_type
, ("e").sequence
, ("e").revision
, ("e").created_at
, ("e").payload
, ("e").creator
, ("e").owner
, ("e")."position"
, ("e").in_tx_order
FROM
UNNEST(eventstore.commands_to_events2(commands)) e
ORDER BY
in_tx_order
RETURNING *
$$ LANGUAGE SQL;
/*
select (c).* from UNNEST(eventstore.commands_to_events2(
ARRAY[
ROW('', 'system', 'SYSTEM', 'ct1', 1, '{"key": "value"}', 'c1', 'SYSTEM')
, ROW('', 'system', 'SYSTEM', 'ct2', 1, '{"key": "value"}', 'c1', 'SYSTEM')
, ROW('289525561255060732', 'org', '289575074711790844', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('289525561255060732', 'user', '289575075164906748', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('289525561255060732', 'oidc_session', 'V2_289575178579535100', 'ct3', 1, '{"key": "value"}', 'c1', '289575074711790844')
, ROW('', 'system', 'SYSTEM', 'ct3', 1, '{"key": "value"}', 'c1', 'SYSTEM')
]::eventstore.command[]
) )c;
*/

View File

@@ -1,5 +0,0 @@
SELECT data_type
FROM information_schema.columns
WHERE table_schema = 'eventstore'
AND table_name = 'events2'
AND column_name = 'in_tx_order';

View File

@@ -12,8 +12,7 @@ import (
)
var (
//go:embed 43/cockroach/*.sql
//go:embed 43/postgres/*.sql
//go:embed 43/*.sql
createFieldsDomainIndex embed.FS
)
@@ -22,7 +21,7 @@ type CreateFieldsDomainIndex struct {
}
func (mig *CreateFieldsDomainIndex) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(createFieldsDomainIndex, "43", mig.dbClient.Type())
statements, err := readStatements(createFieldsDomainIndex, "43")
if err != nil {
return err
}

View File

@@ -1,3 +0,0 @@
CREATE INDEX CONCURRENTLY IF NOT EXISTS fields_instance_domains_idx
ON eventstore.fields (object_id)
WHERE object_type = 'instance_domain' AND field_name = 'domain';

View File

@@ -21,7 +21,7 @@ type ReplaceCurrentSequencesIndex struct {
}
func (mig *ReplaceCurrentSequencesIndex) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(replaceCurrentSequencesIndex, "44", "")
statements, err := readStatements(replaceCurrentSequencesIndex, "44")
if err != nil {
return err
}

View File

@@ -21,7 +21,7 @@ var (
)
func (mig *InitPermissionFunctions) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(permissionFunctions, "46", "")
statements, err := readStatements(permissionFunctions, "46")
if err != nil {
return err
}

View File

@@ -21,7 +21,7 @@ var (
)
func (mig *InitPermittedOrgsFunction) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(permittedOrgsFunction, "49", "")
statements, err := readStatements(permittedOrgsFunction, "49")
if err != nil {
return err
}

cmd/setup/53.go (new file, 37 lines added)
View File

@@ -0,0 +1,37 @@
package setup
import (
"context"
"embed"
"fmt"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
type InitPermittedOrgsFunction53 struct {
dbClient *database.DB
}
//go:embed 53/*.sql
var permittedOrgsFunction53 embed.FS
func (mig *InitPermittedOrgsFunction53) Execute(ctx context.Context, _ eventstore.Event) error {
statements, err := readStatements(permittedOrgsFunction53, "53")
if err != nil {
return err
}
for _, stmt := range statements {
logging.WithFields("file", stmt.file, "migration", mig.String()).Info("execute statement")
if _, err := mig.dbClient.ExecContext(ctx, stmt.query); err != nil {
return fmt.Errorf("%s %s: %w", mig.String(), stmt.file, err)
}
}
return nil
}
func (*InitPermittedOrgsFunction53) String() string {
return "53_init_permitted_orgs_function"
}
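
For context, the new step follows the same contract as every other setup step touched in this PR. Below is a compile-time sketch of that contract, inferred from the methods implemented here; the real interface lives in ZITADEL's internal migration package and may define additional methods:

package setup

import (
	"context"

	"github.com/zitadel/zitadel/internal/eventstore"
)

// migrationStep sketches the two methods each setup step implements:
// Execute runs the migration once, String returns the stable name under
// which the setup command records that the step already ran.
type migrationStep interface {
	Execute(ctx context.Context, event eventstore.Event) error
	String() string
}

// Compile-time assertion that the new step satisfies the sketched contract.
var _ migrationStep = (*InitPermittedOrgsFunction53)(nil)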

View File

@@ -0,0 +1,43 @@
DROP FUNCTION IF EXISTS eventstore.get_system_permissions;
CREATE OR REPLACE FUNCTION eventstore.get_system_permissions(
permissions_json JSONB
/*
[
{
"member_type": "System",
"aggregate_id": "",
"object_id": "",
"permissions": ["iam.read", "iam.write", "iam.polic.read"]
},
{
"member_type": "IAM",
"aggregate_id": "310716990375453665",
"object_id": "",
"permissions": ["iam.read", "iam.write", "iam.polic.read"]
}
]
*/
, permm TEXT
)
RETURNS TABLE (
member_type TEXT,
aggregate_id TEXT,
object_id TEXT
)
LANGUAGE 'plpgsql'
AS $$
BEGIN
RETURN QUERY
SELECT res.member_type, res.aggregate_id, res.object_id FROM (
SELECT
(perm)->>'member_type' AS member_type,
(perm)->>'aggregate_id' AS aggregate_id,
(perm)->>'object_id' AS object_id,
permission
FROM jsonb_array_elements(permissions_json) AS perm
CROSS JOIN jsonb_array_elements_text(perm->'permissions') AS permission) AS res
WHERE res.permission = permm;
END;
$$;

View File

@@ -0,0 +1,144 @@
DROP FUNCTION IF EXISTS eventstore.check_system_user_perms;
CREATE OR REPLACE FUNCTION eventstore.check_system_user_perms(
system_user_perms JSONB
, perm TEXT
, filter_orgs TEXT
, org_ids OUT TEXT[]
)
LANGUAGE 'plpgsql'
AS $$
BEGIN
WITH found_permissions(member_type, aggregate_id, object_id ) AS (
SELECT * FROM eventstore.get_system_permissions(
system_user_perms,
perm)
)
SELECT array_agg(DISTINCT o.org_id) INTO org_ids
FROM eventstore.instance_orgs o, found_permissions
WHERE
CASE WHEN (SELECT TRUE WHERE found_permissions.member_type = 'System' LIMIT 1) THEN
TRUE
WHEN (SELECT TRUE WHERE found_permissions.member_type = 'IAM' LIMIT 1) THEN
-- aggregate_id not present
CASE WHEN (SELECT TRUE WHERE '' = ANY (
(
SELECT array_agg(found_permissions.aggregate_id)
FROM found_permissions
WHERE member_type = 'IAM'
GROUP BY member_type
LIMIT 1
)::TEXT[])) THEN
TRUE
-- aggregate_id is present
ELSE
o.instance_id = ANY (
(
SELECT array_agg(found_permissions.aggregate_id)
FROM found_permissions
WHERE member_type = 'IAM'
GROUP BY member_type
LIMIT 1
)::TEXT[])
END
WHEN (SELECT TRUE WHERE found_permissions.member_type = 'Organization' LIMIT 1) THEN
-- aggregate_id not present
CASE WHEN (SELECT TRUE WHERE '' = ANY (
(
SELECT array_agg(found_permissions.aggregate_id)
FROM found_permissions
WHERE member_type = 'Organization'
GROUP BY member_type
LIMIT 1
)::TEXT[])) THEN
TRUE
-- aggregate_id is present
ELSE
o.org_id = ANY (
(
SELECT array_agg(found_permissions.aggregate_id)
FROM found_permissions
WHERE member_type = 'Organization'
GROUP BY member_type
LIMIT 1
)::TEXT[])
END
END
AND
CASE WHEN filter_orgs != ''
THEN o.org_id IN (filter_orgs)
ELSE TRUE END
LIMIT 1;
END;
$$;
DROP FUNCTION IF EXISTS eventstore.permitted_orgs;
CREATE OR REPLACE FUNCTION eventstore.permitted_orgs(
instanceId TEXT
, userId TEXT
, system_user_perms JSONB
, perm TEXT
, filter_orgs TEXT
, org_ids OUT TEXT[]
)
LANGUAGE 'plpgsql'
AS $$
BEGIN
-- if system user
IF system_user_perms IS NOT NULL THEN
org_ids := eventstore.check_system_user_perms(system_user_perms, perm, filter_orgs);
-- if human/machine user
ELSE
DECLARE
matched_roles TEXT[]; -- roles containing permission
BEGIN
SELECT array_agg(rp.role) INTO matched_roles
FROM eventstore.role_permissions rp
WHERE rp.instance_id = instanceId
AND rp.permission = perm;
-- First check whether the permission was granted through an instance-level role
DECLARE
has_instance_permission bool;
BEGIN
SELECT true INTO has_instance_permission
FROM eventstore.instance_members im
WHERE im.role = ANY(matched_roles)
AND im.instance_id = instanceId
AND im.user_id = userId
LIMIT 1;
IF has_instance_permission THEN
-- Return all organizations or only those in filter_orgs
SELECT array_agg(o.org_id) INTO org_ids
FROM eventstore.instance_orgs o
WHERE o.instance_id = instanceId
AND CASE WHEN filter_orgs != ''
THEN o.org_id IN (filter_orgs)
ELSE TRUE END;
RETURN;
END IF;
END;
-- Return the organizations where the permission was granted through org-level roles
SELECT array_agg(sub.org_id) INTO org_ids
FROM (
SELECT DISTINCT om.org_id
FROM eventstore.org_members om
WHERE om.role = ANY(matched_roles)
AND om.instance_id = instanceID
AND om.user_id = userId
) AS sub;
END;
END IF;
END;
$$;
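
A hedged illustration of how the new function could be exercised directly, assuming a pgx v5 connection and placeholder IDs; the JSON shape follows the example documented in get_system_permissions above, and ZITADEL's own Go call site is not shown in this diff:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5"
)

// systemPermission mirrors the JSON documented in get_system_permissions.
type systemPermission struct {
	MemberType  string   `json:"member_type"`
	AggregateID string   `json:"aggregate_id"`
	ObjectID    string   `json:"object_id"`
	Permissions []string `json:"permissions"`
}

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://zitadel@localhost:5432/zitadel") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// For a system user, permissions are passed as JSON; for a human or
	// machine user, pass NULL instead and the function falls back to the
	// role_permissions / instance_members / org_members lookups.
	perms, _ := json.Marshal([]systemPermission{{
		MemberType:  "IAM",
		AggregateID: "310716990375453665", // placeholder instance aggregate
		Permissions: []string{"iam.read", "iam.write"},
	}})

	var orgIDs []string
	err = conn.QueryRow(ctx,
		"SELECT COALESCE(org_ids, '{}'::TEXT[]) FROM eventstore.permitted_orgs($1, $2, $3, $4, $5)",
		"310716990375453665", // placeholder instance ID
		"",                   // user ID is only used for human/machine users
		string(perms),
		"iam.read", // permission to check
		"",         // no org filter
	).Scan(&orgIDs)
	if err != nil {
		panic(err)
	}
	fmt.Println(orgIDs)
}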

View File

@@ -35,7 +35,7 @@ func Cleanup(config *Config) {
logging.OnError(err).Fatal("unable to connect to database")
config.Eventstore.Pusher = new_es.NewEventstore(dbClient)
config.Eventstore.Querier = old_es.NewCRDB(dbClient)
config.Eventstore.Querier = old_es.NewPostgres(dbClient)
es := eventstore.NewEventstore(config.Eventstore)
step, err := migration.LastStuckStep(ctx, es)

View File

@@ -12,7 +12,7 @@ import (
"github.com/zitadel/zitadel/cmd/encryption"
"github.com/zitadel/zitadel/cmd/hooks"
"github.com/zitadel/zitadel/internal/actions"
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/ui/login"
"github.com/zitadel/zitadel/internal/cache/connector"
@@ -22,6 +22,7 @@ import (
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/execution"
"github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/notification/handlers"
"github.com/zitadel/zitadel/internal/query/projection"
@@ -34,7 +35,8 @@ type Config struct {
Database database.Config
Caches *connector.CachesConfig
SystemDefaults systemdefaults.SystemDefaults
InternalAuthZ internal_authz.Config
InternalAuthZ authz.Config
SystemAuthZ authz.Config
ExternalDomain string
ExternalPort uint16
ExternalSecure bool
@@ -45,6 +47,7 @@ type Config struct {
Machine *id.Config
Projections projection.Config
Notifications handlers.WorkerConfig
Executions execution.WorkerConfig
Eventstore *eventstore.Config
InitProjections InitProjections
@@ -53,7 +56,7 @@ type Config struct {
Login login.Config
WebAuthNName string
Telemetry *handlers.TelemetryPusherConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser
SystemAPIUsers map[string]*authz.SystemAPIUser
}
type InitProjections struct {
@@ -68,12 +71,12 @@ func MustNewConfig(v *viper.Viper) *Config {
err := v.Unmarshal(config,
viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
hooks.SliceTypeStringDecode[*domain.CustomMessageText],
hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
hooks.SliceTypeStringDecode[authz.RoleMapping],
hooks.MapTypeStringDecode[string, *authz.SystemAPIUser],
hooks.MapHTTPHeaderStringDecode,
database.DecodeHook,
database.DecodeHook(false),
actions.HTTPConfigDecodeHook,
hook.EnumHookFunc(internal_authz.MemberTypeString),
hook.EnumHookFunc(authz.MemberTypeString),
hook.Base64ToBytesHookFunc(),
hook.TagToLanguageHookFunc(),
mapstructure.StringToTimeDurationHookFunc(),
@@ -146,6 +149,7 @@ type Steps struct {
s50IDPTemplate6UsePKCE *IDPTemplate6UsePKCE
s51IDPTemplate6RootCA *IDPTemplate6RootCA
s52IDPTemplate6LDAP2 *IDPTemplate6LDAP2
s53InitPermittedOrgsFunction *InitPermittedOrgsFunction53
}
func MustNewSteps(v *viper.Viper) *Steps {

View File

@@ -13,9 +13,6 @@ type RiverMigrateRepeatable struct {
}
func (mig *RiverMigrateRepeatable) Execute(ctx context.Context, _ eventstore.Event) error {
if mig.client.Type() != "postgres" {
return nil
}
return queue.NewMigrator(mig.client).Execute(ctx)
}

View File

@@ -55,7 +55,7 @@ func New() *cobra.Command {
Short: "setup ZITADEL instance",
Long: `sets up data to start ZITADEL.
Requirements:
- cockroachdb`,
- postgreSQL`,
Run: func(cmd *cobra.Command, args []string) {
err := tls.ModeFromFlag(cmd)
logging.OnError(err).Fatal("invalid tlsMode")
@@ -107,7 +107,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
dbClient, err := database.Connect(config.Database, false)
logging.OnError(err).Fatal("unable to connect to database")
config.Eventstore.Querier = old_es.NewCRDB(dbClient)
config.Eventstore.Querier = old_es.NewPostgres(dbClient)
esV3 := new_es.NewEventstore(dbClient)
config.Eventstore.Pusher = esV3
config.Eventstore.Searcher = esV3
@@ -137,7 +137,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s5LastFailed = &LastFailed{dbClient: dbClient.DB}
steps.s6OwnerRemoveColumns = &OwnerRemoveColumns{dbClient: dbClient.DB}
steps.s7LogstoreTables = &LogstoreTables{dbClient: dbClient.DB, username: config.Database.Username(), dbType: config.Database.Type()}
steps.s7LogstoreTables = &LogstoreTables{dbClient: dbClient.DB, username: config.Database.Username()}
steps.s8AuthTokens = &AuthTokenIndexes{dbClient: dbClient}
steps.CorrectCreationDate.dbClient = dbClient
steps.s12AddOTPColumns = &AddOTPColumns{dbClient: dbClient}
@@ -179,6 +179,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s50IDPTemplate6UsePKCE = &IDPTemplate6UsePKCE{dbClient: dbClient}
steps.s51IDPTemplate6RootCA = &IDPTemplate6RootCA{dbClient: dbClient}
steps.s52IDPTemplate6LDAP2 = &IDPTemplate6LDAP2{dbClient: dbClient}
steps.s53InitPermittedOrgsFunction = &InitPermittedOrgsFunction53{dbClient: dbClient}
err = projection.Create(ctx, dbClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
@@ -220,6 +221,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s50IDPTemplate6UsePKCE,
steps.s51IDPTemplate6RootCA,
steps.s52IDPTemplate6LDAP2,
steps.s53InitPermittedOrgsFunction,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@@ -308,8 +310,8 @@ func mustExecuteMigration(ctx context.Context, eventstoreClient *eventstore.Even
// under the folder/typ/filename path.
// Typ describes the database dialect and may be omitted if no
// dialect specific migration is specified.
func readStmt(fs embed.FS, folder, typ, filename string) (string, error) {
stmt, err := fs.ReadFile(path.Join(folder, typ, filename))
func readStmt(fs embed.FS, folder, filename string) (string, error) {
stmt, err := fs.ReadFile(path.Join(folder, filename))
return string(stmt), err
}
@@ -322,16 +324,15 @@ type statement struct {
// under the folder/type path.
// Typ describes the database dialect and may be omitted if no
// dialect specific migration is specified.
func readStatements(fs embed.FS, folder, typ string) ([]statement, error) {
basePath := path.Join(folder, typ)
dir, err := fs.ReadDir(basePath)
func readStatements(fs embed.FS, folder string) ([]statement, error) {
dir, err := fs.ReadDir(folder)
if err != nil {
return nil, err
}
statements := make([]statement, len(dir))
for i, file := range dir {
statements[i].file = file.Name()
statements[i].query, err = readStmt(fs, folder, typ, file.Name())
statements[i].query, err = readStmt(fs, folder, file.Name())
if err != nil {
return nil, err
}
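
The helper simplification above drives the mechanical call-site changes throughout this diff. A small sketch of a hypothetical step using the new readStmt signature; folder 99 and its file do not exist in the repository and are shown only to illustrate the API:

package setup

import (
	"context"
	"embed"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/eventstore"
)

//go:embed 99/example.sql
var exampleFS embed.FS // hypothetical folder and file

type ExampleStep struct {
	dbClient *database.DB
}

func (mig *ExampleStep) Execute(ctx context.Context, _ eventstore.Event) error {
	// Formerly readStmt(exampleFS, "99", mig.dbClient.Type(), "example.sql");
	// the dialect segment is gone because only the PostgreSQL file remains.
	stmt, err := readStmt(exampleFS, "99", "example.sql")
	if err != nil {
		return err
	}
	_, err = mig.dbClient.ExecContext(ctx, stmt)
	return err
}

func (*ExampleStep) String() string {
	return "99_example_step"
}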
@@ -412,7 +413,7 @@ func startCommandsQueries(
sessionTokenVerifier,
func(q *query.Queries) domain.PermissionCheck {
return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
},
0, // not needed for projections
@@ -437,7 +438,7 @@ func startCommandsQueries(
authZRepo, err := authz.Start(queries, eventstoreClient, dbClient, keys.OIDC, config.ExternalSecure)
logging.OnError(err).Fatal("unable to start authz repo")
permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
return internal_authz.CheckPermission(ctx, authZRepo, config.SystemAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
commands, err := command.StartCommands(ctx,
@@ -472,9 +473,6 @@ func startCommandsQueries(
)
logging.OnError(err).Fatal("unable to start commands")
if !config.Notifications.LegacyEnabled && dbClient.Type() == "cockroach" {
logging.Fatal("notifications must be set to LegacyEnabled=true when using CockroachDB")
}
q, err := queue.NewQueue(&queue.Config{
Client: dbClient,
})
@@ -501,7 +499,6 @@ func startCommandsQueries(
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
dbClient,
q,
)