From 571015703ea0da0a37f5ad6375de24aada3b015f Mon Sep 17 00:00:00 2001 From: Silvan Date: Wed, 29 May 2024 09:34:37 +0200 Subject: [PATCH 1/6] docs: add office hours (#7999) --- MEETING_SCHEDULE.md | 29 +++++++++++++++++++++++++++++ README.md | 3 +++ docs/docs/guides/overview.mdx | 4 ++++ 3 files changed, 36 insertions(+) create mode 100644 MEETING_SCHEDULE.md diff --git a/MEETING_SCHEDULE.md b/MEETING_SCHEDULE.md new file mode 100644 index 0000000000..04dd4ff849 --- /dev/null +++ b/MEETING_SCHEDULE.md @@ -0,0 +1,29 @@ +# ZITADEL Office Hours + +Dear community! +We're excited to announce bi-weekly office hours. + +## #1 Dive Deep into Actions v2 + +The first office hour is dedicated to exploring the [new Actions v2 feature](https://zitadel.com/docs/concepts/features/actions_v2). + +What to expect: + +* **Deep Dive**: @adlerhurst will walk you through the functionalities and benefits of Actions v2. +* **Live Q&A**: Get your questions answered directly by the ZITADEL team during the dedicated Q&A session. + +Details: + +* **Target Audience**: Developers and IT Ops personnel using ZITADEL +* **Topic**: Actions v2 deep dive and Q&A +* **When**: Wednesday 29th of May 12 pm PST / 3 pm EST / 9 pm CEST +* **Duration**: about 1 hour +* **Platform**: Zitadel Discord Server (Join us here: https://zitadel.com/office-hours?event=1243282884677341275 ) + +In the meantime: + +Feel free to share any questions you already have about Actions v2 in the chat of the [office hours channel](https://zitadel.com/office-hours) on our Discord server. + +We look forward to seeing you there! + +P.S. Spread the word! Share this announcement with your fellow ZITADEL users who might be interested. \ No newline at end of file diff --git a/README.md b/README.md index 449ecb3589..1840218eb7 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,9 @@

+|Community Meeting| +|------------------| +|ZITADEL holds bi-weekly community calls. To join the community calls or to watch previous meeting notes and recordings, please visit the [meeting schedule](https://github.com/zitadel/zitadel/blob/main/MEETING_SCHEDULE.md).| Are you searching for a user management tool that is quickly set up like Auth0 and open source like Keycloak? diff --git a/docs/docs/guides/overview.mdx b/docs/docs/guides/overview.mdx index 3a42374e82..d01a4a6903 100644 --- a/docs/docs/guides/overview.mdx +++ b/docs/docs/guides/overview.mdx @@ -39,6 +39,10 @@ Join our [Discord chat](https://zitadel.com/chat) or open a [discussion](https:/ Cloud and enterprise customers can additionally reach us privately via our [support communication channels](/legal/service-description/support-services). +## Office Hours + +ZITADEL holds bi-weekly community calls. To join the community calls use [this link](https://zitadel.com/office-hours), or find further information [here](https://github.com/zitadel/zitadel/blob/main/MEETING_SCHEDULE.md). + ## Contribute ZITADEL is open source — and so is the documentation. From 3f77b49a413053f0f89e5ad226fec27f4e32ebad Mon Sep 17 00:00:00 2001 From: mffap Date: Wed, 29 May 2024 11:14:27 +0200 Subject: [PATCH 2/6] docs(service users): improve client id and client secret docs (#7990) # Which Problems Are Solved - Misaligned heading - Curl with auth header is hard to understand without the link # How the Problems Are Solved Instead of explaining how to create a proper encoding, just use --user flag for client_id and client_secret --- .../guides/integrate/service-users/client-credentials.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/docs/guides/integrate/service-users/client-credentials.md b/docs/docs/guides/integrate/service-users/client-credentials.md index 8d2caf6e83..7924f2b6ae 100644 --- a/docs/docs/guides/integrate/service-users/client-credentials.md +++ b/docs/docs/guides/integrate/service-users/client-credentials.md @@ -26,7 +26,7 @@ If you lose it, you will have to generate a new one. ![Create new service user](/img/console_serviceusers_secret.gif) -## 2. Authenticating a service user and request a token +### 2. Authenticating a service user and request a token In this step, we will authenticate a service user and receive an access_token to use against the ZITADEL API. @@ -36,13 +36,15 @@ You will need to craft a POST request to ZITADEL's token endpoint: curl --request POST \ --url https://$CUSTOM-DOMAIN/oauth/v2/token \ --header 'Content-Type: application/x-www-form-urlencoded' \ - --header 'Authorization: Basic ${BASIC_AUTH}' \ --data grant_type=client_credentials \ - --data scope='openid profile' + --data scope='openid profile' \ + --user "$CLIENT_ID:$CLIENT_SECRET" ``` +* `CUSTOM_DOMAIN` should be set to your [custom domain](/docs/concepts/features/custom-domain) * `grant_type` should be set to `client_credentials` * `scope` should contain any [Scopes](/apis/openidoauth/scopes) you want to include, but must include `openid`. For this example, please include `profile` +* `CLIENT_ID` and `CLIENT_SECRET` should be set with the values shown in Console when generating a new secret to enable [basic authentication](/docs/apis/openidoauth/authn-methods) If you want to access ZITADEL APIs, make sure to include the required scopes `urn:zitadel:iam:org:project:id:zitadel:aud`. Read our guide [how to access ZITADEL APIs](../zitadel-apis/access-zitadel-apis) to learn more. 
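
A minimal sketch combining the token request above with the ZITADEL API audience scope mentioned in the guide; the endpoint, flags and scope are taken from the snippet above, while the `jq` extraction of the `access_token` field is an optional convenience:

```bash
# request an access token that can also be used against the ZITADEL APIs
# CUSTOM_DOMAIN, CLIENT_ID and CLIENT_SECRET as described in the guide above
curl --request POST \
  --url https://$CUSTOM_DOMAIN/oauth/v2/token \
  --header 'Content-Type: application/x-www-form-urlencoded' \
  --data grant_type=client_credentials \
  --data scope='openid profile urn:zitadel:iam:org:project:id:zitadel:aud' \
  --user "$CLIENT_ID:$CLIENT_SECRET" \
  | jq -r '.access_token'
```
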
From eca8ffda709e701c14d196d43c834d072cf4dec1 Mon Sep 17 00:00:00 2001
From: Livio Spring
Date: Wed, 29 May 2024 17:45:46 +0200
Subject: [PATCH 3/6] fix(login): correctly set preferred login name in the login ui (#8038)

# Which Problems Are Solved

A customer noted that after upgrade to 2.53.0, users were no longer able to reset their passwords through the login UI.
This was due to an accidental change in https://github.com/zitadel/zitadel/pull/7969

# How the Problems Are Solved

The `preferred_login_name` is now correctly read from the database.

# Additional Changes

None.

# Additional Context

relates to #7969
---
 internal/user/repository/view/user_by_id.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/user/repository/view/user_by_id.sql b/internal/user/repository/view/user_by_id.sql
index fa85fc43f4..1e21e59486 100644
--- a/internal/user/repository/view/user_by_id.sql
+++ b/internal/user/repository/view/user_by_id.sql
@@ -33,7 +33,7 @@ SELECT
     , (SELECT array_agg(ll.login_name) login_names FROM projections.login_names3 ll WHERE u.instance_id = ll.instance_id AND u.id = ll.user_id GROUP BY ll.user_id, ll.instance_id) AS login_names
-    , l.login_name
+    , l.login_name as preferred_login_name
     , h.first_name
     , h.last_name
     , h.nick_name

From fa5e6d191483781642a59b82c17dc2fc4702c946 Mon Sep 17 00:00:00 2001
From: Livio Spring
Date: Wed, 29 May 2024 18:14:46 +0200
Subject: [PATCH 4/6] docs(advisory): add technical advisory for token type change (#8029)

# Which Problems Are Solved

Among others, #7822 changed the `user.token.added` event type to `user.token.v2.added`. We want to raise awareness of this change for customers who use the event type to calculate DAU / MAU or as part of their audit trail.

# How the Problems Are Solved

Technical advisory to state the change.

# Additional Changes

None.

# Additional Context

Relates to #7822

Co-authored-by: Fabi
---
 docs/docs/support/advisory/a10009.md     |  2 +-
 docs/docs/support/advisory/a10010.md     | 30 ++++++++++++++++++++++++
 docs/docs/support/technical_advisory.mdx | 14 ++++++++++-
 3 files changed, 44 insertions(+), 2 deletions(-)
 create mode 100644 docs/docs/support/advisory/a10010.md

diff --git a/docs/docs/support/advisory/a10009.md b/docs/docs/support/advisory/a10009.md
index 7137fdceaf..014700760a 100644
--- a/docs/docs/support/advisory/a10009.md
+++ b/docs/docs/support/advisory/a10009.md
@@ -6,7 +6,7 @@ title: Technical Advisory 10009

 Version: 2.53.0

-Date: Calendar week 23/24 2024
+Date: 2024-05-28

 ## Description

diff --git a/docs/docs/support/advisory/a10010.md b/docs/docs/support/advisory/a10010.md
new file mode 100644
index 0000000000..c2fd95902e
--- /dev/null
+++ b/docs/docs/support/advisory/a10010.md
@@ -0,0 +1,30 @@
+---
+title: Technical Advisory 10010
+---
+
+## Date and Version
+
+Version: 2.53.0
+
+Date: 2024-05-28
+
+## Description
+
+Version 2.53.0 optimizes the way tokens are created and migrates them to the v2 implementation already used by OAuth / OIDC tokens created through the session API.
+
+Because of this, token events are no longer created on the user itself. To be as backwards compatible as possible, a separate event is created on the user for the audit log.
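+
+For audit-trail or DAU / MAU queries this means both event types have to be considered. A minimal sketch, assuming the admin ListEvents search endpoint (`/admin/v1/events/_search`) and its event type filter as documented in the Event API guide; verify the exact path and field names against that guide:
+
+```bash
+# CUSTOM_DOMAIN and ACCESS_TOKEN are placeholders you need to provide;
+# the endpoint path and the event_types field are assumptions based on the Event API docs
+curl --request POST \
+  --url https://$CUSTOM_DOMAIN/admin/v1/events/_search \
+  --header "Authorization: Bearer $ACCESS_TOKEN" \
+  --header 'Content-Type: application/json' \
+  --data '{
+    "event_types": ["user.token.added", "user.token.v2.added"]
+  }'
+```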
+
+## Statement
+
+This change was tracked in the following PR:
+[perf(oidc): optimize token creation](https://github.com/zitadel/zitadel/pull/7822), which was released in Version [2.53.0](https://github.com/zitadel/zitadel/releases/tag/v2.53.0)
+
+## Mitigation
+
+If you use the ListEvents API to check the audit trail of a user or to compute Daily or Monthly Active Users, be sure to also include the `user.token.v2.added` event type in your search
+if you already query for the `user.token.added` event type.
+
+## Impact
+
+Once this update has been released and deployed, the `user.token.added` event will no longer be created when a user access token is created; a `user.token.v2.added` event will be created instead.
+Existing `user.token.added` events will be untouched.
diff --git a/docs/docs/support/technical_advisory.mdx b/docs/docs/support/technical_advisory.mdx
index 565dae3904..e205b87683 100644
--- a/docs/docs/support/technical_advisory.mdx
+++ b/docs/docs/support/technical_advisory.mdx
@@ -164,7 +164,19 @@ We understand that these advisories may include breaking changes, and we aim to
       Fixes rare cases where updating projections was blocked by a `WRITE_TOO_OLD`-error when using cockroachdb.
       2.53.0
-      2024-05-27
+      2024-05-28
+
+
+
+      A-10010
+
+      Event type of token added event changed
+      Breaking Behavior Change
+
+      Version 2.53.0 improves token issuance. Due to this, there are changes to the event types created on token creation.
+
+      2.53.0
+      2024-05-28

From d254828d476861e7c9f09c182c4f4159f244a11e Mon Sep 17 00:00:00 2001
From: Livio Spring
Date: Thu, 30 May 2024 09:06:32 +0200
Subject: [PATCH 5/6] fix: session idp intent check (#8040)

# Which Problems Are Solved

If an IdP intent succeeded while the user was not yet linked and the IdP link was only added afterwards, the subsequent IdP check on the session API would fail with `Intent meant for another user (COMMAND-O8xk3w)`.
This issue was introduced when allowing IdP intents from other organizations (https://github.com/zitadel/zitadel/pull/7871)

# How the Problems Are Solved

The IdP link is now correctly checked in the session API (using the user's organization instead of the one from the intent).

# Additional Changes

- Improved the corresponding integration test to cover the exact behavior.
- Tests that had to be updated with the new cases were additionally changed to use expectEventstore instead of the deprecated eventstoreExpect, and the two eventstore mocks in session_test.go were combined.

# Additional Context

- Relates to #7871
- This issue was reported by a customer.
- will be back ported to 2.52.x --- .../session/v2/session_integration_test.go | 32 +-- internal/command/session.go | 2 +- internal/command/session_test.go | 237 +++++++++++------- 3 files changed, 163 insertions(+), 108 deletions(-) diff --git a/internal/api/grpc/session/v2/session_integration_test.go b/internal/api/grpc/session/v2/session_integration_test.go index 9423eea9ae..689893319b 100644 --- a/internal/api/grpc/session/v2/session_integration_test.go +++ b/internal/api/grpc/session/v2/session_integration_test.go @@ -429,6 +429,14 @@ func TestServer_CreateSession_successfulIntent_instant(t *testing.T) { func TestServer_CreateSession_successfulIntentUnknownUserID(t *testing.T) { idpID := Tester.AddGenericOAuthProvider(t, CTX) + // successful intent without known / linked user + idpUserID := "id" + intentID, token, _, _ := Tester.CreateSuccessfulOAuthIntent(t, CTX, idpID, "", idpUserID) + + // link the user (with info from intent) + Tester.CreateUserIDPlink(CTX, User.GetUserId(), idpUserID, idpID, User.GetUserId()) + + // session with intent check must now succeed createResp, err := Client.CreateSession(CTX, &session.CreateSessionRequest{ Checks: &session.Checks{ User: &session.CheckUser{ @@ -436,28 +444,6 @@ func TestServer_CreateSession_successfulIntentUnknownUserID(t *testing.T) { UserId: User.GetUserId(), }, }, - }, - }) - require.NoError(t, err) - verifyCurrentSession(t, createResp.GetSessionId(), createResp.GetSessionToken(), createResp.GetDetails().GetSequence(), time.Minute, nil, nil, 0, User.GetUserId()) - - idpUserID := "id" - intentID, token, _, _ := Tester.CreateSuccessfulOAuthIntent(t, CTX, idpID, "", idpUserID) - updateResp, err := Client.SetSession(CTX, &session.SetSessionRequest{ - SessionId: createResp.GetSessionId(), - Checks: &session.Checks{ - IdpIntent: &session.CheckIDPIntent{ - IdpIntentId: intentID, - IdpIntentToken: token, - }, - }, - }) - require.Error(t, err) - Tester.CreateUserIDPlink(CTX, User.GetUserId(), idpUserID, idpID, User.GetUserId()) - intentID, token, _, _ = Tester.CreateSuccessfulOAuthIntent(t, CTX, idpID, User.GetUserId(), idpUserID) - updateResp, err = Client.SetSession(CTX, &session.SetSessionRequest{ - SessionId: createResp.GetSessionId(), - Checks: &session.Checks{ IdpIntent: &session.CheckIDPIntent{ IdpIntentId: intentID, IdpIntentToken: token, @@ -465,7 +451,7 @@ func TestServer_CreateSession_successfulIntentUnknownUserID(t *testing.T) { }, }) require.NoError(t, err) - verifyCurrentSession(t, createResp.GetSessionId(), updateResp.GetSessionToken(), updateResp.GetDetails().GetSequence(), time.Minute, nil, nil, 0, User.GetUserId(), wantUserFactor, wantIntentFactor) + verifyCurrentSession(t, createResp.GetSessionId(), createResp.GetSessionToken(), createResp.GetDetails().GetSequence(), time.Minute, nil, nil, 0, User.GetUserId(), wantUserFactor, wantIntentFactor) } func TestServer_CreateSession_startedIntentFalseToken(t *testing.T) { diff --git a/internal/command/session.go b/internal/command/session.go index c9fd29ac46..d6fcb072ad 100644 --- a/internal/command/session.go +++ b/internal/command/session.go @@ -123,7 +123,7 @@ func CheckIntent(intentID, token string) SessionCommand { return zerrors.ThrowPreconditionFailed(nil, "COMMAND-O8xk3w", "Errors.Intent.OtherUser") } } else { - linkWriteModel := NewUserIDPLinkWriteModel(cmd.sessionWriteModel.UserID, cmd.intentWriteModel.IDPID, cmd.intentWriteModel.IDPUserID, cmd.intentWriteModel.ResourceOwner) + linkWriteModel := NewUserIDPLinkWriteModel(cmd.sessionWriteModel.UserID, 
cmd.intentWriteModel.IDPID, cmd.intentWriteModel.IDPUserID, cmd.sessionWriteModel.UserResourceOwner) err := cmd.eventstore.FilterToQueryReducer(ctx, linkWriteModel) if err != nil { return err diff --git a/internal/command/session_test.go b/internal/command/session_test.go index b41a886aa2..feaf395c22 100644 --- a/internal/command/session_test.go +++ b/internal/command/session_test.go @@ -31,7 +31,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { userAggr := &user.NewAggregate("user1", "org1").Aggregate type fields struct { - eventstore *eventstore.Eventstore + eventstore func(*testing.T) *eventstore.Eventstore sessionWriteModel *SessionWriteModel } type res struct { @@ -46,7 +46,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { { name: "missing UID", fields: fields{ - eventstore: &eventstore.Eventstore{}, + eventstore: expectEventstore(), sessionWriteModel: &SessionWriteModel{}, }, res: res{ @@ -57,7 +57,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { { name: "filter error", fields: fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectFilterError(io.ErrClosedPipe), ), sessionWriteModel: &SessionWriteModel{ @@ -72,7 +72,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { { name: "removed user", fields: fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectFilter( eventFromEventPusher( user.NewHumanAddedEvent(context.Background(), @@ -101,7 +101,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { { name: "ok", fields: fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectFilter( eventFromEventPusher( user.NewHumanAddedEvent(context.Background(), @@ -133,7 +133,7 @@ func TestSessionCommands_getHumanWriteModel(t *testing.T) { } for _, tt := range tests { s := &SessionCommands{ - eventstore: tt.fields.eventstore, + eventstore: tt.fields.eventstore(t), sessionWriteModel: tt.fields.sessionWriteModel, } got, err := s.gethumanWriteModel(context.Background()) @@ -271,7 +271,7 @@ func TestCommands_CreateSession(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Commands{ - eventstore: eventstoreExpect(t, tt.expect...), + eventstore: expectEventstore(tt.expect...)(t), idGenerator: tt.fields.idGenerator, sessionTokenCreator: tt.fields.tokenCreator, } @@ -284,7 +284,7 @@ func TestCommands_CreateSession(t *testing.T) { func TestCommands_UpdateSession(t *testing.T) { type fields struct { - eventstore *eventstore.Eventstore + eventstore func(*testing.T) *eventstore.Eventstore tokenVerifier func(ctx context.Context, sessionToken, sessionID, tokenID string) (err error) } type args struct { @@ -307,7 +307,7 @@ func TestCommands_UpdateSession(t *testing.T) { { "eventstore failed", fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectFilterError(zerrors.ThrowInternal(nil, "id", "filter failed")), ), }, @@ -321,7 +321,7 @@ func TestCommands_UpdateSession(t *testing.T) { { "no change", fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectFilter( eventFromEventPusher( session.NewAddedEvent(context.Background(), @@ -361,7 +361,7 @@ func TestCommands_UpdateSession(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Commands{ - eventstore: tt.fields.eventstore, + eventstore: tt.fields.eventstore(t), sessionTokenVerifier: tt.fields.tokenVerifier, } got, err := c.UpdateSession(tt.args.ctx, tt.args.sessionID, tt.args.checks, tt.args.metadata, 
tt.args.lifetime) @@ -387,7 +387,7 @@ func TestCommands_updateSession(t *testing.T) { testNow := time.Now() type fields struct { - eventstore *eventstore.Eventstore + eventstore func(*testing.T) *eventstore.Eventstore } type args struct { ctx context.Context @@ -408,7 +408,7 @@ func TestCommands_updateSession(t *testing.T) { { "terminated", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore(), }, args{ ctx: context.Background(), @@ -423,7 +423,7 @@ func TestCommands_updateSession(t *testing.T) { { "check failed", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore(), }, args{ ctx: context.Background(), @@ -443,7 +443,7 @@ func TestCommands_updateSession(t *testing.T) { { "no change", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore(), }, args{ ctx: authz.NewMockContext("instance1", "", ""), @@ -463,14 +463,13 @@ func TestCommands_updateSession(t *testing.T) { { "negative lifetime", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore(), }, args{ ctx: authz.NewMockContext("instance1", "", ""), checks: &SessionCommands{ sessionWriteModel: NewSessionWriteModel("sessionID", "instance1"), sessionCommands: []SessionCommand{}, - eventstore: eventstoreExpect(t), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -489,7 +488,7 @@ func TestCommands_updateSession(t *testing.T) { { "lifetime set", fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( expectPush( session.NewLifetimeSetEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, 10*time.Minute, @@ -505,7 +504,6 @@ func TestCommands_updateSession(t *testing.T) { checks: &SessionCommands{ sessionWriteModel: NewSessionWriteModel("sessionID", "instance1"), sessionCommands: []SessionCommand{}, - eventstore: eventstoreExpect(t), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -530,7 +528,17 @@ func TestCommands_updateSession(t *testing.T) { { "set user, password, metadata and token", fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher( + user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), + ), + eventFromEventPusher( + user.NewHumanPasswordChangedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "$plain$x$password", false, ""), + ), + ), expectPush( session.NewUserCheckedEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, "userID", "org1", testNow, &language.Afrikaans, @@ -555,18 +563,6 @@ func TestCommands_updateSession(t *testing.T) { CheckUser("userID", "org1", &language.Afrikaans), CheckPassword("password"), }, - eventstore: eventstoreExpect(t, - expectFilter( - eventFromEventPusher( - user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, - "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), - ), - eventFromEventPusher( - user.NewHumanPasswordChangedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, - "$plain$x$password", false, ""), - ), - ), - ), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -594,7 +590,14 @@ func TestCommands_updateSession(t *testing.T) { { "set user, intent not successful", fields{ - eventstore: 
eventstoreExpect(t), + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher( + user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), + ), + ), + ), }, args{ ctx: authz.NewMockContext("instance1", "", ""), @@ -604,14 +607,6 @@ func TestCommands_updateSession(t *testing.T) { CheckUser("userID", "org1", &language.Afrikaans), CheckIntent("intent", "aW50ZW50"), }, - eventstore: eventstoreExpect(t, - expectFilter( - eventFromEventPusher( - user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, - "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), - ), - ), - ), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -633,7 +628,25 @@ func TestCommands_updateSession(t *testing.T) { { "set user, intent not for user", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher( + user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), + ), + eventFromEventPusher( + idpintent.NewSucceededEvent(context.Background(), + &idpintent.NewAggregate("id", "instance1").Aggregate, + nil, + "idpUserID", + "idpUserName", + "userID2", + nil, + "", + ), + ), + ), + ), }, args{ ctx: authz.NewMockContext("instance1", "", ""), @@ -643,25 +656,6 @@ func TestCommands_updateSession(t *testing.T) { CheckUser("userID", "org1", &language.Afrikaans), CheckIntent("intent", "aW50ZW50"), }, - eventstore: eventstoreExpect(t, - expectFilter( - eventFromEventPusher( - user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, - "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), - ), - eventFromEventPusher( - idpintent.NewSucceededEvent(context.Background(), - &idpintent.NewAggregate("id", "instance1").Aggregate, - nil, - "idpUserID", - "idpUserName", - "userID2", - nil, - "", - ), - ), - ), - ), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -683,7 +677,7 @@ func TestCommands_updateSession(t *testing.T) { { "set user, intent incorrect token", fields{ - eventstore: eventstoreExpect(t), + eventstore: expectEventstore(), }, args{ ctx: authz.NewMockContext("instance1", "", ""), @@ -693,7 +687,6 @@ func TestCommands_updateSession(t *testing.T) { CheckUser("userID", "org1", &language.Afrikaans), CheckIntent("intent2", "aW50ZW50"), }, - eventstore: eventstoreExpect(t), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -715,7 +708,24 @@ func TestCommands_updateSession(t *testing.T) { { "set user, intent, metadata and token", fields{ - eventstore: eventstoreExpect(t, + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher( + user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), + ), + eventFromEventPusher( + idpintent.NewSucceededEvent(context.Background(), + &idpintent.NewAggregate("id", "instance1").Aggregate, + nil, + "idpUserID", + "idpUsername", + "userID", + nil, + "", + ), + ), + ), expectPush( session.NewUserCheckedEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, "userID", "org1", 
testNow, &language.Afrikaans), @@ -736,25 +746,6 @@ func TestCommands_updateSession(t *testing.T) { CheckUser("userID", "org1", &language.Afrikaans), CheckIntent("intent", "aW50ZW50"), }, - eventstore: eventstoreExpect(t, - expectFilter( - eventFromEventPusher( - user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, - "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), - ), - eventFromEventPusher( - idpintent.NewSucceededEvent(context.Background(), - &idpintent.NewAggregate("id", "instance1").Aggregate, - nil, - "idpUserID", - "idpUsername", - "userID", - nil, - "", - ), - ), - ), - ), createToken: func(sessionID string) (string, string, error) { return "tokenID", "token", @@ -779,12 +770,90 @@ func TestCommands_updateSession(t *testing.T) { }, }, }, + { + "set user, intent (user not linked yet)", + fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher( + user.NewHumanAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "username", "", "", "", "", language.English, domain.GenderUnspecified, "", false), + ), + eventFromEventPusher( + idpintent.NewStartedEvent(context.Background(), + &idpintent.NewAggregate("id", "instance1").Aggregate, + nil, + nil, + "idpID", + ), + ), + eventFromEventPusher( + idpintent.NewSucceededEvent(context.Background(), + &idpintent.NewAggregate("id", "instance1").Aggregate, + nil, + "idpUserID", + "idpUsername", + "", + nil, + "", + ), + ), + ), + expectFilter( + eventFromEventPusher( + user.NewUserIDPLinkAddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, + "idpID", + "idpUsername", + "idpUserID", + ), + ), + ), + expectPush( + session.NewUserCheckedEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, + "userID", "org1", testNow, &language.Afrikaans), + session.NewIntentCheckedEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, + testNow), + session.NewTokenSetEvent(context.Background(), &session.NewAggregate("sessionID", "instance1").Aggregate, + "tokenID"), + ), + ), + }, + args{ + ctx: authz.NewMockContext("instance1", "", ""), + checks: &SessionCommands{ + sessionWriteModel: NewSessionWriteModel("sessionID", "instance1"), + sessionCommands: []SessionCommand{ + CheckUser("userID", "org1", &language.Afrikaans), + CheckIntent("intent", "aW50ZW50"), + }, + createToken: func(sessionID string) (string, string, error) { + return "tokenID", + "token", + nil + }, + intentAlg: decryption(nil), + now: func() time.Time { + return testNow + }, + }, + }, + res{ + want: &SessionChanged{ + ObjectDetails: &domain.ObjectDetails{ + ResourceOwner: "instance1", + }, + ID: "sessionID", + NewToken: "token", + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Commands{ - eventstore: tt.fields.eventstore, + eventstore: tt.fields.eventstore(t), } + tt.args.checks.eventstore = c.eventstore got, err := c.updateSession(tt.args.ctx, tt.args.checks, tt.args.metadata, tt.args.lifetime) require.ErrorIs(t, err, tt.res.err) assert.Equal(t, tt.res.want, got) From 2243306ef6f01a1f4eefc54f2a803869c3e30ce7 Mon Sep 17 00:00:00 2001 From: Silvan Date: Thu, 30 May 2024 11:35:30 +0200 Subject: [PATCH 6/6] feat(cmd): mirror (#7004) # Which Problems Are Solved Adds the possibility to mirror an existing database to a new one. For that a new command was added `zitadel mirror`. Including it's subcommands for a more fine grained mirror of the data. 
Sub commands:
* `zitadel mirror eventstore`: copies only events and their unique constraints
* `zitadel mirror system`: mirrors the data of the `system`-schema
* `zitadel mirror projections`: runs all projections
* `zitadel mirror auth`: copies auth requests
* `zitadel mirror verify`: counts the number of rows in the source and destination database and prints the diff.

The command requires one of the following flags:
* `--system`: copies all instances of the system
* `--instance <id>`, `--instance <comma-separated ids>`: copies only the defined instances

The command is safe to execute multiple times by adding the `--replace`-flag. This replaces currently existing data except for the `events` table (see the usage sketch below).

# Additional Changes

A `--for-mirror`-flag was added to `zitadel setup` to prepare the new database. The flag skips the creation of the first instance and the initial run of projections.

It is now possible to skip the creation of the first instance during setup by setting `FirstInstance.Skip` to true in the steps configuration.

# Additional info

It is currently not possible to merge multiple databases. See https://github.com/zitadel/zitadel/issues/7964 for more details.

It is currently not possible to use files. See https://github.com/zitadel/zitadel/issues/7966 for more information.

closes https://github.com/zitadel/zitadel/issues/7586
closes https://github.com/zitadel/zitadel/issues/7486

### Definition of Ready

- [x] I am happy with the code
- [x] Short description of the feature/issue is added in the pr description
- [x] PR is linked to the corresponding user story
- [x] Acceptance criteria are met
- [x] All open todos and follow ups are defined in a new ticket and justified
- [x] Deviations from the acceptance criteria and design are agreed with the PO and documented.
- [x] No debug or dead code
- [x] My code has no repetitions
- [x] Critical parts are tested automatically
- [ ] Where possible E2E tests are implemented
- [x] Documentation/examples are up-to-date
- [x] All non-functional requirements are met
- [x] Functionality of the acceptance criteria is checked manually on the dev system.
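
A brief usage sketch, as referenced above. It only uses the flags and subcommands described in this PR; the instance IDs are placeholders, and the source and destination databases are expected to come from the mirror configuration:

```bash
# prepare the destination database; skips creating a first instance and the initial projection run
zitadel setup --for-mirror

# mirror two specific instances (copies system, auth and eventstore data, then recomputes projections and verifies)
zitadel mirror --instance INSTANCE_ID_1,INSTANCE_ID_2

# re-running is possible when --replace is added; always use the same --instance / --system selection
zitadel mirror --instance INSTANCE_ID_1,INSTANCE_ID_2 --replace

# compare row counts between source and destination
zitadel mirror verify --instance INSTANCE_ID_1,INSTANCE_ID_2
```
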
--------- Co-authored-by: Livio Spring --- cmd/initialise/init.go | 2 +- cmd/mirror/auth.go | 91 +++++ cmd/mirror/config.go | 80 +++++ cmd/mirror/defaults.yaml | 114 +++++++ cmd/mirror/event.go | 96 ++++++ cmd/mirror/event_store.go | 250 ++++++++++++++ cmd/mirror/mirror.go | 93 ++++++ cmd/mirror/projections.go | 316 ++++++++++++++++++ cmd/mirror/system.go | 139 ++++++++ cmd/mirror/verify.go | 111 ++++++ cmd/setup/03.go | 5 + cmd/setup/config.go | 1 + cmd/setup/setup.go | 24 +- cmd/setup/steps.yaml | 2 + cmd/start/start.go | 6 + cmd/start/start_from_init.go | 2 +- cmd/start/start_from_setup.go | 2 +- cmd/zitadel.go | 2 + docs/docs/apis/introduction.mdx | 2 +- docs/docs/examples/login/symfony.md | 2 +- .../docs/examples/secure-api/nodejs-nestjs.md | 6 +- .../examples/secure-api/python-django.mdx | 2 +- .../docs/examples/secure-api/python-flask.mdx | 18 +- .../identity-providers/introduction.md | 10 +- .../guides/integrate/login-ui/_logout.mdx | 2 +- .../integrate/login-ui/_select-account.mdx | 2 +- .../guides/integrate/onboarding/end-users.mdx | 2 +- .../token-introspection/basic-auth.mdx | 2 +- .../integrate/zitadel-apis/event-api.md | 2 +- docs/docs/guides/manage/cloud/settings.md | 2 +- docs/docs/guides/migrate/sources/keycloak.md | 10 +- docs/docs/guides/migrate/users.md | 10 +- docs/docs/guides/start/quickstart.mdx | 4 +- .../legal/policies/brand-trademark-policy.md | 2 +- .../vulnerability-disclosure-policy.mdx | 2 +- .../troubleshooting/_instance_not_found.mdx | 4 +- docs/docs/self-hosting/manage/cli/mirror.mdx | 232 +++++++++++++ .../docs/self-hosting/manage/cli/overview.mdx | 34 ++ .../manage/productionchecklist.md | 4 +- docs/docs/support/advisory/a10003.md | 4 +- .../software-release-cycles-support.md | 2 +- docs/sidebars.js | 12 + .../eventsourcing/handler/handler.go | 14 +- .../eventsourcing/handler/handler.go | 10 + internal/crypto/rsa.go | 2 +- internal/database/cockroach/crdb.go | 9 +- internal/database/database.go | 2 +- internal/database/postgres/pg.go | 9 +- internal/eventstore/handler/v2/handler.go | 3 - internal/notification/projections.go | 10 + internal/query/projection/projection.go | 10 + internal/query/query.go | 4 +- internal/v2/eventstore/postgres/event.go | 4 +- internal/v2/eventstore/postgres/push.go | 67 ++-- internal/v2/eventstore/postgres/push_test.go | 57 ++-- internal/v2/eventstore/postgres/query.go | 1 + internal/v2/eventstore/postgres/storage.go | 24 +- internal/v2/eventstore/push.go | 6 +- internal/v2/eventstore/query.go | 63 ++++ internal/v2/projection/highest_position.go | 15 + .../v2/readmodel/last_successful_mirror.go | 72 ++++ internal/v2/system/aggregate.go | 8 + internal/v2/system/mirror/aggregate.go | 8 + internal/v2/system/mirror/failed.go | 52 +++ internal/v2/system/mirror/started.go | 68 ++++ internal/v2/system/mirror/succeeded.go | 53 +++ 66 files changed, 2150 insertions(+), 129 deletions(-) create mode 100644 cmd/mirror/auth.go create mode 100644 cmd/mirror/config.go create mode 100644 cmd/mirror/defaults.yaml create mode 100644 cmd/mirror/event.go create mode 100644 cmd/mirror/event_store.go create mode 100644 cmd/mirror/mirror.go create mode 100644 cmd/mirror/projections.go create mode 100644 cmd/mirror/system.go create mode 100644 cmd/mirror/verify.go create mode 100644 docs/docs/self-hosting/manage/cli/mirror.mdx create mode 100644 docs/docs/self-hosting/manage/cli/overview.mdx create mode 100644 internal/v2/projection/highest_position.go create mode 100644 internal/v2/readmodel/last_successful_mirror.go create mode 100644 
internal/v2/system/aggregate.go create mode 100644 internal/v2/system/mirror/aggregate.go create mode 100644 internal/v2/system/mirror/failed.go create mode 100644 internal/v2/system/mirror/started.go create mode 100644 internal/v2/system/mirror/succeeded.go diff --git a/cmd/initialise/init.go b/cmd/initialise/init.go index d45bacfc17..917e6a2d93 100644 --- a/cmd/initialise/init.go +++ b/cmd/initialise/init.go @@ -40,7 +40,7 @@ func New() *cobra.Command { Long: `Sets up the minimum requirements to start ZITADEL. Prerequisites: -- cockroachDB +- database (PostgreSql or cockroachdb) The user provided by flags needs privileges to - create the database if it does not exist diff --git a/cmd/mirror/auth.go b/cmd/mirror/auth.go new file mode 100644 index 0000000000..df94708e71 --- /dev/null +++ b/cmd/mirror/auth.go @@ -0,0 +1,91 @@ +package mirror + +import ( + "context" + _ "embed" + "io" + "time" + + "github.com/jackc/pgx/v5/stdlib" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/database/dialect" +) + +func authCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "auth", + Short: "mirrors the auth requests table from one database to another", + Long: `mirrors the auth requests table from one database to another +ZITADEL needs to be initialized and set up with the --for-mirror flag +Only auth requests are mirrored`, + Run: func(cmd *cobra.Command, args []string) { + config := mustNewMigrationConfig(viper.GetViper()) + copyAuth(cmd.Context(), config) + }, + } + + cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete auth requests of defined instances before copy") + + return cmd +} + +func copyAuth(ctx context.Context, config *Migration) { + sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery) + logging.OnError(err).Fatal("unable to connect to source database") + defer sourceClient.Close() + + destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher) + logging.OnError(err).Fatal("unable to connect to destination database") + defer destClient.Close() + + copyAuthRequests(ctx, sourceClient, destClient) +} + +func copyAuthRequests(ctx context.Context, source, dest *database.DB) { + start := time.Now() + + sourceConn, err := source.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire connection") + defer sourceConn.Close() + + r, w := io.Pipe() + errs := make(chan error, 1) + + go func() { + err = sourceConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + _, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT id, regexp_replace(request::TEXT, '\\\\u0000', '', 'g')::JSON request, code, request_type, creation_date, change_date, instance_id FROM auth.auth_requests "+instanceClause()+") TO STDOUT") + w.Close() + return err + }) + errs <- err + }() + + destConn, err := dest.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire connection") + defer destConn.Close() + + var affected int64 + err = destConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + + if shouldReplace { + _, err := conn.Exec(ctx, "DELETE FROM auth.auth_requests "+instanceClause()) + if err != nil { + return err + } + } + + tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY auth.auth_requests FROM STDIN") + affected = tag.RowsAffected() + + return err + }) + logging.OnError(err).Fatal("unable to copy auth requests to destination") + 
logging.OnError(<-errs).Fatal("unable to copy auth requests from source") + logging.WithFields("took", time.Since(start), "count", affected).Info("auth requests migrated") +} diff --git a/cmd/mirror/config.go b/cmd/mirror/config.go new file mode 100644 index 0000000000..5d2ec8fac7 --- /dev/null +++ b/cmd/mirror/config.go @@ -0,0 +1,80 @@ +package mirror + +import ( + _ "embed" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/cmd/hooks" + "github.com/zitadel/zitadel/internal/actions" + internal_authz "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/command" + "github.com/zitadel/zitadel/internal/config/hook" + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/domain" + "github.com/zitadel/zitadel/internal/id" +) + +type Migration struct { + Source database.Config + Destination database.Config + + EventBulkSize uint32 + + Log *logging.Config + Machine *id.Config +} + +var ( + //go:embed defaults.yaml + defaultConfig []byte +) + +func mustNewMigrationConfig(v *viper.Viper) *Migration { + config := new(Migration) + mustNewConfig(v, config) + + err := config.Log.SetLogger() + logging.OnError(err).Fatal("unable to set logger") + + id.Configure(config.Machine) + + return config +} + +func mustNewProjectionsConfig(v *viper.Viper) *ProjectionsConfig { + config := new(ProjectionsConfig) + mustNewConfig(v, config) + + err := config.Log.SetLogger() + logging.OnError(err).Fatal("unable to set logger") + + id.Configure(config.Machine) + + return config +} + +func mustNewConfig(v *viper.Viper, config any) { + err := v.Unmarshal(config, + viper.DecodeHook(mapstructure.ComposeDecodeHookFunc( + hooks.SliceTypeStringDecode[*domain.CustomMessageText], + hooks.SliceTypeStringDecode[*command.SetQuota], + hooks.SliceTypeStringDecode[internal_authz.RoleMapping], + hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser], + hooks.MapTypeStringDecode[domain.Feature, any], + hooks.MapHTTPHeaderStringDecode, + hook.Base64ToBytesHookFunc(), + hook.TagToLanguageHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToTimeHookFunc(time.RFC3339), + mapstructure.StringToSliceHookFunc(","), + database.DecodeHook, + actions.HTTPConfigDecodeHook, + hook.EnumHookFunc(internal_authz.MemberTypeString), + )), + ) + logging.OnError(err).Fatal("unable to read default config") +} diff --git a/cmd/mirror/defaults.yaml b/cmd/mirror/defaults.yaml new file mode 100644 index 0000000000..7db91ecc0b --- /dev/null +++ b/cmd/mirror/defaults.yaml @@ -0,0 +1,114 @@ +Source: + cockroach: + Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST + Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT + Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE + MaxOpenConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS + MaxIdleConns: 6 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS + EventPushConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO + ProjectionSpoolerConnRatio: 0.33 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO + MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME + MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME + Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS + User: + Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME + Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD + SSL: + Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE + RootCert: "" # 
ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT + Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT + Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY + # Postgres is used as soon as a value is set + # The values describe the possible fields to set values + postgres: + Host: # ZITADEL_DATABASE_POSTGRES_HOST + Port: # ZITADEL_DATABASE_POSTGRES_PORT + Database: # ZITADEL_DATABASE_POSTGRES_DATABASE + MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS + MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS + MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME + MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME + Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS + User: + Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME + Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD + SSL: + Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE + RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT + Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT + Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY + +Destination: + cockroach: + Host: localhost # ZITADEL_DATABASE_COCKROACH_HOST + Port: 26257 # ZITADEL_DATABASE_COCKROACH_PORT + Database: zitadel # ZITADEL_DATABASE_COCKROACH_DATABASE + MaxOpenConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXOPENCONNS + MaxIdleConns: 0 # ZITADEL_DATABASE_COCKROACH_MAXIDLECONNS + MaxConnLifetime: 30m # ZITADEL_DATABASE_COCKROACH_MAXCONNLIFETIME + MaxConnIdleTime: 5m # ZITADEL_DATABASE_COCKROACH_MAXCONNIDLETIME + EventPushConnRatio: 0.01 # ZITADEL_DATABASE_COCKROACH_EVENTPUSHCONNRATIO + ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DATABASE_COCKROACH_PROJECTIONSPOOLERCONNRATIO + Options: "" # ZITADEL_DATABASE_COCKROACH_OPTIONS + User: + Username: zitadel # ZITADEL_DATABASE_COCKROACH_USER_USERNAME + Password: "" # ZITADEL_DATABASE_COCKROACH_USER_PASSWORD + SSL: + Mode: disable # ZITADEL_DATABASE_COCKROACH_USER_SSL_MODE + RootCert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT + Cert: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT + Key: "" # ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY + # Postgres is used as soon as a value is set + # The values describe the possible fields to set values + postgres: + Host: # ZITADEL_DATABASE_POSTGRES_HOST + Port: # ZITADEL_DATABASE_POSTGRES_PORT + Database: # ZITADEL_DATABASE_POSTGRES_DATABASE + MaxOpenConns: # ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS + MaxIdleConns: # ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS + MaxConnLifetime: # ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME + MaxConnIdleTime: # ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME + Options: # ZITADEL_DATABASE_POSTGRES_OPTIONS + User: + Username: # ZITADEL_DATABASE_POSTGRES_USER_USERNAME + Password: # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD + SSL: + Mode: # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE + RootCert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_ROOTCERT + Cert: # ZITADEL_DATABASE_POSTGRES_USER_SSL_CERT + Key: # ZITADEL_DATABASE_POSTGRES_USER_SSL_KEY + +EventBulkSize: 10000 + +Projections: + # The maximum duration a transaction remains open + # before it spots left folding additional events + # and updates the table. 
+ TransactionDuration: 0s # ZITADEL_PROJECTIONS_TRANSACTIONDURATION + # turn off scheduler during operation + RequeueEvery: 0s + ConcurrentInstances: 7 + EventBulkLimit: 1000 + Customizations: + notifications: + MaxFailureCount: 1 + +Eventstore: + MaxRetries: 3 + +Auth: + Spooler: + TransactionDuration: 0s #ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION + BulkLimit: 1000 #ZITADEL_AUTH_SPOOLER_BULKLIMIT + +Admin: + Spooler: + TransactionDuration: 0s #ZITADEL_AUTH_SPOOLER_TRANSACTIONDURATION + BulkLimit: 10 #ZITADEL_AUTH_SPOOLER_BULKLIMIT + +FirstInstance: + # We only need to create an empty zitadel database so this step must be skipped + Skip: true + +Log: + Level: info diff --git a/cmd/mirror/event.go b/cmd/mirror/event.go new file mode 100644 index 0000000000..2bb0d52f45 --- /dev/null +++ b/cmd/mirror/event.go @@ -0,0 +1,96 @@ +package mirror + +import ( + "context" + + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/v2/projection" + "github.com/zitadel/zitadel/internal/v2/readmodel" + "github.com/zitadel/zitadel/internal/v2/system" + mirror_event "github.com/zitadel/zitadel/internal/v2/system/mirror" +) + +func queryLastSuccessfulMigration(ctx context.Context, destinationES *eventstore.EventStore, source string) (*readmodel.LastSuccessfulMirror, error) { + lastSuccess := readmodel.NewLastSuccessfulMirror(source) + if shouldIgnorePrevious { + return lastSuccess, nil + } + _, err := destinationES.Query( + ctx, + eventstore.NewQuery( + system.AggregateInstance, + lastSuccess, + eventstore.SetFilters(lastSuccess.Filter()), + ), + ) + if err != nil { + return nil, err + } + + return lastSuccess, nil +} + +func writeMigrationStart(ctx context.Context, sourceES *eventstore.EventStore, id string, destination string) (_ float64, err error) { + var cmd *eventstore.Command + if len(instanceIDs) > 0 { + cmd, err = mirror_event.NewStartedInstancesCommand(destination, instanceIDs) + if err != nil { + return 0, err + } + } else { + cmd = mirror_event.NewStartedSystemCommand(destination) + } + + var position projection.HighestPosition + + err = sourceES.Push( + ctx, + eventstore.NewPushIntent( + system.AggregateInstance, + eventstore.AppendAggregate( + system.AggregateOwner, + system.AggregateType, + id, + eventstore.CurrentSequenceMatches(0), + eventstore.AppendCommands(cmd), + ), + eventstore.PushReducer(&position), + ), + ) + if err != nil { + return 0, err + } + return position.Position, nil +} + +func writeMigrationSucceeded(ctx context.Context, destinationES *eventstore.EventStore, id, source string, position float64) error { + return destinationES.Push( + ctx, + eventstore.NewPushIntent( + system.AggregateInstance, + eventstore.AppendAggregate( + system.AggregateOwner, + system.AggregateType, + id, + eventstore.CurrentSequenceMatches(0), + eventstore.AppendCommands(mirror_event.NewSucceededCommand(source, position)), + ), + ), + ) +} + +func writeMigrationFailed(ctx context.Context, destinationES *eventstore.EventStore, id, source string, err error) error { + return destinationES.Push( + ctx, + eventstore.NewPushIntent( + system.AggregateInstance, + eventstore.AppendAggregate( + system.AggregateOwner, + system.AggregateType, + id, + eventstore.CurrentSequenceMatches(0), + eventstore.AppendCommands(mirror_event.NewFailedCommand(source, err)), + ), + ), + ) +} diff --git a/cmd/mirror/event_store.go b/cmd/mirror/event_store.go new file mode 100644 index 0000000000..358f878d77 --- /dev/null +++ b/cmd/mirror/event_store.go @@ -0,0 +1,250 @@ +package mirror + 
+import ( + "context" + "database/sql" + _ "embed" + "errors" + "io" + "time" + + "github.com/jackc/pgx/v5/stdlib" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + db "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/database/dialect" + "github.com/zitadel/zitadel/internal/id" + "github.com/zitadel/zitadel/internal/v2/database" + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/v2/eventstore/postgres" + "github.com/zitadel/zitadel/internal/zerrors" +) + +var shouldIgnorePrevious bool + +func eventstoreCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "eventstore", + Short: "mirrors the eventstore of an instance from one database to another", + Long: `mirrors the eventstore of an instance from one database to another +ZITADEL needs to be initialized and set up with the --for-mirror flag +Migrate only copies events2 and unique constraints`, + Run: func(cmd *cobra.Command, args []string) { + config := mustNewMigrationConfig(viper.GetViper()) + copyEventstore(cmd.Context(), config) + }, + } + + cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete unique constraints of defined instances before copy") + cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table") + + return cmd +} + +func copyEventstore(ctx context.Context, config *Migration) { + sourceClient, err := db.Connect(config.Source, false, dialect.DBPurposeQuery) + logging.OnError(err).Fatal("unable to connect to source database") + defer sourceClient.Close() + + destClient, err := db.Connect(config.Destination, false, dialect.DBPurposeEventPusher) + logging.OnError(err).Fatal("unable to connect to destination database") + defer destClient.Close() + + copyEvents(ctx, sourceClient, destClient, config.EventBulkSize) + copyUniqueConstraints(ctx, sourceClient, destClient) +} + +func positionQuery(db *db.DB) string { + switch db.Type() { + case "postgres": + return "SELECT EXTRACT(EPOCH FROM clock_timestamp())" + case "cockroach": + return "SELECT cluster_logical_timestamp()" + default: + logging.WithFields("db_type", db.Type()).Fatal("database type not recognized") + return "" + } +} + +func copyEvents(ctx context.Context, source, dest *db.DB, bulkSize uint32) { + start := time.Now() + reader, writer := io.Pipe() + + migrationID, err := id.SonyFlakeGenerator().Next() + logging.OnError(err).Fatal("unable to generate migration id") + + sourceConn, err := source.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire source connection") + + destConn, err := dest.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire dest connection") + + sourceES := eventstore.NewEventstoreFromOne(postgres.New(source, &postgres.Config{ + MaxRetries: 3, + })) + destinationES := eventstore.NewEventstoreFromOne(postgres.New(dest, &postgres.Config{ + MaxRetries: 3, + })) + + previousMigration, err := queryLastSuccessfulMigration(ctx, destinationES, source.DatabaseName()) + logging.OnError(err).Fatal("unable to query latest successful migration") + + maxPosition, err := writeMigrationStart(ctx, sourceES, migrationID, dest.DatabaseName()) + logging.OnError(err).Fatal("unable to write migration started event") + + logging.WithFields("from", previousMigration.Position, "to", maxPosition).Info("start event migration") + + nextPos := make(chan bool, 1) + pos := make(chan float64, 1) + errs := make(chan error, 3) + + go func() { + err := sourceConn.Raw(func(driverConn 
interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + nextPos <- true + var i uint32 + for position := range pos { + var stmt database.Statement + stmt.WriteString("COPY (SELECT instance_id, aggregate_type, aggregate_id, event_type, sequence, revision, created_at, regexp_replace(payload::TEXT, '\\\\u0000', '', 'g')::JSON payload, creator, owner, ") + stmt.WriteArg(position) + stmt.WriteString(" position, row_number() OVER (PARTITION BY instance_id ORDER BY position, in_tx_order) AS in_tx_order FROM eventstore.events2 ") + stmt.WriteString(instanceClause()) + stmt.WriteString(" AND ") + database.NewNumberAtMost(maxPosition).Write(&stmt, "position") + stmt.WriteString(" AND ") + database.NewNumberGreater(previousMigration.Position).Write(&stmt, "position") + stmt.WriteString(" ORDER BY instance_id, position, in_tx_order") + stmt.WriteString(" LIMIT ") + stmt.WriteArg(bulkSize) + stmt.WriteString(" OFFSET ") + stmt.WriteArg(bulkSize * i) + stmt.WriteString(") TO STDOUT") + + // Copy does not allow args so we use we replace the args in the statement + tag, err := conn.PgConn().CopyTo(ctx, writer, stmt.Debug()) + if err != nil { + return zerrors.ThrowUnknownf(err, "MIGRA-KTuSq", "unable to copy events from source during iteration %d", i) + } + if tag.RowsAffected() < int64(bulkSize) { + return nil + } + + nextPos <- true + i++ + } + return nil + }) + writer.Close() + close(nextPos) + errs <- err + }() + + // generate next position for + go func() { + defer close(pos) + for range nextPos { + var position float64 + err := dest.QueryRowContext( + ctx, + func(row *sql.Row) error { + return row.Scan(&position) + }, + positionQuery(dest), + ) + if err != nil { + errs <- zerrors.ThrowUnknown(err, "MIGRA-kMyPH", "unable to query next position") + return + } + pos <- position + } + }() + + var eventCount int64 + errs <- destConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + + tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.events2 FROM STDIN") + eventCount = tag.RowsAffected() + if err != nil { + return zerrors.ThrowUnknown(err, "MIGRA-DTHi7", "unable to copy events into destination") + } + + return nil + }) + + close(errs) + writeCopyEventsDone(ctx, destinationES, migrationID, source.DatabaseName(), maxPosition, errs) + + logging.WithFields("took", time.Since(start), "count", eventCount).Info("events migrated") +} + +func writeCopyEventsDone(ctx context.Context, es *eventstore.EventStore, id, source string, position float64, errs <-chan error) { + joinedErrs := make([]error, 0, len(errs)) + for err := range errs { + joinedErrs = append(joinedErrs, err) + } + err := errors.Join(joinedErrs...) 
+ + if err != nil { + logging.WithError(err).Error("unable to mirror events") + err := writeMigrationFailed(ctx, es, id, source, err) + logging.OnError(err).Fatal("unable to write failed event") + return + } + + err = writeMigrationSucceeded(ctx, es, id, source, position) + logging.OnError(err).Fatal("unable to write failed event") +} + +func copyUniqueConstraints(ctx context.Context, source, dest *db.DB) { + start := time.Now() + reader, writer := io.Pipe() + errs := make(chan error, 1) + + sourceConn, err := source.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire source connection") + + go func() { + err := sourceConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + var stmt database.Statement + stmt.WriteString("COPY (SELECT instance_id, unique_type, unique_field FROM eventstore.unique_constraints ") + stmt.WriteString(instanceClause()) + stmt.WriteString(") TO stdout") + + _, err := conn.PgConn().CopyTo(ctx, writer, stmt.String()) + writer.Close() + return err + }) + errs <- err + }() + + destConn, err := dest.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire dest connection") + + var eventCount int64 + err = destConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + + if shouldReplace { + var stmt database.Statement + stmt.WriteString("DELETE FROM eventstore.unique_constraints ") + stmt.WriteString(instanceClause()) + + _, err := conn.Exec(ctx, stmt.String()) + if err != nil { + return err + } + } + + tag, err := conn.PgConn().CopyFrom(ctx, reader, "COPY eventstore.unique_constraints FROM stdin") + eventCount = tag.RowsAffected() + + return err + }) + logging.OnError(err).Fatal("unable to copy unique constraints to destination") + logging.OnError(<-errs).Fatal("unable to copy unique constraints from source") + logging.WithFields("took", time.Since(start), "count", eventCount).Info("unique constraints migrated") +} diff --git a/cmd/mirror/mirror.go b/cmd/mirror/mirror.go new file mode 100644 index 0000000000..3e4902ca7d --- /dev/null +++ b/cmd/mirror/mirror.go @@ -0,0 +1,93 @@ +package mirror + +import ( + "bytes" + _ "embed" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/cmd/key" +) + +var ( + instanceIDs []string + isSystem bool + shouldReplace bool +) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "mirror", + Short: "mirrors all data of ZITADEL from one database to another", + Long: `mirrors all data of ZITADEL from one database to another +ZITADEL needs to be initialized and set up with --for-mirror + +The command does mirror all data needed and recomputes the projections. +For more details call the help functions of the sub commands. + +Order of execution: +1. mirror system tables +2. mirror auth tables +3. mirror event store tables +4. recompute projections +5. 
verify`, + Run: func(cmd *cobra.Command, args []string) { + config := mustNewMigrationConfig(viper.GetViper()) + projectionConfig := mustNewProjectionsConfig(viper.GetViper()) + + masterKey, err := key.MasterKey(cmd) + logging.OnError(err).Fatal("unable to read master key") + + copySystem(cmd.Context(), config) + copyAuth(cmd.Context(), config) + copyEventstore(cmd.Context(), config) + + projections(cmd.Context(), projectionConfig, masterKey) + verifyMigration(cmd.Context(), config) + }, + } + + mirrorFlags(cmd) + cmd.Flags().BoolVar(&shouldIgnorePrevious, "ignore-previous", false, "ignores previous migrations of the events table") + cmd.Flags().BoolVar(&shouldReplace, "replace", false, `replaces all data of the following tables for the provided instances or all if the "--system"-flag is set: +* system.assets +* auth.auth_requests +* eventstore.unique_constraints +The flag should be provided if you want to execute the mirror command multiple times so that the static data are also mirrored to prevent inconsistent states.`) + migrateProjectionsFlags(cmd) + + err := viper.MergeConfig(bytes.NewBuffer(defaultConfig)) + logging.OnError(err).Fatal("unable to read default config") + + cmd.AddCommand( + eventstoreCmd(), + systemCmd(), + projectionsCmd(), + authCmd(), + verifyCmd(), + ) + + return cmd +} + +func mirrorFlags(cmd *cobra.Command) { + cmd.PersistentFlags().StringSliceVar(&instanceIDs, "instance", nil, "id or comma separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.") + cmd.PersistentFlags().BoolVar(&isSystem, "system", false, "migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.") + cmd.MarkFlagsOneRequired("system", "instance") + cmd.MarkFlagsMutuallyExclusive("system", "instance") +} + +func instanceClause() string { + if isSystem { + return "WHERE instance_id <> ''" + } + for i := range instanceIDs { + instanceIDs[i] = "'" + instanceIDs[i] + "'" + } + + // COPY does not allow parameters so we need to set them directly + return "WHERE instance_id IN (" + strings.Join(instanceIDs, ", ") + ")" +} diff --git a/cmd/mirror/projections.go b/cmd/mirror/projections.go new file mode 100644 index 0000000000..af7ba98c5c --- /dev/null +++ b/cmd/mirror/projections.go @@ -0,0 +1,316 @@ +package mirror + +import ( + "context" + "database/sql" + "net/http" + "sync" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/cmd/encryption" + "github.com/zitadel/zitadel/cmd/key" + "github.com/zitadel/zitadel/cmd/tls" + admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing" + admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler" + admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view" + internal_authz "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/api/oidc" + "github.com/zitadel/zitadel/internal/api/ui/login" + auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing" + auth_handler "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/handler" + auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" + "github.com/zitadel/zitadel/internal/authz" + authz_es 
"github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" + "github.com/zitadel/zitadel/internal/command" + "github.com/zitadel/zitadel/internal/config/systemdefaults" + crypto_db "github.com/zitadel/zitadel/internal/crypto/database" + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/database/dialect" + "github.com/zitadel/zitadel/internal/domain" + "github.com/zitadel/zitadel/internal/eventstore" + old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql" + new_es "github.com/zitadel/zitadel/internal/eventstore/v3" + "github.com/zitadel/zitadel/internal/i18n" + "github.com/zitadel/zitadel/internal/id" + "github.com/zitadel/zitadel/internal/notification" + "github.com/zitadel/zitadel/internal/notification/handlers" + "github.com/zitadel/zitadel/internal/query" + "github.com/zitadel/zitadel/internal/query/projection" + static_config "github.com/zitadel/zitadel/internal/static/config" + es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore" + es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres" + "github.com/zitadel/zitadel/internal/webauthn" +) + +func projectionsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "projections", + Short: "calls the projections synchronously", + Run: func(cmd *cobra.Command, args []string) { + config := mustNewProjectionsConfig(viper.GetViper()) + + masterKey, err := key.MasterKey(cmd) + logging.OnError(err).Fatal("unable to read master key") + + projections(cmd.Context(), config, masterKey) + }, + } + + migrateProjectionsFlags(cmd) + + return cmd +} + +type ProjectionsConfig struct { + Destination database.Config + Projections projection.Config + EncryptionKeys *encryption.EncryptionKeyConfig + SystemAPIUsers map[string]*internal_authz.SystemAPIUser + Eventstore *eventstore.Config + + Admin admin_es.Config + Auth auth_es.Config + + Log *logging.Config + Machine *id.Config + + ExternalPort uint16 + ExternalDomain string + ExternalSecure bool + InternalAuthZ internal_authz.Config + SystemDefaults systemdefaults.SystemDefaults + Telemetry *handlers.TelemetryPusherConfig + Login login.Config + OIDC oidc.Config + WebAuthNName string + DefaultInstance command.InstanceSetup + AssetStorage static_config.AssetStorageConfig +} + +func migrateProjectionsFlags(cmd *cobra.Command) { + key.AddMasterKeyFlag(cmd) + tls.AddTLSModeFlag(cmd) +} + +func projections( + ctx context.Context, + config *ProjectionsConfig, + masterKey string, +) { + start := time.Now() + + client, err := database.Connect(config.Destination, false, dialect.DBPurposeQuery) + logging.OnError(err).Fatal("unable to connect to database") + + keyStorage, err := crypto_db.NewKeyStorage(client, masterKey) + logging.OnError(err).Fatal("cannot start key storage") + + keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage) + logging.OnError(err).Fatal("unable to read encryption keys") + + staticStorage, err := config.AssetStorage.NewStorage(client.DB) + logging.OnError(err).Fatal("unable create static storage") + + config.Eventstore.Querier = old_es.NewCRDB(client) + esPusherDBClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher) + logging.OnError(err).Fatal("unable to connect eventstore push client") + config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient) + es := eventstore.NewEventstore(config.Eventstore) + esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{ + MaxRetries: config.Eventstore.MaxRetries, + })) + + 
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) + + queries, err := query.StartQueries( + ctx, + es, + esV4.Querier, + client, + client, + config.Projections, + config.SystemDefaults, + keys.IDPConfig, + keys.OTP, + keys.OIDC, + keys.SAML, + config.InternalAuthZ.RolePermissionMappings, + sessionTokenVerifier, + func(q *query.Queries) domain.PermissionCheck { + return func(ctx context.Context, permission, orgID, resourceID string) (err error) { + return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) + } + }, + 0, + config.SystemAPIUsers, + false, + ) + logging.OnError(err).Fatal("unable to start queries") + + authZRepo, err := authz.Start(queries, es, client, keys.OIDC, config.ExternalSecure) + logging.OnError(err).Fatal("unable to start authz repo") + + webAuthNConfig := &webauthn.Config{ + DisplayName: config.WebAuthNName, + ExternalSecure: config.ExternalSecure, + } + commands, err := command.StartCommands( + es, + config.SystemDefaults, + config.InternalAuthZ.RolePermissionMappings, + staticStorage, + webAuthNConfig, + config.ExternalDomain, + config.ExternalSecure, + config.ExternalPort, + keys.IDPConfig, + keys.OTP, + keys.SMTP, + keys.SMS, + keys.User, + keys.DomainVerification, + keys.OIDC, + keys.SAML, + &http.Client{}, + func(ctx context.Context, permission, orgID, resourceID string) (err error) { + return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) + }, + sessionTokenVerifier, + config.OIDC.DefaultAccessTokenLifetime, + config.OIDC.DefaultRefreshTokenExpiration, + config.OIDC.DefaultRefreshTokenIdleExpiration, + config.DefaultInstance.SecretGenerators, + ) + logging.OnError(err).Fatal("unable to start commands") + + err = projection.Create(ctx, client, es, config.Projections, keys.OIDC, keys.SAML, config.SystemAPIUsers) + logging.OnError(err).Fatal("unable to start projections") + + i18n.MustLoadSupportedLanguagesFromDir() + + notification.Register( + ctx, + config.Projections.Customizations["notifications"], + config.Projections.Customizations["notificationsquotas"], + config.Projections.Customizations["telemetry"], + *config.Telemetry, + config.ExternalDomain, + config.ExternalPort, + config.ExternalSecure, + commands, + queries, + es, + config.Login.DefaultOTPEmailURLV2, + config.SystemDefaults.Notifications.FileSystemPath, + keys.User, + keys.SMTP, + keys.SMS, + ) + + config.Auth.Spooler.Client = client + config.Auth.Spooler.Eventstore = es + authView, err := auth_view.StartView(config.Auth.Spooler.Client, keys.OIDC, queries, config.Auth.Spooler.Eventstore) + logging.OnError(err).Fatal("unable to start auth view") + auth_handler.Register(ctx, config.Auth.Spooler, authView, queries) + + config.Admin.Spooler.Client = client + config.Admin.Spooler.Eventstore = es + adminView, err := admin_view.StartView(config.Admin.Spooler.Client) + logging.OnError(err).Fatal("unable to start admin view") + + admin_handler.Register(ctx, config.Admin.Spooler, adminView, staticStorage) + + instances := make(chan string, config.Projections.ConcurrentInstances) + failedInstances := make(chan string) + wg := sync.WaitGroup{} + wg.Add(int(config.Projections.ConcurrentInstances)) + + go func() { + for instance := range failedInstances { + logging.WithFields("instance", instance).Error("projection failed") + } + }() + + for i := 0; i < int(config.Projections.ConcurrentInstances); i++ { + go 
execProjections(ctx, instances, failedInstances, &wg) + } + + for _, instance := range queryInstanceIDs(ctx, client) { + instances <- instance + } + close(instances) + wg.Wait() + + close(failedInstances) + + logging.WithFields("took", time.Since(start)).Info("projections executed") +} + +func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) { + for instance := range instances { + logging.WithFields("instance", instance).Info("start projections") + ctx = internal_authz.WithInstanceID(ctx, instance) + + err := projection.ProjectInstance(ctx) + if err != nil { + logging.WithFields("instance", instance).OnError(err).Info("trigger failed") + failedInstances <- instance + continue + } + + err = admin_handler.ProjectInstance(ctx) + if err != nil { + logging.WithFields("instance", instance).OnError(err).Info("trigger admin handler failed") + failedInstances <- instance + continue + } + + err = auth_handler.ProjectInstance(ctx) + if err != nil { + logging.WithFields("instance", instance).OnError(err).Info("trigger auth handler failed") + failedInstances <- instance + continue + } + + err = notification.ProjectInstance(ctx) + if err != nil { + logging.WithFields("instance", instance).OnError(err).Info("trigger notification failed") + failedInstances <- instance + continue + } + logging.WithFields("instance", instance).Info("projections done") + } + wg.Done() +} + +// returns the instance configured by flag +// or all instances which are not removed +func queryInstanceIDs(ctx context.Context, source *database.DB) []string { + if len(instanceIDs) > 0 { + return instanceIDs + } + + instances := []string{} + err := source.QueryContext( + ctx, + func(r *sql.Rows) error { + for r.Next() { + var instance string + + if err := r.Scan(&instance); err != nil { + return err + } + instances = append(instances, instance) + } + return r.Err() + }, + "SELECT DISTINCT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.added' AND instance_id NOT IN (SELECT instance_id FROM eventstore.events2 WHERE instance_id <> '' AND aggregate_type = 'instance' AND event_type = 'instance.removed')", + ) + logging.OnError(err).Fatal("unable to query instances") + + return instances +} diff --git a/cmd/mirror/system.go b/cmd/mirror/system.go new file mode 100644 index 0000000000..e16836aa8c --- /dev/null +++ b/cmd/mirror/system.go @@ -0,0 +1,139 @@ +package mirror + +import ( + "context" + _ "embed" + "io" + "time" + + "github.com/jackc/pgx/v5/stdlib" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/database/dialect" +) + +func systemCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "mirrors the system tables of ZITADEL from one database to another", + Long: `mirrors the system tables of ZITADEL from one database to another +ZITADEL needs to be initialized +Only keys and assets are mirrored`, + Run: func(cmd *cobra.Command, args []string) { + config := mustNewMigrationConfig(viper.GetViper()) + copySystem(cmd.Context(), config) + }, + } + + cmd.Flags().BoolVar(&shouldReplace, "replace", false, "allow delete ALL keys and assets of defined instances before copy") + + return cmd +} + +func copySystem(ctx context.Context, config *Migration) { + sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery) + 
logging.OnError(err).Fatal("unable to connect to source database") + defer sourceClient.Close() + + destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher) + logging.OnError(err).Fatal("unable to connect to destination database") + defer destClient.Close() + + copyAssets(ctx, sourceClient, destClient) + copyEncryptionKeys(ctx, sourceClient, destClient) +} + +func copyAssets(ctx context.Context, source, dest *database.DB) { + start := time.Now() + + sourceConn, err := source.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire source connection") + defer sourceConn.Close() + + r, w := io.Pipe() + errs := make(chan error, 1) + + go func() { + err = sourceConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + // ignore hash column because it's computed + _, err := conn.PgConn().CopyTo(ctx, w, "COPY (SELECT instance_id, asset_type, resource_owner, name, content_type, data, updated_at FROM system.assets "+instanceClause()+") TO stdout") + w.Close() + return err + }) + errs <- err + }() + + destConn, err := dest.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire dest connection") + defer destConn.Close() + + var eventCount int64 + err = destConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + + if shouldReplace { + _, err := conn.Exec(ctx, "DELETE FROM system.assets "+instanceClause()) + if err != nil { + return err + } + } + + tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.assets (instance_id, asset_type, resource_owner, name, content_type, data, updated_at) FROM stdin") + eventCount = tag.RowsAffected() + + return err + }) + logging.OnError(err).Fatal("unable to copy assets to destination") + logging.OnError(<-errs).Fatal("unable to copy assets from source") + logging.WithFields("took", time.Since(start), "count", eventCount).Info("assets migrated") +} + +func copyEncryptionKeys(ctx context.Context, source, dest *database.DB) { + start := time.Now() + + sourceConn, err := source.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire source connection") + defer sourceConn.Close() + + r, w := io.Pipe() + errs := make(chan error, 1) + + go func() { + err = sourceConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + // ignore hash column because it's computed + _, err := conn.PgConn().CopyTo(ctx, w, "COPY system.encryption_keys TO stdout") + w.Close() + return err + }) + errs <- err + }() + + destConn, err := dest.Conn(ctx) + logging.OnError(err).Fatal("unable to acquire dest connection") + defer destConn.Close() + + var eventCount int64 + err = destConn.Raw(func(driverConn interface{}) error { + conn := driverConn.(*stdlib.Conn).Conn() + + if shouldReplace { + _, err := conn.Exec(ctx, "TRUNCATE system.encryption_keys") + if err != nil { + return err + } + } + + tag, err := conn.PgConn().CopyFrom(ctx, r, "COPY system.encryption_keys FROM stdin") + eventCount = tag.RowsAffected() + + return err + }) + logging.OnError(err).Fatal("unable to copy encryption keys to destination") + logging.OnError(<-errs).Fatal("unable to copy encryption keys from source") + logging.WithFields("took", time.Since(start), "count", eventCount).Info("encryption keys migrated") +} diff --git a/cmd/mirror/verify.go b/cmd/mirror/verify.go new file mode 100644 index 0000000000..7b90ad89aa --- /dev/null +++ b/cmd/mirror/verify.go @@ -0,0 +1,111 @@ +package mirror + +import ( + "context" + "database/sql" + _ "embed" + "fmt" + + "github.com/spf13/cobra" + 
"github.com/spf13/viper" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/database/dialect" +) + +func verifyCmd() *cobra.Command { + return &cobra.Command{ + Use: "verify", + Short: "counts if source and dest have the same amount of entries", + Run: func(cmd *cobra.Command, args []string) { + config := mustNewMigrationConfig(viper.GetViper()) + verifyMigration(cmd.Context(), config) + }, + } +} + +var schemas = []string{ + "adminapi", + "auth", + "eventstore", + "projections", + "system", +} + +func verifyMigration(ctx context.Context, config *Migration) { + sourceClient, err := database.Connect(config.Source, false, dialect.DBPurposeQuery) + logging.OnError(err).Fatal("unable to connect to source database") + defer sourceClient.Close() + + destClient, err := database.Connect(config.Destination, false, dialect.DBPurposeEventPusher) + logging.OnError(err).Fatal("unable to connect to destination database") + defer destClient.Close() + + for _, schema := range schemas { + for _, table := range append(getTables(ctx, destClient, schema), getViews(ctx, destClient, schema)...) { + sourceCount := countEntries(ctx, sourceClient, table) + destCount := countEntries(ctx, destClient, table) + + entry := logging.WithFields("table", table, "dest", destCount, "source", sourceCount) + if sourceCount == destCount { + entry.Debug("equal count") + continue + } + entry.WithField("diff", destCount-sourceCount).Info("unequal count") + } + } +} + +func getTables(ctx context.Context, dest *database.DB, schema string) (tables []string) { + err := dest.QueryContext( + ctx, + func(r *sql.Rows) error { + for r.Next() { + var table string + if err := r.Scan(&table); err != nil { + return err + } + tables = append(tables, table) + } + return r.Err() + }, + "SELECT CONCAT(schemaname, '.', tablename) FROM pg_tables WHERE schemaname = $1", + schema, + ) + logging.WithFields("schema", schema).OnError(err).Fatal("unable to query tables") + return tables +} + +func getViews(ctx context.Context, dest *database.DB, schema string) (tables []string) { + err := dest.QueryContext( + ctx, + func(r *sql.Rows) error { + for r.Next() { + var table string + if err := r.Scan(&table); err != nil { + return err + } + tables = append(tables, table) + } + return r.Err() + }, + "SELECT CONCAT(schemaname, '.', viewname) FROM pg_views WHERE schemaname = $1", + schema, + ) + logging.WithFields("schema", schema).OnError(err).Fatal("unable to query views") + return tables +} + +func countEntries(ctx context.Context, client *database.DB, table string) (count int) { + err := client.QueryRowContext( + ctx, + func(r *sql.Row) error { + return r.Scan(&count) + }, + fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause()), + ) + logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count") + + return count +} diff --git a/cmd/setup/03.go b/cmd/setup/03.go index 0d8c988688..4860ae3eec 100644 --- a/cmd/setup/03.go +++ b/cmd/setup/03.go @@ -26,6 +26,8 @@ type FirstInstance struct { PatPath string Features *command.InstanceFeatures + Skip bool + instanceSetup command.InstanceSetup userEncryptionKey *crypto.KeyConfig smtpEncryptionKey *crypto.KeyConfig @@ -42,6 +44,9 @@ type FirstInstance struct { } func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error { + if mig.Skip { + return nil + } keyStorage, err := mig.verifyEncryptionKeys(ctx) if err != nil { return err diff --git a/cmd/setup/config.go 
b/cmd/setup/config.go index 1ba85804ab..81ec6f2332 100644 --- a/cmd/setup/config.go +++ b/cmd/setup/config.go @@ -28,6 +28,7 @@ import ( ) type Config struct { + ForMirror bool Database database.Config SystemDefaults systemdefaults.SystemDefaults InternalAuthZ internal_authz.Config diff --git a/cmd/setup/setup.go b/cmd/setup/setup.go index bb0111d5be..6ed0cc4dc7 100644 --- a/cmd/setup/setup.go +++ b/cmd/setup/setup.go @@ -34,6 +34,8 @@ import ( notify_handler "github.com/zitadel/zitadel/internal/notification" "github.com/zitadel/zitadel/internal/query" "github.com/zitadel/zitadel/internal/query/projection" + es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore" + es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres" "github.com/zitadel/zitadel/internal/webauthn" ) @@ -57,13 +59,16 @@ Requirements: err = BindInitProjections(cmd) logging.OnError(err).Fatal("unable to bind \"init-projections\" flag") + err = bindForMirror(cmd) + logging.OnError(err).Fatal("unable to bind \"for-mirror\" flag") + config := MustNewConfig(viper.GetViper()) steps := MustNewSteps(viper.New()) masterKey, err := key.MasterKey(cmd) logging.OnError(err).Panic("No master key provided") - Setup(config, steps, masterKey) + Setup(cmd.Context(), config, steps, masterKey) }, } @@ -77,6 +82,7 @@ Requirements: func Flags(cmd *cobra.Command) { cmd.PersistentFlags().StringArrayVar(&stepFiles, "steps", nil, "paths to step files to overwrite default steps") cmd.Flags().Bool("init-projections", viper.GetBool("InitProjections"), "beta feature: initializes projections after they are created, allows smooth start as projections are up to date") + cmd.Flags().Bool("for-mirror", viper.GetBool("ForMirror"), "use this flag if you want to mirror your existing data") key.AddMasterKeyFlag(cmd) tls.AddTLSModeFlag(cmd) } @@ -85,8 +91,11 @@ func BindInitProjections(cmd *cobra.Command) error { return viper.BindPFlag("InitProjections.Enabled", cmd.Flags().Lookup("init-projections")) } -func Setup(config *Config, steps *Steps, masterKey string) { - ctx := context.Background() +func bindForMirror(cmd *cobra.Command) error { + return viper.BindPFlag("ForMirror", cmd.Flags().Lookup("for-mirror")) +} + +func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) { logging.Info("setup started") i18n.MustLoadSupportedLanguagesFromDir() @@ -102,10 +111,14 @@ func Setup(config *Config, steps *Steps, masterKey string) { config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient) eventstoreClient := eventstore.NewEventstore(config.Eventstore) logging.OnError(err).Fatal("unable to start eventstore") + eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{ + MaxRetries: config.Eventstore.MaxRetries, + })) steps.s1ProjectionTable = &ProjectionTable{dbClient: queryDBClient.DB} steps.s2AssetsTable = &AssetTable{dbClient: queryDBClient.DB} + steps.FirstInstance.Skip = config.ForMirror || steps.FirstInstance.Skip steps.FirstInstance.instanceSetup = config.DefaultInstance steps.FirstInstance.userEncryptionKey = config.EncryptionKeys.User steps.FirstInstance.smtpEncryptionKey = config.EncryptionKeys.SMTP @@ -197,10 +210,11 @@ func Setup(config *Config, steps *Steps, masterKey string) { } // projection initialization must be done last, since the steps above might add required columns to the projections - if config.InitProjections.Enabled { + if !config.ForMirror && config.InitProjections.Enabled { initProjections( ctx, eventstoreClient, + eventstoreV4, queryDBClient, projectionDBClient, 
masterKey, @@ -222,6 +236,7 @@ func readStmt(fs embed.FS, folder, typ, filename string) (string, error) { func initProjections( ctx context.Context, eventstoreClient *eventstore.Eventstore, + eventstoreV4 *es_v4.EventStore, queryDBClient, projectionDBClient *database.DB, masterKey string, @@ -278,6 +293,7 @@ func initProjections( queries, err := query.StartQueries( ctx, eventstoreClient, + eventstoreV4.Querier, queryDBClient, projectionDBClient, config.Projections, diff --git a/cmd/setup/steps.yaml b/cmd/setup/steps.yaml index 5e7805f24d..03476c6648 100644 --- a/cmd/setup/steps.yaml +++ b/cmd/setup/steps.yaml @@ -1,5 +1,7 @@ # By using the FirstInstance section, you can overwrite the DefaultInstance configuration for the first instance created by zitadel setup. FirstInstance: + # If set to true zitadel is setup without initial data + Skip: false # The machine key from the section FirstInstance.Org.Machine.MachineKey is written to the MachineKeyPath. MachineKeyPath: # ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH # The personal access token from the section FirstInstance.Org.Machine.Pat is written to the PatPath. diff --git a/cmd/start/start.go b/cmd/start/start.go index 49de4d2073..a7688a83cb 100644 --- a/cmd/start/start.go +++ b/cmd/start/start.go @@ -78,6 +78,8 @@ import ( "github.com/zitadel/zitadel/internal/notification" "github.com/zitadel/zitadel/internal/query" "github.com/zitadel/zitadel/internal/static" + es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore" + es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres" "github.com/zitadel/zitadel/internal/webauthn" "github.com/zitadel/zitadel/openapi" ) @@ -153,12 +155,16 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient) config.Eventstore.Querier = old_es.NewCRDB(queryDBClient) eventstoreClient := eventstore.NewEventstore(config.Eventstore) + eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{ + MaxRetries: config.Eventstore.MaxRetries, + })) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) queries, err := query.StartQueries( ctx, eventstoreClient, + eventstoreV4.Querier, queryDBClient, projectionDBClient, config.Projections, diff --git a/cmd/start/start_from_init.go b/cmd/start/start_from_init.go index 78c0be719d..38a6a6c4d1 100644 --- a/cmd/start/start_from_init.go +++ b/cmd/start/start_from_init.go @@ -36,7 +36,7 @@ Requirements: setupConfig := setup.MustNewConfig(viper.GetViper()) setupSteps := setup.MustNewSteps(viper.New()) - setup.Setup(setupConfig, setupSteps, masterKey) + setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey) startConfig := MustNewConfig(viper.GetViper()) diff --git a/cmd/start/start_from_setup.go b/cmd/start/start_from_setup.go index ec26f47414..a8b7295f2a 100644 --- a/cmd/start/start_from_setup.go +++ b/cmd/start/start_from_setup.go @@ -34,7 +34,7 @@ Requirements: setupConfig := setup.MustNewConfig(viper.GetViper()) setupSteps := setup.MustNewSteps(viper.New()) - setup.Setup(setupConfig, setupSteps, masterKey) + setup.Setup(cmd.Context(), setupConfig, setupSteps, masterKey) startConfig := MustNewConfig(viper.GetViper()) diff --git a/cmd/zitadel.go b/cmd/zitadel.go index fdddfed007..c855dd4495 100644 --- a/cmd/zitadel.go +++ b/cmd/zitadel.go @@ -15,6 +15,7 @@ import ( "github.com/zitadel/zitadel/cmd/build" "github.com/zitadel/zitadel/cmd/initialise" "github.com/zitadel/zitadel/cmd/key" + "github.com/zitadel/zitadel/cmd/mirror" 
"github.com/zitadel/zitadel/cmd/ready" "github.com/zitadel/zitadel/cmd/setup" "github.com/zitadel/zitadel/cmd/start" @@ -55,6 +56,7 @@ func New(out io.Writer, in io.Reader, args []string, server chan<- *start.Server start.New(server), start.NewStartFromInit(server), start.NewStartFromSetup(server), + mirror.New(), key.New(), ready.New(), ) diff --git a/docs/docs/apis/introduction.mdx b/docs/docs/apis/introduction.mdx index a4220abba5..081b31cbce 100644 --- a/docs/docs/apis/introduction.mdx +++ b/docs/docs/apis/introduction.mdx @@ -281,7 +281,7 @@ ZITADEL hosts everything under a single domain: `{instance}.zitadel.cloud` or yo The domain is used as the OIDC issuer and as the base url for the gRPC and REST APIs, the Login and Console UI, which you'll find under `{your_domain}/ui/console/`. -Are you self-hosting and having troubles with *Instance not found* errors? [Check out this page](https://zitadel.com/docs/self-hosting/manage/custom-domain). +Are you self-hosting and having troubles with *Instance not found* errors? [Check out this page](/docs/self-hosting/manage/custom-domain). ## API path prefixes diff --git a/docs/docs/examples/login/symfony.md b/docs/docs/examples/login/symfony.md index 3a5bfa3ac9..86a793b02f 100644 --- a/docs/docs/examples/login/symfony.md +++ b/docs/docs/examples/login/symfony.md @@ -102,7 +102,7 @@ composer require drenso/symfony-oidc-bundle First, we need to create a User class for the database, so we can persist user info between requests. In this case you don't need password authentication. Email addresses are not unique for ZITADEL users. There can be multiple user accounts with the same email address. -See [User Constraints](https://zitadel.com/docs/concepts/structure/users#constraints) for more details. +See [User Constraints](/docs/concepts/structure/users#constraints) for more details. We will use the User Info `sub` claim as unique "display" name for the user. `sub` equals the unique User ID from ZITADEL. This creates a User Repository and Entity that implements the `UserInterface`: diff --git a/docs/docs/examples/secure-api/nodejs-nestjs.md b/docs/docs/examples/secure-api/nodejs-nestjs.md index 13c2191140..54da814225 100644 --- a/docs/docs/examples/secure-api/nodejs-nestjs.md +++ b/docs/docs/examples/secure-api/nodejs-nestjs.md @@ -9,9 +9,9 @@ This documentation section guides you through the process of integrating ZITADEL ## Overview -The NestJS API includes a single secured route that prints "Hello World!" when authenticated. The API expects an authorization header with a valid JWT, serving as a bearer token to authenticate the user when calling the API. The API will validate the access token on the [introspect endpoint](https://zitadel.com/docs/apis/openidoauth/endpoints#introspection_endpoint) and receive the user from ZITADEL. +The NestJS API includes a single secured route that prints "Hello World!" when authenticated. The API expects an authorization header with a valid JWT, serving as a bearer token to authenticate the user when calling the API. The API will validate the access token on the [introspect endpoint](/docs/apis/openidoauth/endpoints#introspection_endpoint) and receive the user from ZITADEL. -The API application utilizes [JWT with Private Key](https://zitadel.com/docs/apis/openidoauth/authn-methods#jwt-with-private-key) for authentication against ZITADEL and accessing the introspection endpoint. Make sure to create an API Application within Zitadel and download the JSON. 
In this instance, we use this service account, so make sure to provide the secrets in the example application via environmental variables. +The API application utilizes [JWT with Private Key](/docs/apis/openidoauth/authn-methods#jwt-with-private-key) for authentication against ZITADEL and accessing the introspection endpoint. Make sure to create an API Application within Zitadel and download the JSON. In this instance, we use this service account, so make sure to provide the secrets in the example application via environmental variables. ## Overview @@ -25,7 +25,7 @@ Make sure you have Node.js and npm installed on your machine. ### ZITADEL Configuration for the API -1. Create a ZITADEL instance and a project by following the steps [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance). +1. Create a ZITADEL instance and a project by following the steps [here](/docs/guides/start/quickstart#2-create-your-first-instance). 2. Set up an API application within your project: - Create a new application of type "API" with authentication method "Private Key". diff --git a/docs/docs/examples/secure-api/python-django.mdx b/docs/docs/examples/secure-api/python-django.mdx index 215f3a8e7a..f571c30f57 100644 --- a/docs/docs/examples/secure-api/python-django.mdx +++ b/docs/docs/examples/secure-api/python-django.mdx @@ -145,7 +145,7 @@ python manage.py runserver ### Call the API To call the API you need an access token, which is then verified by ZITADEL. -Please follow [this guide here](https://zitadel.com/docs/guides/integrate/private-key-jwt#get-an-access-token), ignoring the first step as we already have the `.json`-key-file from the serviceaccount. +Please follow [this guide here](/docs/guides/integrate/token-introspection/private-key-jwt#get-an-access-token), ignoring the first step as we already have the `.json`-key-file from the serviceaccount. Optionally set the token as an environment variable: ``` diff --git a/docs/docs/examples/secure-api/python-flask.mdx b/docs/docs/examples/secure-api/python-flask.mdx index 1263c945e9..798dfe5f1e 100644 --- a/docs/docs/examples/secure-api/python-flask.mdx +++ b/docs/docs/examples/secure-api/python-flask.mdx @@ -12,11 +12,11 @@ This example shows you how to secure a Python3 Flask API with both authenticatio The Python API will have public, private, and private-scoped routes and check if a user is authenticated and authorized to access the routes. The private routes expect an authorization header with a valid access token in the request. The access token is used as a bearer token to authenticate the user when calling the API. -The API will validate the access token on the [introspect endpoint](https://zitadel.com/docs/apis/openidoauth/endpoints#introspection_endpoint) and will receive the user's roles from ZITADEL. +The API will validate the access token on the [introspect endpoint](/docs/apis/openidoauth/endpoints#introspection_endpoint) and will receive the user's roles from ZITADEL. -The API application uses [Client Secret Basic](https://zitadel.com/docs/apis/openidoauth/authn-methods#client-secret-basic) to authenticate against ZITADEL and access the introspection endpoint. +The API application uses [Client Secret Basic](/docs/apis/openidoauth/authn-methods#client-secret-basic) to authenticate against ZITADEL and access the introspection endpoint. You can use any valid access_token from a user or service account to send requests to the example API. 
-In this example we will use a service account with a [personal access token](https://zitadel.com/docs/guides/integrate/service-users/personal-access-token) which can be used directly to access the example API. +In this example we will use a service account with a [personal access token](/docs/guides/integrate/service-users/personal-access-token) which can be used directly to access the example API. ## Running the example @@ -31,9 +31,9 @@ In order to run the example you need to have `python3` and `pip3` installed. You need to setup a couple of things in ZITADEL. -1. If you don't have an instance yet, please go ahead and create an instance as explained [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance). Also, create a new project by following the steps [here](https://zitadel.com/docs/guides/start/quickstart#2-create-your-first-instance). +1. If you don't have an instance yet, please go ahead and create an instance as explained [here](/docs/guides/start/quickstart#2-create-your-first-instance). Also, create a new project by following the steps [here](/docs/guides/start/quickstart#2-create-your-first-instance). -2. You must create an API application in your project. Follow [this guide](https://zitadel.com/docs/guides/manage/console/applications) to create a new application of type "API" with authentication method "Basic". Save both the ClientID and ClientSecret after you create the application. +2. You must create an API application in your project. Follow [this guide](/docs/guides/manage/console/applications) to create a new application of type "API" with authentication method "Basic". Save both the ClientID and ClientSecret after you create the application. ### Create the API @@ -179,7 +179,7 @@ class ZitadelIntrospectTokenValidator(IntrospectTokenValidator): res = self.introspect_token(*args, **kwargs) return res ``` -3. Create a new file named ".env" in the directory. Copy the configuration in the [".env.example"](https://github.com/zitadel/example-api-python3-flask/blob/main/.env.example) file to the newly created .env file. Set the values with your Instance Domain/Issuer URL, Client ID, and Client Secret from the previous steps. Obtain your Issuer URL by following [these steps](https://zitadel.com/docs/guides/start/quickstart#referred1). +3. Create a new file named ".env" in the directory. Copy the configuration in the [".env.example"](https://github.com/zitadel/example-api-python3-flask/blob/main/.env.example) file to the newly created .env file. Set the values with your Instance Domain/Issuer URL, Client ID, and Client Secret from the previous steps. Obtain your Issuer URL by following [these steps](/docs/guides/start/quickstart#referred1). ```python ZITADEL_DOMAIN = "https://your-domain-abcdef.zitadel.cloud" @@ -191,9 +191,9 @@ CLIENT_SECRET = "NVAp70IqiGmJldbS...." ![Create a service user](/img/python-flask/3.png) -1. Create a service user and a Personal Access Token (PAT) for that user by following [this guide](https://zitadel.com/docs/guides/integrate/service-users/personal-access-token#create-a-service-user-with-a-pat). -2. To enable authorization, follow [this guide](https://zitadel.com/docs/guides/manage/console/roles) to create a role `read:messages` on your project. -3. Next, create an authorization for the service user you created by adding the role `read:messages` to the user. Follow this [guide](https://zitadel.com/docs/guides/manage/console/roles#authorizations) for more information on creating an authorization. +1. 
Create a service user and a Personal Access Token (PAT) for that user by following [this guide](/docs/guides/integrate/service-users/personal-access-token#create-a-service-user-with-a-pat). +2. To enable authorization, follow [this guide](/docs/guides/manage/console/roles) to create a role `read:messages` on your project. +3. Next, create an authorization for the service user you created by adding the role `read:messages` to the user. Follow this [guide](/docs/guides/manage/console/roles#authorizations) for more information on creating an authorization. ### Run the API diff --git a/docs/docs/guides/integrate/identity-providers/introduction.md b/docs/docs/guides/integrate/identity-providers/introduction.md index 4ecea3424a..b97b50b6db 100644 --- a/docs/docs/guides/integrate/identity-providers/introduction.md +++ b/docs/docs/guides/integrate/identity-providers/introduction.md @@ -131,9 +131,9 @@ In the guides below, some of which utilize the Generic OIDC or SAML templates fo If ZITADEL doesn't offer a specific template for your Identity Provider (IdP) and your IdP is fully compliant with OpenID Connect (OIDC), you have the option to use the generic OIDC provider configuration. -For those utilizing a SAML Service Provider, the SAML Service Provider option is available. You can learn how to set up a SAML Service Provider with our [MockSAML example](https://zitadel.com/docs/guides/integrate/identity-providers/mocksaml). +For those utilizing a SAML Service Provider, the SAML Service Provider option is available. You can learn how to set up a SAML Service Provider with our [MockSAML example](/docs/guides/integrate/identity-providers/mocksaml). -Should you wish to transition from a generic OIDC provider to Entra ID (formerly Azure Active Directory) or Google, consider following this [guide](https://zitadel.com/docs/guides/integrate/identity-providers/migrate). +Should you wish to transition from a generic OIDC provider to Entra ID (formerly Azure Active Directory) or Google, consider following this [guide](/docs/guides/integrate/identity-providers/migrate). @@ -176,6 +176,6 @@ Deciding whether to configure an external Identity Provider (IdP) at the organiz ## References -- [Identity brokering in ZITADEL](https://zitadel.com/docs/concepts/features/identity-brokering) -- [The ZITADEL API reference for managing external IdPs](https://zitadel.com/docs/category/apis/resources/admin/identity-providers) -- [Handle external logins in a custom login UI](https://zitadel.com/docs/guides/integrate/login-ui/external-login) \ No newline at end of file +- [Identity brokering in ZITADEL](/docs/concepts/features/identity-brokering) +- [The ZITADEL API reference for managing external IdPs](/docs/category/apis/resources/admin/identity-providers) +- [Handle external logins in a custom login UI](/docs/guides/integrate/login-ui/external-login) \ No newline at end of file diff --git a/docs/docs/guides/integrate/login-ui/_logout.mdx b/docs/docs/guides/integrate/login-ui/_logout.mdx index 6cb2a757a9..33bc3457f6 100644 --- a/docs/docs/guides/integrate/login-ui/_logout.mdx +++ b/docs/docs/guides/integrate/login-ui/_logout.mdx @@ -1,5 +1,5 @@ When your user is done using your application and clicks on the logout button, you have to send a request to the terminate session endpoint. 
-[Terminate Session Documentation](https://zitadel.com/docs/apis/resources/session_service/session-service-delete-session) +[Terminate Session Documentation](/docs/apis/resources/session_service/session-service-delete-session) Sessions can be terminated by either: - the authenticated user diff --git a/docs/docs/guides/integrate/login-ui/_select-account.mdx b/docs/docs/guides/integrate/login-ui/_select-account.mdx index b16cce281c..97528186e6 100644 --- a/docs/docs/guides/integrate/login-ui/_select-account.mdx +++ b/docs/docs/guides/integrate/login-ui/_select-account.mdx @@ -3,7 +3,7 @@ If you want to build your own select account/account picker, you have to cache t We recommend storing a list of the session Ids with the corresponding session token in a cookie. The list of session IDs can be sent in the “search sessions” request to get a detailed list of sessions for the account selection. -[Search Sessions Documentation](https://zitadel.com/docs/apis/resources/session_service/session-service-list-sessions) +[Search Sessions Documentation](/docs/apis/resources/session_service/session-service-list-sessions) ### Request diff --git a/docs/docs/guides/integrate/onboarding/end-users.mdx b/docs/docs/guides/integrate/onboarding/end-users.mdx index 3c8038c5ca..274c2f7eaa 100644 --- a/docs/docs/guides/integrate/onboarding/end-users.mdx +++ b/docs/docs/guides/integrate/onboarding/end-users.mdx @@ -115,7 +115,7 @@ We do have a guide series on how to build your own login ui, which also includes - Passkeys - External Login Providers -You can find all the guides here: [Build your own login UI](https://zitadel.com/docs/guides/integrate/login-ui) +You can find all the guides here: [Build your own login UI](/docs/guides/integrate/login-ui) The create user request also allows you to add metadata (key, value) to the user. This gives you the possibility to collect additional data from your users during the registration process and store it directly to the user in ZITADEL. diff --git a/docs/docs/guides/integrate/token-introspection/basic-auth.mdx b/docs/docs/guides/integrate/token-introspection/basic-auth.mdx index cad0f55c75..6beb244ed0 100644 --- a/docs/docs/guides/integrate/token-introspection/basic-auth.mdx +++ b/docs/docs/guides/integrate/token-introspection/basic-auth.mdx @@ -5,7 +5,7 @@ sidebar_label: Basic Authentication import IntrospectionResponse from './_introspection-response.mdx'; -This is a guide on how to secure your API using [Basic Authentication](https://zitadel.com/docs/apis/openidoauth/authn-methods#client-secret-basic). +This is a guide on how to secure your API using [Basic Authentication](/docs/apis/openidoauth/authn-methods#client-secret-basic). ## Register the API in ZITADEL diff --git a/docs/docs/guides/integrate/zitadel-apis/event-api.md b/docs/docs/guides/integrate/zitadel-apis/event-api.md index e4899421f1..fcc5649ccc 100644 --- a/docs/docs/guides/integrate/zitadel-apis/event-api.md +++ b/docs/docs/guides/integrate/zitadel-apis/event-api.md @@ -7,7 +7,7 @@ ZITADEL leverages the power of eventsourcing, meaning every action and change wi To provide you with greater flexibility and access to these events, ZITADEL has introduced an Event API. This API allows you to easily retrieve and utilize the events generated within the system, enabling you to integrate them into your own system and respond to specific events as they occur. -You need to give a user the [manager role](https://zitadel.com/docs/guides/manage/console/managers) IAM_OWNER_VIEWER or IAM_OWNER to access the Event API. 
+You need to give a user the [manager role](/docs/guides/manage/console/managers) IAM_OWNER_VIEWER or IAM_OWNER to access the Event API. If you like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview). ## Request Events diff --git a/docs/docs/guides/manage/cloud/settings.md b/docs/docs/guides/manage/cloud/settings.md index f4c4543ec7..5b646db3e1 100644 --- a/docs/docs/guides/manage/cloud/settings.md +++ b/docs/docs/guides/manage/cloud/settings.md @@ -19,7 +19,7 @@ You can subscribe and unsubscribe to notifications and newsletters: - Security: Receive notifications related to security issues :::info Technical Advisories -If you want to stay up to date on our technical advisories, we recommend [subscribing here to the mailing list](https://zitadel.com/docs/support/technical_advisory#subscribe-to-our-mailing-list). +If you want to stay up to date on our technical advisories, we recommend [subscribing here to the mailing list](/docs/support/technical_advisory#subscribe-to-our-mailing-list). Technical advisories are notices that report major issues with ZITADEL Self-Hosted or the ZITADEL Cloud platform that could potentially impact security or stability in production environments. ::: diff --git a/docs/docs/guides/migrate/sources/keycloak.md b/docs/docs/guides/migrate/sources/keycloak.md index b432b62163..ccb8c6e704 100644 --- a/docs/docs/guides/migrate/sources/keycloak.md +++ b/docs/docs/guides/migrate/sources/keycloak.md @@ -5,7 +5,7 @@ sidebar_label: From Keycloak ## Migrating from Keycloak to ZITADEL -This guide will use [Docker installation](https://www.docker.com/) to run Keycloak and ZITADEL. However, both Keycloak and ZITADEL offer different installation methods. As a result, this guide won't include any required production tuning or security hardening for either system. However, it's advised you follow [recommended guidelines](https://zitadel.com/docs/guides/manage/self-hosted/production) before putting those systems into production. You can skip setting up Keycloak and ZITADEL if you already have running instances. +This guide will use [Docker installation](https://www.docker.com/) to run Keycloak and ZITADEL. However, both Keycloak and ZITADEL offer different installation methods. As a result, this guide won't include any required production tuning or security hardening for either system. However, it's advised you follow [recommended guidelines](/docs/self-hosting/manage/production) before putting those systems into production. You can skip setting up Keycloak and ZITADEL if you already have running instances. ## Set up Keycloak ### Run Keycloak @@ -77,7 +77,7 @@ docker cp :/tmp/my-realm-users-0.json . ## Set up ZITADEL -After creating a sample application that connects to Keycloak, you need to set up ZITADEL in order to migrate the application and users from Keycloak to ZITADEL. For this, ZITADEL offers a [Docker Compose](https://zitadel.com/docs/self-hosting/deploy/compose) installation guide. Follow the instructions under the [Docker compose](https://zitadel.com/docs/self-hosting/deploy/compose#docker-compose) section to run a ZITADEL instance locally. +After creating a sample application that connects to Keycloak, you need to set up ZITADEL in order to migrate the application and users from Keycloak to ZITADEL. For this, ZITADEL offers a [Docker Compose](/docs/self-hosting/deploy/compose) installation guide. 
Follow the instructions under the [Docker compose](/docs/self-hosting/deploy/compose#docker-compose) section to run a ZITADEL instance locally. Next, the application will be available at [http://localhost:8080/ui/console/](http://localhost:8080/ui/console/). @@ -91,13 +91,13 @@ Now you can access the console with the following default credentials: ## Import Keycloak users into ZITADEL -As explained in this [ZITADEL user migration guide](https://zitadel.com/docs/guides/migrate/users), you can import users individually or in bulk. Since we are looking at importing a single user from Keycloak, migrating that individual user to ZITADEL can be done with the [ImportHumanUser](https://zitadel.com/docs/apis/resources/mgmt/management-service-import-human-user) endpoint. +As explained in this [ZITADEL user migration guide](/docs/guides/migrate/users), you can import users individually or in bulk. Since we are looking at importing a single user from Keycloak, migrating that individual user to ZITADEL can be done with the [ImportHumanUser](/docs/apis/resources/mgmt/management-service-import-human-user) endpoint. > With this endpoint, an email will only be sent to the user if the email is marked as not verified or if there's no password set. ### Create a service user to consume ZITADEL API -But first of all, in order to use this ZITADEL API, you need to create a [service user](https://zitadel.com/docs/guides/integrate/service-users/authenticate-service-users#exercise-create-a-service-user). +But first of all, in order to use this ZITADEL API, you need to create a [service user](/docs/guides/integrate/service-users/authenticate-service-users#exercise-create-a-service-user). Go to the **Users** menu and select the **Service Users** tab. And click the **+ New** button. @@ -167,7 +167,7 @@ if your Keycloak Realm has a single user, your `my-realm-users-0.json` file, int } ``` -Now, you need to transform the JSON to the ZITADEL data format by adhering to the ZITADEL API [specification](https://zitadel.com/docs/apis/resources/mgmt/management-service-import-human-user) to import a user. The minimal format would be as shown below: +Now, you need to transform the JSON to the ZITADEL data format by adhering to the ZITADEL API [specification](/docs/apis/resources/mgmt/management-service-import-human-user) to import a user. The minimal format would be as shown below: ```js { diff --git a/docs/docs/guides/migrate/users.md b/docs/docs/guides/migrate/users.md index bf8a2cb992..5eb8ceb694 100644 --- a/docs/docs/guides/migrate/users.md +++ b/docs/docs/guides/migrate/users.md @@ -42,7 +42,7 @@ Please also consult our [guide](/docs/guides/manage/user/reg-create-user) on how ## Bulk import -For bulk import use the [import endpoint](https://zitadel.com/docs/apis/resources/admin/admin-service-import-data) on the admin API: +For bulk import use the [import endpoint](/docs/apis/resources/admin/admin-service-import-data) on the admin API: ```json { @@ -191,7 +191,7 @@ Currently it is not possible to migrate passkeys directly from another system. ## Users linked to an external IDP -A users `sub` is bound to the external [IDP's Client ID](https://zitadel.com/docs/guides/manage/console/default-settings#identity-providers). +A users `sub` is bound to the external [IDP's Client ID](/docs/guides/manage/console/default-settings#identity-providers). This means that the IDP Client ID configured in ZITADEL must be the same ID as in the legacy system. Users should be imported with their `externalUserId`. 
@@ -211,7 +211,7 @@ _snippet from [bulk-import](#bulk-import) example:_ } ``` -You can use an Action with [post-creation flow](https://zitadel.com/docs/apis/actions/external-authentication#post-creation) to pull information such as roles from the old system and apply them to the user in ZITADEL. +You can use an Action with [post-creation flow](/docs/apis/actions/external-authentication#post-creation) to pull information such as roles from the old system and apply them to the user in ZITADEL. ## Metadata @@ -220,7 +220,7 @@ Use metadata to store additional attributes of the users, such as organizational :::info Metadata must be added to users after the users were created. Currently metadata can't be added during user creation. -[API reference: User Metadata](https://zitadel.com/docs/category/apis/resources/mgmt/user-metadata) +[API reference: User Metadata](/docs/category/apis/resources/mgmt/user-metadata) ::: Request metadata from the userinfo endpoint by passing the required [reserved scope](/docs/apis/openidoauth/scopes#reserved-scopes) in your auth request. @@ -232,5 +232,5 @@ You can assign roles from owned or granted projects to a user. :::info Authorizations must be added to users after the users were created. Currently metadata can't be added during user creation. -[API reference: User Authorization / Grants](https://zitadel.com/docs/category/apis/resources/auth/user-authorizations-grants) +[API reference: User Authorization / Grants](/docs/category/apis/resources/auth/user-authorizations-grants) ::: \ No newline at end of file diff --git a/docs/docs/guides/start/quickstart.mdx b/docs/docs/guides/start/quickstart.mdx index e76c98bd05..99f193ddd5 100644 --- a/docs/docs/guides/start/quickstart.mdx +++ b/docs/docs/guides/start/quickstart.mdx @@ -353,7 +353,7 @@ The provided config extends the `UserManagerSettings` of the `oidc-client-ts` li - redirect_uri (the URL to redirect to after the authorization flow is complete) - post_logout_redirect_uri (the URL to redirect to after the user logs out) - scope (the permissions requested from the user) -- project_resource_id (To add a ZITADEL project scope. `urn:zitadel:iam:org:project:id:[projectId]:aud` and `urn:zitadel:iam:org:projects:roles` [scopes](https://zitadel.com/docs/apis/openidoauth/scopes#reserved-scopes).) +- project_resource_id (To add a ZITADEL project scope. `urn:zitadel:iam:org:project:id:[projectId]:aud` and `urn:zitadel:iam:org:projects:roles` [scopes](/docs/apis/openidoauth/scopes#reserved-scopes).) - prompt ([the OIDC prompt parameter](/apis/openidoauth/endpoints#additional-parameters)) 2. Create a folder named components in the src directory. Create two files named Login.js and Callback.js. @@ -412,4 +412,4 @@ And this brings us to the end of this quick start guide! This tutorial covered how to configure ZITADEL and how to use React to build an app that communicates with ZITADEL to access secured resources. -We hope you enjoyed the tutorial and encourage you to check out the ZITADEL [documentation](https://zitadel.com/docs) for more information on how to use the ZITADEL platform to its full potential. Thanks for joining us! +We hope you enjoyed the tutorial and encourage you to check out the ZITADEL [documentation](/docs) for more information on how to use the ZITADEL platform to its full potential. Thanks for joining us! 
diff --git a/docs/docs/legal/policies/brand-trademark-policy.md b/docs/docs/legal/policies/brand-trademark-policy.md index b6b1c6aaff..8016760c58 100644 --- a/docs/docs/legal/policies/brand-trademark-policy.md +++ b/docs/docs/legal/policies/brand-trademark-policy.md @@ -34,7 +34,7 @@ To ensure the logo is used as intended, we provide specific examples below and r - Use in architecture diagrams without implying affiliation or partnership - Editorial and informational purposes such as blog posts or news articles -- Linking back to our [website](https://zitadel.com), official [repositories](https://github.com/zitadel), or [documentation](https://zitadel.com/docs) +- Linking back to our [website](https://zitadel.com), official [repositories](https://github.com/zitadel), or [documentation](/docs) - Indicating that the software is available for use or installation without implying any affiliation or endorsement ### Not acceptable diff --git a/docs/docs/legal/policies/vulnerability-disclosure-policy.mdx b/docs/docs/legal/policies/vulnerability-disclosure-policy.mdx index 1566655243..df8245d72e 100644 --- a/docs/docs/legal/policies/vulnerability-disclosure-policy.mdx +++ b/docs/docs/legal/policies/vulnerability-disclosure-policy.mdx @@ -57,7 +57,7 @@ We will not publish this information by default to protect your privacy. ### What not to report - Disclosure of known public files or directories, e.g. robots.txt, files under .well-known, or files that are included in our public repositories (eg, go.mod) -- DoS of users when [Lockout Policy is enabled](https://zitadel.com/docs/guides/manage/console/default-settings#lockout) +- DoS of users when [Lockout Policy is enabled](/docs/guides/manage/console/default-settings#lockout) - Suggestions on Certificate Authority Authorization (CAA) rules - Suggestions on DMARC/DKIM/SPF settings - Suggestions on DNSSEC settings diff --git a/docs/docs/self-hosting/deploy/troubleshooting/_instance_not_found.mdx b/docs/docs/self-hosting/deploy/troubleshooting/_instance_not_found.mdx index 98785275b9..76b8e7b2f6 100644 --- a/docs/docs/self-hosting/deploy/troubleshooting/_instance_not_found.mdx +++ b/docs/docs/self-hosting/deploy/troubleshooting/_instance_not_found.mdx @@ -1,5 +1,5 @@ `ID=QUERY-n0wng Message=Instance not found` If you're self hosting with a custom domain, you need to instruct ZITADEL to use the `ExternalDomain`. -You can find further instructions in our guide about [custom domains](https://zitadel.com/docs/self-hosting/manage/custom-domain). -We also provide a guide on how to [configure](https://zitadel.com/docs/self-hosting/manage/configure) ZITADEL with variables from files or environment variables. +You can find further instructions in our guide about [custom domains](/docs/self-hosting/manage/custom-domain). +We also provide a guide on how to [configure](/docs/self-hosting/manage/configure) ZITADEL with variables from files or environment variables. diff --git a/docs/docs/self-hosting/manage/cli/mirror.mdx b/docs/docs/self-hosting/manage/cli/mirror.mdx new file mode 100644 index 0000000000..8f96ed93df --- /dev/null +++ b/docs/docs/self-hosting/manage/cli/mirror.mdx @@ -0,0 +1,232 @@ +--- +title: Mirror data to another database +sidebar_label: Mirror command +--- + +The `mirror` command allows you to do database to database migrations. This functionality is useful to copy data from one database to another. + +The data can be mirrored to multiple database without influencing each other. 
+
+## Use cases
+
+Migrate from CockroachDB to PostgreSQL or vice versa.
+
+Replicate data to a secondary environment for testing.
+
+## Prerequisites
+
+You need an existing source database, most probably the database ZITADEL currently serves traffic from.
+
+To mirror the data, the destination database needs to be initialized and set up without an instance.
+
+### Start the destination database
+
+Follow one of the following guides to start the database:
+
+* [Linux](/docs/self-hosting/deploy/linux#run-postgresql)
+* [MacOS](/docs/self-hosting/deploy/macos#install-postgresql)
+
+Or use the following commands for [Docker Compose](/docs/self-hosting/deploy/compose):
+
+```bash
+# Download the docker compose example configuration.
+wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose.yaml
+
+# Run the database and application containers.
+docker compose up db --detach
+```
+
+## Example
+
+The following commands set up the database as described above. See [configuration](#configuration) for more details about the configuration options.
+
+```bash
+zitadel init --config /path/to/your/new/config.yaml
+zitadel setup --for-init --config /path/to/your/new/config.yaml # make sure to set --tlsMode and masterkey analogously to your current deployment
+zitadel mirror --system --config /path/to/your/mirror/config.yaml # make sure to set --tlsMode and masterkey analogously to your current deployment
+```
+
+## Usage
+
+The general syntax for the mirror command is:
+
+```bash
+zitadel mirror [flags]
+
+Flags:
+ -h, --help help for mirror
+
+ --config stringArray path to config file to overwrite system defaults
+
+ --ignore-previous ignores previous migrations of the events table. This flag should be used if you manually dropped previously mirrored events.
+ --replace replaces all data of the following tables for the provided instances or all if the `--system`-flag is set:
+ * system.assets
+ * auth.auth_requests
+ * eventstore.unique_constraints
+ This flag should be provided if you want to execute the mirror command multiple times, so that the static data is also mirrored and inconsistent states are prevented.
+
+ --instance strings id or comma-separated ids of the instance(s) to migrate. Either this or the `--system`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.
+ --system migrates the whole system. Either this or the `--instance`-flag must be set. Make sure to always use the same flag if you execute the command multiple times.
+
+# For the flags below use the same configuration you also use in the current deployment
+
+ --masterkey string masterkey as argument for en/decryption keys
+ -m, --masterkeyFile string path to the masterkey for en/decryption keys
+ --masterkeyFromEnv read masterkey for en/decryption keys from environment variable (ZITADEL_MASTERKEY)
+ --tlsMode externalSecure start ZITADEL with (enabled), without (disabled) TLS or external component e.g. reverse proxy (external) terminating TLS, this flag will overwrite externalSecure and `tls.enabled` in config files
+```
+
+## Configuration
+
+```yaml
+# The source database the data are copied from.
Use either cockroach or postgres, by default cockroach is used +Source: + cockroach: + Host: localhost # ZITADEL_SOURCE_COCKROACH_HOST + Port: 26257 # ZITADEL_SOURCE_COCKROACH_PORT + Database: zitadel # ZITADEL_SOURCE_COCKROACH_DATABASE + MaxOpenConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXOPENCONNS + MaxIdleConns: 6 # ZITADEL_SOURCE_COCKROACH_MAXIDLECONNS + EventPushConnRatio: 0.33 # ZITADEL_SOURCE_COCKROACH_EVENTPUSHCONNRATIO + ProjectionSpoolerConnRatio: 0.33 # ZITADEL_SOURCE_COCKROACH_PROJECTIONSPOOLERCONNRATIO + MaxConnLifetime: 30m # ZITADEL_SOURCE_COCKROACH_MAXCONNLIFETIME + MaxConnIdleTime: 5m # ZITADEL_SOURCE_COCKROACH_MAXCONNIDLETIME + Options: "" # ZITADEL_SOURCE_COCKROACH_OPTIONS + User: + Username: zitadel # ZITADEL_SOURCE_COCKROACH_USER_USERNAME + Password: "" # ZITADEL_SOURCE_COCKROACH_USER_PASSWORD + SSL: + Mode: disable # ZITADEL_SOURCE_COCKROACH_USER_SSL_MODE + RootCert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_ROOTCERT + Cert: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_CERT + Key: "" # ZITADEL_SOURCE_COCKROACH_USER_SSL_KEY + # Postgres is used as soon as a value is set + # The values describe the possible fields to set values + postgres: + Host: # ZITADEL_SOURCE_POSTGRES_HOST + Port: # ZITADEL_SOURCE_POSTGRES_PORT + Database: # ZITADEL_SOURCE_POSTGRES_DATABASE + MaxOpenConns: # ZITADEL_SOURCE_POSTGRES_MAXOPENCONNS + MaxIdleConns: # ZITADEL_SOURCE_POSTGRES_MAXIDLECONNS + MaxConnLifetime: # ZITADEL_SOURCE_POSTGRES_MAXCONNLIFETIME + MaxConnIdleTime: # ZITADEL_SOURCE_POSTGRES_MAXCONNIDLETIME + Options: # ZITADEL_SOURCE_POSTGRES_OPTIONS + User: + Username: # ZITADEL_SOURCE_POSTGRES_USER_USERNAME + Password: # ZITADEL_SOURCE_POSTGRES_USER_PASSWORD + SSL: + Mode: # ZITADEL_SOURCE_POSTGRES_USER_SSL_MODE + RootCert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_ROOTCERT + Cert: # ZITADEL_SOURCE_POSTGRES_USER_SSL_CERT + Key: # ZITADEL_SOURCE_POSTGRES_USER_SSL_KEY + +# The destination database the data are copied to. 
Use either cockroach or postgres, by default cockroach is used +Destination: + cockroach: + Host: localhost # ZITADEL_DESTINATION_COCKROACH_HOST + Port: 26257 # ZITADEL_DESTINATION_COCKROACH_PORT + Database: zitadel # ZITADEL_DESTINATION_COCKROACH_DATABASE + MaxOpenConns: 0 # ZITADEL_DESTINATION_COCKROACH_MAXOPENCONNS + MaxIdleConns: 0 # ZITADEL_DESTINATION_COCKROACH_MAXIDLECONNS + MaxConnLifetime: 30m # ZITADEL_DESTINATION_COCKROACH_MAXCONNLIFETIME + MaxConnIdleTime: 5m # ZITADEL_DESTINATION_COCKROACH_MAXCONNIDLETIME + EventPushConnRatio: 0.01 # ZITADEL_DESTINATION_COCKROACH_EVENTPUSHCONNRATIO + ProjectionSpoolerConnRatio: 0.5 # ZITADEL_DESTINATION_COCKROACH_PROJECTIONSPOOLERCONNRATIO + Options: "" # ZITADEL_DESTINATION_COCKROACH_OPTIONS + User: + Username: zitadel # ZITADEL_DESTINATION_COCKROACH_USER_USERNAME + Password: "" # ZITADEL_DESTINATION_COCKROACH_USER_PASSWORD + SSL: + Mode: disable # ZITADEL_DESTINATION_COCKROACH_USER_SSL_MODE + RootCert: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_ROOTCERT + Cert: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_CERT + Key: "" # ZITADEL_DESTINATION_COCKROACH_USER_SSL_KEY + # Postgres is used as soon as a value is set + # The values describe the possible fields to set values + postgres: + Host: # ZITADEL_DESTINATION_POSTGRES_HOST + Port: # ZITADEL_DESTINATION_POSTGRES_PORT + Database: # ZITADEL_DESTINATION_POSTGRES_DATABASE + MaxOpenConns: # ZITADEL_DESTINATION_POSTGRES_MAXOPENCONNS + MaxIdleConns: # ZITADEL_DESTINATION_POSTGRES_MAXIDLECONNS + MaxConnLifetime: # ZITADEL_DESTINATION_POSTGRES_MAXCONNLIFETIME + MaxConnIdleTime: # ZITADEL_DESTINATION_POSTGRES_MAXCONNIDLETIME + Options: # ZITADEL_DESTINATION_POSTGRES_OPTIONS + User: + Username: # ZITADEL_DESTINATION_POSTGRES_USER_USERNAME + Password: # ZITADEL_DESTINATION_POSTGRES_USER_PASSWORD + SSL: + Mode: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_MODE + RootCert: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_ROOTCERT + Cert: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_CERT + Key: # ZITADEL_DESTINATION_POSTGRES_USER_SSL_KEY + +# As cockroachdb first copies the data into memory this parameter is used to iterate through the events table and fetch only the given amount of events per iteration +EventBulkSize: 10000 # ZITADEL_EVENTBULKSIZE + +Projections: + # Defines how many projections are allowed to run in parallel + ConcurrentInstances: 7 # ZITADEL_PROJECTIONS_CONCURRENTINSTANCES + # Limits the amount of events projected by each iteration + EventBulkLimit: 1000 # ZITADEL_PROJECTIONS_EVENTBULKLIMIT + +Auth: + Spooler: + # Limits the amount of events projected by each iteration + BulkLimit: 1000 #ZITADEL_AUTH_SPOOLER_BULKLIMIT + +Admin: + Spooler: + # Limits the amount of events projected by each iteration + BulkLimit: 10 #ZITADEL_ADMIN_SPOOLER_BULKLIMIT + +Log: + Level: info +``` + +## Sub commands + +The provided sub commands allow more fine grained execution of copying the data. + +The following commands are safe to execute multiple times by adding the `--replace`-flag which replaces the data not provided by the events in the destination database. + +### `zitadel mirror auth` + +Copies the auth requests to the destination database. + +### `zitadel mirror eventstore` + +Copies the events since the last migration and unique constraints to the destination database. + +### `zitadel mirror projections` + +Executes all projections in the destination database. + +It is NOOP if the projections are already up-to-date. + +### `zitadel mirror system` + +Copies encryption keys and assets to the destination database. 
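For a staged migration, the sub commands can also be run one by one. A minimal sketch, assuming the sub commands accept the same `--system`/`--instance`, `--config` and `--replace` flags as the parent `mirror` command and that `mirror-config.yaml` is the configuration file described above:

```bash
# Copy events and unique constraints; repeatable with --replace while preparing the switch.
zitadel mirror eventstore --system --config mirror-config.yaml --replace

# Copy encryption keys, assets and auth requests.
zitadel mirror system --system --config mirror-config.yaml --replace
zitadel mirror auth --system --config mirror-config.yaml --replace

# Compute all projections in the destination database.
zitadel mirror projections --system --config mirror-config.yaml

# Compare row counts between source and destination.
zitadel mirror verify --system --config mirror-config.yaml
```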
+
+### `zitadel mirror verify`
+
+Prints the number of rows in the source and destination database and the diff. Positive numbers indicate more rows in the destination table than in the source, negative numbers the opposite.
+
+The following tables will likely have an unequal count:
+
+* **projections.current_states**: If your deployment was upgraded several times, the number of entries in the destination will be lower
+* **projections.locks**: If your deployment was upgraded several times, the number of entries in the destination will be lower
+* **projections.keys4\***: Only keys that have not expired are inserted, so the number of entries in the destination will be lower
+* **projections.failed_events**: Should be lower or equal.
+* **auth.users2**: Was replaced with auth.users3, the number of entries in the destination will be 0
+* **auth.users3**: Is the replacement of auth.users2, the number of entries in the destination will be equal or higher
+
+## Limitations
+
+It is not possible to use files as source or destination. See the GitHub issue [here](https://github.com/zitadel/zitadel/issues/7966).
+
+Currently, the encryption keys of the source database must be copied to the destination database. See the GitHub issue [here](https://github.com/zitadel/zitadel/issues/7964).
+
+It is not possible to change the domain of the ZITADEL deployment.
+
+Once you have mirrored an instance using the `--instance` flag, you have to make sure you don't mirror other preexisting instances. For example, you cannot mirror a few instances and then pass the `--system` flag; once you have used the `--instance` flag, you have to pass all remaining instances explicitly.
diff --git a/docs/docs/self-hosting/manage/cli/overview.mdx b/docs/docs/self-hosting/manage/cli/overview.mdx
new file mode 100644
index 0000000000..31fd71f4d0
--- /dev/null
+++ b/docs/docs/self-hosting/manage/cli/overview.mdx
@@ -0,0 +1,34 @@
+---
+title: ZITADEL Command Line Interface
+sidebar_label: Overview
+---
+
+This documentation serves as your guide to interacting with ZITADEL through the command line interface (CLI). The ZITADEL CLI lets you manage various aspects of your ZITADEL system efficiently from your terminal.
+
+This introductory section provides a brief overview of what the ZITADEL CLI offers and who can benefit from using it.
+
+Let's dive in!
+
+## Download the CLI
+
+Download the CLI for [Linux](/docs/self-hosting/deploy/linux#install-zitadel) or [MacOS](/docs/self-hosting/deploy/macos#install-zitadel).
+
+## Quick start
+
+The easiest way to start ZITADEL is by following the [docker compose example](/docs/self-hosting/deploy/compose) which executes the commands for you.
+
+## Initialize the database
+
+The `zitadel init`-command sets up the ZITADEL database. The statements executed need a user with `ADMIN`-privilege. See [init phase](/docs/self-hosting/manage/updating_scaling#the-init-phase) for more information.
+
+## Setup ZITADEL
+
+The `zitadel setup`-command further sets up the database created using `zitadel init`. This command only requires the user created in the previous step. See [setup phase](/docs/self-hosting/manage/updating_scaling#the-setup-phase) for more information.
+
+## Start ZITADEL
+
+The `zitadel start`-command runs the ZITADEL server. See [runtime phase](/docs/self-hosting/manage/updating_scaling#the-runtime-phase) for more information.
+
+The `zitadel start-from-setup`-command first executes [the setup phase](#setup-zitadel) and afterwards runs the ZITADEL server.
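For reference, the three phases map to separate commands. A minimal sketch of running them manually (the config path is a placeholder, and the masterkey flag is one of the options shown for `zitadel mirror` above; use whichever masterkey mechanism your deployment relies on):

```bash
# 1. Initialize the database and database user (needs ADMIN privileges on the database).
zitadel init --config ./zitadel-config.yaml

# 2. Set up the database objects and the first instance.
zitadel setup --config ./zitadel-config.yaml --masterkeyFromEnv

# 3. Run the ZITADEL server.
zitadel start --config ./zitadel-config.yaml --masterkeyFromEnv
```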
+ +The `zitadel start-from-init`-command first executes [the init phase](#Initialize-the-database), afterwards [the setup phase](#setup-zitadel) and lastly runs the ZITADEL server. diff --git a/docs/docs/self-hosting/manage/productionchecklist.md b/docs/docs/self-hosting/manage/productionchecklist.md index aaf52d0de8..fb85557a23 100644 --- a/docs/docs/self-hosting/manage/productionchecklist.md +++ b/docs/docs/self-hosting/manage/productionchecklist.md @@ -25,7 +25,7 @@ To apply best practices to your production setup we created a step by step check - [ ] Secure database connections from outside your network and/or use an internal subnet for database connectivity - [ ] High Availability for critical infrastructure components (depending on your setup) - [ ] Loadbalancer - - [ ] [Reverse Proxies](https://zitadel.com/docs/self-hosting/manage/reverseproxy/reverse_proxy) + - [ ] [Reverse Proxies](/docs/self-hosting/manage/reverseproxy/reverse_proxy) - [ ] Web Application Firewall #### Networking @@ -41,7 +41,7 @@ To apply best practices to your production setup we created a step by step check - [ ] Add [Custom Branding](/docs/guides/manage/customize/branding) if required - [ ] Configure a valid [SMS Service](/docs/guides/manage/console/default-settings#sms) such as Twilio if needed - [ ] Configure your privacy policy, terms of service and a help Link if needed -- [ ] Keep your [masterkey](https://zitadel.com/docs/self-hosting/manage/configure) in a secure storage +- [ ] Keep your [masterkey](/docs/self-hosting/manage/configure) in a secure storage - [ ] Declare and apply zitadel configuration using the zitadel terraform [provider](https://github.com/zitadel/terraform-provider-zitadel) ### Security diff --git a/docs/docs/support/advisory/a10003.md b/docs/docs/support/advisory/a10003.md index 9be32fbd78..b132e5738d 100644 --- a/docs/docs/support/advisory/a10003.md +++ b/docs/docs/support/advisory/a10003.md @@ -21,7 +21,7 @@ If users are redirected to the Login-UI without any organizational context, they :::note If the registration (and also authentication) needs to occur on a specified organization, apps can already -specify this by providing [an organization scope](https://zitadel.com/docs/apis/openidoauth/scopes#reserved-scopes). +specify this by providing [an organization scope](/docs/apis/openidoauth/scopes#reserved-scopes). ::: ## Statement @@ -37,7 +37,7 @@ There's no action needed on your side currently as existing instances are not af Once this update has been released and deployed, newly created instances will always use the default organization and its settings as default context for the login. -Already existing instances will still use the instance settings by default and can switch to the new default by ["Activating the 'LoginDefaultOrg' feature"](https://zitadel.com/docs/apis/resources/admin/admin-service-activate-feature-login-default-org) through the Admin API. +Already existing instances will still use the instance settings by default and can switch to the new default by ["Activating the 'LoginDefaultOrg' feature"](/docs/apis/resources/admin/admin-service-activate-feature-login-default-org) through the Admin API. 
**This change is irreversible!** :::note diff --git a/docs/docs/support/software-release-cycles-support.md b/docs/docs/support/software-release-cycles-support.md index 6524c489fd..162b428269 100644 --- a/docs/docs/support/software-release-cycles-support.md +++ b/docs/docs/support/software-release-cycles-support.md @@ -74,7 +74,7 @@ During this phase, support is limited as we focus on testing and bug fixing. ### General available Generally available features are available to everyone and have the appropriate test coverage to be used for critical tasks. -The software will be backwards-compatible with previous versions, for exceptions we will publish a [technical advisory](https://zitadel.com/docs/support/technical_advisory). +The software will be backwards-compatible with previous versions, for exceptions we will publish a [technical advisory](/docs/support/technical_advisory). Features in General Availability are not marked explicitly. ## Release types diff --git a/docs/sidebars.js b/docs/sidebars.js index d0a0ab9cf0..d142431638 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -881,6 +881,18 @@ module.exports = { "self-hosting/manage/database/database", "self-hosting/manage/updating_scaling", "self-hosting/manage/usage_control", + { + type: "category", + label: "Command Line Interface", + collapsed: false, + link: { + type: "doc", + id: "self-hosting/manage/cli/overview" + }, + items: [ + "self-hosting/manage/cli/mirror" + ], + }, ], }, ], diff --git a/internal/admin/repository/eventsourcing/handler/handler.go b/internal/admin/repository/eventsourcing/handler/handler.go index e4791b70d9..06720144e1 100644 --- a/internal/admin/repository/eventsourcing/handler/handler.go +++ b/internal/admin/repository/eventsourcing/handler/handler.go @@ -41,14 +41,24 @@ func Register(ctx context.Context, config Config, view *view.View, static static )) } +func Projections() []*handler2.Handler { + return projections +} + func Start(ctx context.Context) { for _, projection := range projections { projection.Start(ctx) } } -func Projections() []*handler2.Handler { - return projections +func ProjectInstance(ctx context.Context) error { + for _, projection := range projections { + _, err := projection.Trigger(ctx) + if err != nil { + return err + } + } + return nil } func (config Config) overwrite(viewModel string) handler2.Config { diff --git a/internal/auth/repository/eventsourcing/handler/handler.go b/internal/auth/repository/eventsourcing/handler/handler.go index 03e367829f..dedb1bce4e 100644 --- a/internal/auth/repository/eventsourcing/handler/handler.go +++ b/internal/auth/repository/eventsourcing/handler/handler.go @@ -63,6 +63,16 @@ func Projections() []*handler2.Handler { return projections } +func ProjectInstance(ctx context.Context) error { + for _, projection := range projections { + _, err := projection.Trigger(ctx) + if err != nil { + return err + } + } + return nil +} + func (config Config) overwrite(viewModel string) handler2.Config { c := handler2.Config{ Client: config.Client, diff --git a/internal/crypto/rsa.go b/internal/crypto/rsa.go index 38d8e6a1bd..198610d8aa 100644 --- a/internal/crypto/rsa.go +++ b/internal/crypto/rsa.go @@ -171,7 +171,7 @@ func BytesToPrivateKey(priv []byte) (*rsa.PrivateKey, error) { var ErrEmpty = errors.New("cannot decode, empty data") func BytesToPublicKey(pub []byte) (*rsa.PublicKey, error) { - if pub == nil { + if len(pub) == 0 { return nil, ErrEmpty } block, _ := pem.Decode(pub) diff --git a/internal/database/cockroach/crdb.go 
b/internal/database/cockroach/crdb.go index 48649fac9e..3d72e3904c 100644 --- a/internal/database/cockroach/crdb.go +++ b/internal/database/cockroach/crdb.go @@ -14,7 +14,7 @@ import ( ) func init() { - config := &Config{} + config := new(Config) dialect.Register(config, config, true) } @@ -49,11 +49,12 @@ func (c *Config) MatchName(name string) bool { return false } -func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) { +func (_ *Config) Decode(configs []interface{}) (dialect.Connector, error) { + connector := new(Config) decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, - Result: c, + Result: connector, }) if err != nil { return nil, err @@ -65,7 +66,7 @@ func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) { } } - return c, nil + return connector, nil } func (c *Config) Connect(useAdmin bool, pusherRatio, spoolerRatio float64, purpose dialect.DBPurpose) (*sql.DB, error) { diff --git a/internal/database/database.go b/internal/database/database.go index 77baaa7bd2..f057b5c4e8 100644 --- a/internal/database/database.go +++ b/internal/database/database.go @@ -75,7 +75,7 @@ func (db *DB) QueryRow(scan func(*sql.Row) error, query string, args ...any) (er func (db *DB) QueryRowContext(ctx context.Context, scan func(row *sql.Row) error, query string, args ...any) (err error) { ctx, spanBeginTx := tracing.NewNamedSpan(ctx, "db.BeginTx") - tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) + tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true, Isolation: sql.LevelReadCommitted}) spanBeginTx.EndWithError(err) if err != nil { return err diff --git a/internal/database/postgres/pg.go b/internal/database/postgres/pg.go index de7942f725..8f70da0703 100644 --- a/internal/database/postgres/pg.go +++ b/internal/database/postgres/pg.go @@ -14,7 +14,7 @@ import ( ) func init() { - config := &Config{} + config := new(Config) dialect.Register(config, config, false) } @@ -50,11 +50,12 @@ func (c *Config) MatchName(name string) bool { return false } -func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) { +func (_ *Config) Decode(configs []interface{}) (dialect.Connector, error) { + connector := new(Config) decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, - Result: c, + Result: connector, }) if err != nil { return nil, err @@ -66,7 +67,7 @@ func (c *Config) Decode(configs []interface{}) (dialect.Connector, error) { } } - return c, nil + return connector, nil } func (c *Config) Connect(useAdmin bool, pusherRatio, spoolerRatio float64, purpose dialect.DBPurpose) (*sql.DB, error) { diff --git a/internal/eventstore/handler/v2/handler.go b/internal/eventstore/handler/v2/handler.go index 02afc58ca2..0e0f8ae4d1 100644 --- a/internal/eventstore/handler/v2/handler.go +++ b/internal/eventstore/handler/v2/handler.go @@ -259,9 +259,6 @@ func (h *Handler) triggerInstances(ctx context.Context, instances []string, trig for ; err != nil; _, err = h.Trigger(instanceCtx, triggerOpts...) 
{ time.Sleep(h.retryFailedAfter) h.log().WithField("instance", instance).OnError(err).Debug("trigger failed") - if err == nil { - break - } } } } diff --git a/internal/notification/projections.go b/internal/notification/projections.go index cdb2a84b26..46434536c2 100644 --- a/internal/notification/projections.go +++ b/internal/notification/projections.go @@ -44,6 +44,16 @@ func Start(ctx context.Context) { } } +func ProjectInstance(ctx context.Context) error { + for _, projection := range projections { + _, err := projection.Trigger(ctx) + if err != nil { + return err + } + } + return nil +} + func Projections() []*handler.Handler { return projections } diff --git a/internal/query/projection/projection.go b/internal/query/projection/projection.go index de7b6135bd..30c1df6870 100644 --- a/internal/query/projection/projection.go +++ b/internal/query/projection/projection.go @@ -181,6 +181,16 @@ func Start(ctx context.Context) { } } +func ProjectInstance(ctx context.Context) error { + for _, projection := range projections { + _, err := projection.Trigger(ctx) + if err != nil { + return err + } + } + return nil +} + func ApplyCustomConfig(customConfig CustomConfig) handler.Config { return applyCustomConfig(projectionConfig, customConfig) } diff --git a/internal/query/query.go b/internal/query/query.go index 8b8313625e..c2fbcb00a3 100644 --- a/internal/query/query.go +++ b/internal/query/query.go @@ -20,7 +20,6 @@ import ( "github.com/zitadel/zitadel/internal/query/projection" "github.com/zitadel/zitadel/internal/telemetry/tracing" es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore" - "github.com/zitadel/zitadel/internal/v2/eventstore/postgres" ) type Queries struct { @@ -46,6 +45,7 @@ type Queries struct { func StartQueries( ctx context.Context, es *eventstore.Eventstore, + esV4 es_v4.Querier, querySqlClient, projectionSqlClient *database.DB, projections projection.Config, defaults sd.SystemDefaults, @@ -59,7 +59,7 @@ func StartQueries( ) (repo *Queries, err error) { repo = &Queries{ eventstore: es, - eventStoreV4: postgres.New(querySqlClient), + eventStoreV4: esV4, client: querySqlClient, DefaultLanguage: language.Und, LoginTranslationFileContents: make(map[string][]byte), diff --git a/internal/v2/eventstore/postgres/event.go b/internal/v2/eventstore/postgres/event.go index 18d86c5751..f531c47e9f 100644 --- a/internal/v2/eventstore/postgres/event.go +++ b/internal/v2/eventstore/postgres/event.go @@ -29,14 +29,14 @@ func intentToCommands(intent *intent) (commands []*command, err error) { } func marshalPayload(payload any) ([]byte, error) { - if reflect.ValueOf(payload).IsZero() { + if payload == nil || reflect.ValueOf(payload).IsZero() { return nil, nil } return json.Marshal(payload) } type command struct { - eventstore.Command + *eventstore.Command intent *intent diff --git a/internal/v2/eventstore/postgres/push.go b/internal/v2/eventstore/postgres/push.go index 269d22cdef..0f4c29316c 100644 --- a/internal/v2/eventstore/postgres/push.go +++ b/internal/v2/eventstore/postgres/push.go @@ -3,7 +3,9 @@ package postgres import ( "context" "database/sql" + "fmt" + "github.com/cockroachdb/cockroach-go/v2/crdb" "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/telemetry/tracing" @@ -28,40 +30,54 @@ func (s *Storage) Push(ctx context.Context, intent *eventstore.PushIntent) (err }() } - // allows smaller wait times on query side for instances which are not actively writing - if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil { - return err - } + var 
retryCount uint32 + return crdb.Execute(func() (err error) { + defer func() { + if err == nil { + return + } + if retryCount < s.config.MaxRetries { + retryCount++ + return + } + logging.WithFields("retry_count", retryCount).WithError(err).Debug("max retry count reached") + err = zerrors.ThrowInternal(err, "POSTG-VJfJz", "Errors.Internal") + }() + // allows smaller wait times on query side for instances which are not actively writing + if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil { + return err + } - intents, err := lockAggregates(ctx, tx, intent) - if err != nil { - return err - } - - if !checkSequences(intents) { - return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched") - } - - commands := make([]*command, 0, len(intents)) - for _, intent := range intents { - additionalCommands, err := intentToCommands(intent) + intents, err := lockAggregates(ctx, tx, intent) if err != nil { return err } - commands = append(commands, additionalCommands...) - } - err = uniqueConstraints(ctx, tx, commands) - if err != nil { - return err - } + if !checkSequences(intents) { + return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched") + } - return push(ctx, tx, intent, commands) + commands := make([]*command, 0, len(intents)) + for _, intent := range intents { + additionalCommands, err := intentToCommands(intent) + if err != nil { + return err + } + commands = append(commands, additionalCommands...) + } + + err = uniqueConstraints(ctx, tx, commands) + if err != nil { + return err + } + + return push(ctx, tx, intent, commands) + }) } // setAppName for the the current transaction func setAppName(ctx context.Context, tx *sql.Tx, name string) error { - _, err := tx.ExecContext(ctx, "SET LOCAL application_name TO $1", name) + _, err := tx.ExecContext(ctx, fmt.Sprintf("SET LOCAL application_name TO '%s'", name)) if err != nil { logging.WithFields("name", name).WithError(err).Debug("setting app name failed") return zerrors.ThrowInternal(err, "POSTG-G3OmZ", "Errors.Internal") @@ -154,7 +170,8 @@ func push(ctx context.Context, tx *sql.Tx, reducer eventstore.Reducer, commands cmd.sequence, cmd.position.InPositionOrder, ) - stmt.WriteString(", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())") + + stmt.WriteString(pushPositionStmt) stmt.WriteString(`)`) } stmt.WriteString(` RETURNING created_at, "position"`) diff --git a/internal/v2/eventstore/postgres/push_test.go b/internal/v2/eventstore/postgres/push_test.go index b81b3a1517..641b36680d 100644 --- a/internal/v2/eventstore/postgres/push_test.go +++ b/internal/v2/eventstore/postgres/push_test.go @@ -36,7 +36,9 @@ func Test_uniqueConstraints(t *testing.T) { name: "command without constraints", args: args{ commands: []*command{ - {}, + { + Command: &eventstore.Command{}, + }, }, expectations: []mock.Expectation{}, }, @@ -53,7 +55,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id", "error"), }, @@ -81,7 +83,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddGlobalUniqueConstraint("test", "id", "error"), }, @@ -109,7 +111,7 @@ 
func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id", "error"), eventstore.NewAddEventUniqueConstraint("test", "id2", "error"), @@ -143,7 +145,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id", "error"), }, @@ -156,7 +158,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id2", "error"), }, @@ -189,7 +191,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveInstanceUniqueConstraints(), }, @@ -217,7 +219,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveInstanceUniqueConstraints(), }, @@ -230,7 +232,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveInstanceUniqueConstraints(), }, @@ -263,7 +265,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveUniqueConstraint("test", "id"), }, @@ -291,7 +293,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveGlobalUniqueConstraint("test", "id"), }, @@ -319,7 +321,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveUniqueConstraint("test", "id"), eventstore.NewRemoveUniqueConstraint("test", "id2"), @@ -353,7 +355,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveUniqueConstraint("test", "id"), }, @@ -366,7 +368,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewRemoveUniqueConstraint("test", "id2"), }, @@ -399,7 +401,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: 
&eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id", ""), }, @@ -433,7 +435,7 @@ func Test_uniqueConstraints(t *testing.T) { eventstore.AppendAggregate("", "", ""), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ UniqueConstraints: []*eventstore.UniqueConstraint{ eventstore.NewAddEventUniqueConstraint("test", "id", "My.Error"), }, @@ -786,7 +788,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -841,7 +843,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -857,7 +859,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -926,7 +928,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -942,7 +944,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "type2", "id2"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1011,7 +1013,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1067,7 +1069,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1123,7 +1125,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1139,7 +1141,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1214,7 +1216,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1230,7 +1232,7 @@ func Test_push(t *testing.T) { eventstore.AppendAggregate("owner", "testType", "testID"), ).Aggregates()[0], }, - Command: eventstore.Command{ + Command: &eventstore.Command{ Action: eventstore.Action[any]{ Creator: "gigi", Revision: 1, @@ -1286,6 +1288,7 @@ func Test_push(t *testing.T) { }, }, } + initPushStmt("postgres") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dbMock := mock.NewSQLMock(t, append([]mock.Expectation{mock.ExpectBegin(nil)}, tt.args.expectations...)...) 
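These test updates follow from the `command` wrapper now embedding a `*eventstore.Command` instead of a value, and from the push API switching to pointer commands. A minimal sketch of what that looks like for calling code (package name, creator, event type and payload are illustrative, not part of the patch):

```go
package example

import (
	"github.com/zitadel/zitadel/internal/v2/eventstore"
)

// buildAppendOpt shows how callers construct commands after the switch
// from value commands ([]Command) to pointer commands ([]*Command).
func buildAppendOpt() eventstore.PushAggregateOpt {
	cmd := &eventstore.Command{
		Action: eventstore.Action[any]{
			Creator:  "user-123",                        // illustrative creator id
			Type:     "example.added",                   // illustrative event type
			Revision: 1,
			Payload:  map[string]string{"name": "gigi"}, // illustrative payload
		},
	}
	// AppendCommands is now variadic over *Command and stores the pointers as-is.
	return eventstore.AppendCommands(cmd)
}
```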
diff --git a/internal/v2/eventstore/postgres/query.go b/internal/v2/eventstore/postgres/query.go index ca7a081c75..3545bfb2b6 100644 --- a/internal/v2/eventstore/postgres/query.go +++ b/internal/v2/eventstore/postgres/query.go @@ -194,6 +194,7 @@ func writeAggregateFilters(stmt *database.Statement, filters []*eventstore.Aggre func writeAggregateFilter(stmt *database.Statement, filter *eventstore.AggregateFilter) { conditions := definedConditions([]*condition{ + {column: "owner", condition: filter.Owners()}, {column: "aggregate_type", condition: filter.Type()}, {column: "aggregate_id", condition: filter.IDs()}, }) diff --git a/internal/v2/eventstore/postgres/storage.go b/internal/v2/eventstore/postgres/storage.go index d2bf2a1195..c983cf83f7 100644 --- a/internal/v2/eventstore/postgres/storage.go +++ b/internal/v2/eventstore/postgres/storage.go @@ -3,6 +3,8 @@ package postgres import ( "context" + "github.com/zitadel/logging" + "github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/v2/eventstore" ) @@ -10,15 +12,35 @@ import ( var ( _ eventstore.Pusher = (*Storage)(nil) _ eventstore.Querier = (*Storage)(nil) + + pushPositionStmt string ) type Storage struct { client *database.DB + config *Config } -func New(client *database.DB) *Storage { +type Config struct { + MaxRetries uint32 +} + +func New(client *database.DB, config *Config) *Storage { + initPushStmt(client.Type()) return &Storage{ client: client, + config: config, + } +} + +func initPushStmt(typ string) { + switch typ { + case "cockroach": + pushPositionStmt = ", hlc_to_timestamp(cluster_logical_timestamp()), cluster_logical_timestamp()" + case "postgres": + pushPositionStmt = ", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())" + default: + logging.WithFields("database_type", typ).Panic("position statement for type not implemented") } } diff --git a/internal/v2/eventstore/push.go b/internal/v2/eventstore/push.go index b426078b8e..3864c0d255 100644 --- a/internal/v2/eventstore/push.go +++ b/internal/v2/eventstore/push.go @@ -87,7 +87,7 @@ type PushAggregate struct { // owner of the aggregate owner string // Commands is an ordered list of changes on the aggregate - commands []Command + commands []*Command // CurrentSequence checks the current state of the aggregate. // The following types match the current sequence of the aggregate as described: // * nil or [SequenceIgnore]: Not relevant to add the commands @@ -122,7 +122,7 @@ func (pa *PushAggregate) Owner() string { return pa.owner } -func (pa *PushAggregate) Commands() []Command { +func (pa *PushAggregate) Commands() []*Command { return pa.commands } @@ -165,7 +165,7 @@ func CurrentSequenceAtLeast(sequence uint32) PushAggregateOpt { } } -func AppendCommands(commands ...Command) PushAggregateOpt { +func AppendCommands(commands ...*Command) PushAggregateOpt { return func(pa *PushAggregate) { pa.commands = append(pa.commands, commands...) } diff --git a/internal/v2/eventstore/query.go b/internal/v2/eventstore/query.go index eddb1aedde..c9b3cecd37 100644 --- a/internal/v2/eventstore/query.go +++ b/internal/v2/eventstore/query.go @@ -255,6 +255,7 @@ func NewAggregateFilter(typ string, opts ...AggregateFilterOpt) *AggregateFilter type AggregateFilter struct { typ string ids []string + owners *filter[[]string] events []*EventFilter } @@ -273,6 +274,13 @@ func (f *AggregateFilter) IDs() database.Condition { return database.NewListContains(f.ids...) 
} +func (f *AggregateFilter) Owners() database.Condition { + if f.owners == nil { + return nil + } + return f.owners.condition +} + func (f *AggregateFilter) Events() []*EventFilter { return f.events } @@ -298,6 +306,61 @@ func AggregateIDs(ids ...string) AggregateFilterOpt { } } +func AggregateOwnersEqual(owners ...string) AggregateFilterOpt { + return func(f *AggregateFilter) { + var cond database.Condition + switch len(owners) { + case 0: + return + case 1: + cond = database.NewTextEqual(owners[0]) + default: + cond = database.NewListEquals(owners...) + } + f.owners = &filter[[]string]{ + condition: cond, + value: &owners, + } + } +} + +func AggregateOwnersContains(owners ...string) AggregateFilterOpt { + return func(f *AggregateFilter) { + var cond database.Condition + switch len(owners) { + case 0: + return + case 1: + cond = database.NewTextEqual(owners[0]) + default: + cond = database.NewListContains(owners...) + } + + f.owners = &filter[[]string]{ + condition: cond, + value: &owners, + } + } +} + +func AggregateOwnersNotContains(owners ...string) AggregateFilterOpt { + return func(f *AggregateFilter) { + var cond database.Condition + switch len(owners) { + case 0: + return + case 1: + cond = database.NewTextUnequal(owners[0]) + default: + cond = database.NewListNotContains(owners...) + } + f.owners = &filter[[]string]{ + condition: cond, + value: &owners, + } + } +} + func AppendEvent(opts ...EventFilterOpt) AggregateFilterOpt { return AppendEvents(NewEventFilter(opts...)) } diff --git a/internal/v2/projection/highest_position.go b/internal/v2/projection/highest_position.go new file mode 100644 index 0000000000..180d477809 --- /dev/null +++ b/internal/v2/projection/highest_position.go @@ -0,0 +1,15 @@ +package projection + +import ( + "github.com/zitadel/zitadel/internal/v2/eventstore" +) + +type HighestPosition eventstore.GlobalPosition + +var _ eventstore.Reducer = (*HighestPosition)(nil) + +// Reduce implements eventstore.Reducer. +func (h *HighestPosition) Reduce(events ...*eventstore.StorageEvent) error { + *h = HighestPosition(events[len(events)-1].Position) + return nil +} diff --git a/internal/v2/readmodel/last_successful_mirror.go b/internal/v2/readmodel/last_successful_mirror.go new file mode 100644 index 0000000000..80b436b896 --- /dev/null +++ b/internal/v2/readmodel/last_successful_mirror.go @@ -0,0 +1,72 @@ +package readmodel + +import ( + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/v2/system" + "github.com/zitadel/zitadel/internal/v2/system/mirror" +) + +type LastSuccessfulMirror struct { + ID string + Position float64 + source string +} + +func NewLastSuccessfulMirror(source string) *LastSuccessfulMirror { + return &LastSuccessfulMirror{ + source: source, + } +} + +var _ eventstore.Reducer = (*LastSuccessfulMirror)(nil) + +func (p *LastSuccessfulMirror) Filter() *eventstore.Filter { + return eventstore.NewFilter( + eventstore.AppendAggregateFilter( + system.AggregateType, + eventstore.AggregateOwnersEqual(system.AggregateOwner), + eventstore.AppendEvent( + eventstore.SetEventTypes( + mirror.SucceededType, + ), + eventstore.EventCreatorsEqual(mirror.Creator), + ), + ), + eventstore.FilterPagination( + eventstore.Descending(), + ), + ) +} + +// Reduce implements eventstore.Reducer. 
+func (h *LastSuccessfulMirror) Reduce(events ...*eventstore.StorageEvent) (err error) { + for _, event := range events { + if event.Type == mirror.SucceededType { + err = h.reduceSucceeded(event) + } + if err != nil { + return err + } + } + return nil +} + +func (h *LastSuccessfulMirror) reduceSucceeded(event *eventstore.StorageEvent) error { + // if position is set we skip all older events + if h.Position > 0 { + return nil + + } + succeededEvent, err := mirror.SucceededEventFromStorage(event) + if err != nil { + return err + } + + if h.source != succeededEvent.Payload.Source { + return nil + } + + h.Position = succeededEvent.Payload.Position + + return nil +} diff --git a/internal/v2/system/aggregate.go b/internal/v2/system/aggregate.go new file mode 100644 index 0000000000..f5fb2ea13b --- /dev/null +++ b/internal/v2/system/aggregate.go @@ -0,0 +1,8 @@ +package system + +const ( + AggregateType = "system" + AggregateOwner = "SYSTEM" + AggregateInstance = "" + EventTypePrefix = AggregateType + "." +) diff --git a/internal/v2/system/mirror/aggregate.go b/internal/v2/system/mirror/aggregate.go new file mode 100644 index 0000000000..2e51b84515 --- /dev/null +++ b/internal/v2/system/mirror/aggregate.go @@ -0,0 +1,8 @@ +package mirror + +import "github.com/zitadel/zitadel/internal/v2/system" + +const ( + Creator = "MIRROR" + eventTypePrefix = system.EventTypePrefix + "mirror." +) diff --git a/internal/v2/system/mirror/failed.go b/internal/v2/system/mirror/failed.go new file mode 100644 index 0000000000..141a45c509 --- /dev/null +++ b/internal/v2/system/mirror/failed.go @@ -0,0 +1,52 @@ +package mirror + +import ( + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/zerrors" +) + +type failedPayload struct { + Cause string `json:"cause"` + // Source is the name of the database data are mirrored to + Source string `json:"source"` +} + +const FailedType = eventTypePrefix + "failed" + +type FailedEvent eventstore.Event[failedPayload] + +var _ eventstore.TypeChecker = (*FailedEvent)(nil) + +func (e *FailedEvent) ActionType() string { + return FailedType +} + +func FailedEventFromStorage(event *eventstore.StorageEvent) (e *FailedEvent, _ error) { + if event.Type != e.ActionType() { + return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-bwB9l", "Errors.Invalid.Event.Type") + } + + payload, err := eventstore.UnmarshalPayload[failedPayload](event.Payload) + if err != nil { + return nil, err + } + + return &FailedEvent{ + StorageEvent: event, + Payload: payload, + }, nil +} + +func NewFailedCommand(source string, cause error) *eventstore.Command { + return &eventstore.Command{ + Action: eventstore.Action[any]{ + Creator: Creator, + Type: FailedType, + Payload: failedPayload{ + Cause: cause.Error(), + Source: source, + }, + Revision: 1, + }, + } +} diff --git a/internal/v2/system/mirror/started.go b/internal/v2/system/mirror/started.go new file mode 100644 index 0000000000..1b18d0a548 --- /dev/null +++ b/internal/v2/system/mirror/started.go @@ -0,0 +1,68 @@ +package mirror + +import ( + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/zerrors" +) + +type startedPayload struct { + // Destination is the name of the database data are mirrored to + Destination string `json:"destination"` + // Either Instances or System needs to be set + Instances []string `json:"instances,omitempty"` + System bool `json:"system,omitempty"` +} + +const StartedType = eventTypePrefix + "started" + +type StartedEvent 
eventstore.Event[startedPayload] + +var _ eventstore.TypeChecker = (*StartedEvent)(nil) + +func (e *StartedEvent) ActionType() string { + return StartedType +} + +func StartedEventFromStorage(event *eventstore.StorageEvent) (e *StartedEvent, _ error) { + if event.Type != e.ActionType() { + return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-bwB9l", "Errors.Invalid.Event.Type") + } + + payload, err := eventstore.UnmarshalPayload[startedPayload](event.Payload) + if err != nil { + return nil, err + } + + return &StartedEvent{ + StorageEvent: event, + Payload: payload, + }, nil +} + +func NewStartedSystemCommand(destination string) *eventstore.Command { + return newStartedCommand(&startedPayload{ + Destination: destination, + System: true, + }) +} + +func NewStartedInstancesCommand(destination string, instances []string) (*eventstore.Command, error) { + if len(instances) == 0 { + return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-8YkrE", "Errors.Mirror.NoInstances") + } + return newStartedCommand(&startedPayload{ + Destination: destination, + Instances: instances, + }), nil +} + +func newStartedCommand(payload *startedPayload) *eventstore.Command { + return &eventstore.Command{ + Action: eventstore.Action[any]{ + Creator: Creator, + Type: StartedType, + Revision: 1, + Payload: *payload, + }, + } +} diff --git a/internal/v2/system/mirror/succeeded.go b/internal/v2/system/mirror/succeeded.go new file mode 100644 index 0000000000..6d0fba2c25 --- /dev/null +++ b/internal/v2/system/mirror/succeeded.go @@ -0,0 +1,53 @@ +package mirror + +import ( + "github.com/zitadel/zitadel/internal/v2/eventstore" + "github.com/zitadel/zitadel/internal/zerrors" +) + +type succeededPayload struct { + // Source is the name of the database data are mirrored from + Source string `json:"source"` + // Position until data will be mirrored + Position float64 `json:"position"` +} + +const SucceededType = eventTypePrefix + "succeeded" + +type SucceededEvent eventstore.Event[succeededPayload] + +var _ eventstore.TypeChecker = (*SucceededEvent)(nil) + +func (e *SucceededEvent) ActionType() string { + return SucceededType +} + +func SucceededEventFromStorage(event *eventstore.StorageEvent) (e *SucceededEvent, _ error) { + if event.Type != e.ActionType() { + return nil, zerrors.ThrowInvalidArgument(nil, "MIRRO-xh5IW", "Errors.Invalid.Event.Type") + } + + payload, err := eventstore.UnmarshalPayload[succeededPayload](event.Payload) + if err != nil { + return nil, err + } + + return &SucceededEvent{ + StorageEvent: event, + Payload: payload, + }, nil +} + +func NewSucceededCommand(source string, position float64) *eventstore.Command { + return &eventstore.Command{ + Action: eventstore.Action[any]{ + Creator: Creator, + Type: SucceededType, + Revision: 1, + Payload: succeededPayload{ + Source: source, + Position: position, + }, + }, + } +}
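Taken together, the new `system/mirror` package gives the mirror command a small event vocabulary: started, succeeded, and failed. A minimal sketch of how a run could be recorded with these constructors (database names, instance ids and the position value are illustrative; how and when the commands are pushed is up to the calling code):

```go
package example

import (
	"errors"

	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/system/mirror"
)

// mirrorRunCommands sketches the lifecycle events a mirror run can emit.
func mirrorRunCommands() ([]*eventstore.Command, error) {
	// Record that a run mirroring two specific instances has started.
	started, err := mirror.NewStartedInstancesCommand("zitadel-destination", []string{"instance-1", "instance-2"})
	if err != nil {
		return nil, err
	}

	// On success, store the source name and the position up to which data was
	// mirrored; the LastSuccessfulMirror read model later reads exactly this payload.
	succeeded := mirror.NewSucceededCommand("zitadel-source", 1715.4242)

	// On failure, the cause is recorded alongside the source name instead.
	failed := mirror.NewFailedCommand("zitadel-source", errors.New("connection reset by peer"))
	_ = failed // would be pushed instead of the succeeded command when the run fails

	return []*eventstore.Command{started, succeeded}, nil
}
```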