From 7eb54e4c7beb0203cdbc82b0688252c9e98a6e16 Mon Sep 17 00:00:00 2001
From: Shubham Singh Sugara <37795429+shubhamsugara22@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:12:08 +0530
Subject: [PATCH 01/30] fix: Update Defaults.yaml (#8731)

# Which Problems Are Solved

The primary issue addressed in this PR is that the defaults.yaml file contains escaped characters (like `&lt;` for `<` and `&gt;` for `>`) in message texts, which prevents valid HTML rendering in certain parts of the Zitadel platform. These escaped characters are used in user-facing content (e.g., email templates or notifications), resulting in improperly displayed text, where HTML elements like line breaks or bold text don't render correctly.

# How the Problems Are Solved

The solution replaces the escaped characters with their corresponding HTML tags in the defaults.yaml file, ensuring that the HTML renders correctly in the emails or user interfaces where these messages are displayed. This update ensures that:

- The HTML in these message templates is rendered properly, improving the user experience.
- The content looks professional and adheres to web standards for displaying HTML content.

# Additional Changes

N/A

# Additional Context

N/A

- Closes #8531

Co-authored-by: Max Peintner
---
 cmd/defaults.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/defaults.yaml b/cmd/defaults.yaml
index 81c312137c..90c2db1f01 100644
--- a/cmd/defaults.yaml
+++ b/cmd/defaults.yaml
@@ -854,7 +854,7 @@ DefaultInstance:
       PreHeader: User initialisieren
       Subject: User initialisieren
       Greeting: Hallo {{.DisplayName}},
-      Text: Dieser Benutzer wurde soeben im Zitadel erstellt. Mit dem Benutzernamen &lt;br&gt;&lt;strong&gt;{{.PreferredLoginName}}&lt;/strong&gt;&lt;br&gt; kannst du dich anmelden. Nutze den untenstehenden Button, um die Initialisierung abzuschliessen &lt;br&gt;(Code &lt;strong&gt;{{.Code}}&lt;/strong&gt;).&lt;br&gt; Falls du dieses Mail nicht angefordert hast, kannst du es einfach ignorieren.
+      Text: Dieser Benutzer wurde soeben im Zitadel erstellt. Mit dem Benutzernamen <br><strong>{{.PreferredLoginName}}</strong><br> kannst du dich anmelden. Nutze den untenstehenden Button, um die Initialisierung abzuschliessen <br>(Code <strong>{{.Code}}</strong>).<br> Falls du dieses Mail nicht angefordert hast, kannst du es einfach ignorieren.
       ButtonText: Initialisierung abschliessen
     - MessageTextType: PasswordReset
       Language: de
@@ -862,7 +862,7 @@ DefaultInstance:
       PreHeader: Passwort zurücksetzen
       Subject: Passwort zurücksetzen
       Greeting: Hallo {{.DisplayName}},
-      Text: Wir haben eine Anfrage für das Zurücksetzen deines Passwortes bekommen. Du kannst den untenstehenden Button verwenden, um dein Passwort zurückzusetzen &lt;br&gt;(Code &lt;strong&gt;{{.Code}}&lt;/strong&gt;).&lt;br&gt; Falls du dieses Mail nicht angefordert hast, kannst du es ignorieren.
+      Text: Wir haben eine Anfrage für das Zurücksetzen deines Passwortes bekommen. Du kannst den untenstehenden Button verwenden, um dein Passwort zurückzusetzen <br>(Code <strong>{{.Code}}</strong>).<br> Falls du dieses Mail nicht angefordert hast, kannst du es ignorieren.
       ButtonText: Passwort zurücksetzen
     - MessageTextType: VerifyEmail
       Language: de
@@ -870,7 +870,7 @@ DefaultInstance:
       PreHeader: Email verifizieren
       Subject: Email verifizieren
       Greeting: Hallo {{.DisplayName}},
-      Text: Eine neue E-Mail Adresse wurde hinzugefügt. Bitte verwende den untenstehenden Button um diese zu verifizieren &lt;br&gt;(Code &lt;strong&gt;{{.Code}}&lt;/strong&gt;).&lt;br&gt; Falls du deine E-Mail Adresse nicht selber hinzugefügt hast, kannst du dieses E-Mail ignorieren.
+      Text: Eine neue E-Mail Adresse wurde hinzugefügt. Bitte verwende den untenstehenden Button um diese zu verifizieren <br>(Code <strong>{{.Code}}</strong>).<br>
Falls du deine E-Mail Adresse nicht selber hinzugefügt hast, kannst du dieses E-Mail ignorieren. ButtonText: Email verifizieren - MessageTextType: VerifyPhone Language: de From 3c4a92a981243b4bb30f83a3ad43c1cda9d895cf Mon Sep 17 00:00:00 2001 From: Fabi Date: Tue, 15 Oct 2024 11:42:16 +0200 Subject: [PATCH 02/30] chore(github): add type to issue templates (#8775) # Which Problems Are Solved Github introduced the new issue types, which we want to add to our issues. Starting point is to change the templates, so we can add the right types. --- .github/ISSUE_TEMPLATE/docs.yaml | 1 + .../{proposal.yaml => enhancement.yaml} | 1 + .github/ISSUE_TEMPLATE/improvement.yaml | 54 ------------------- 3 files changed, 2 insertions(+), 54 deletions(-) rename .github/ISSUE_TEMPLATE/{proposal.yaml => enhancement.yaml} (98%) delete mode 100644 .github/ISSUE_TEMPLATE/improvement.yaml diff --git a/.github/ISSUE_TEMPLATE/docs.yaml b/.github/ISSUE_TEMPLATE/docs.yaml index 04c1c0cdb1..d3f82b9940 100644 --- a/.github/ISSUE_TEMPLATE/docs.yaml +++ b/.github/ISSUE_TEMPLATE/docs.yaml @@ -1,6 +1,7 @@ name: 📄 Documentation description: Create an issue for missing or wrong documentation. labels: ["docs"] +type: task body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/proposal.yaml b/.github/ISSUE_TEMPLATE/enhancement.yaml similarity index 98% rename from .github/ISSUE_TEMPLATE/proposal.yaml rename to .github/ISSUE_TEMPLATE/enhancement.yaml index 5abded3eac..5f565e604c 100644 --- a/.github/ISSUE_TEMPLATE/proposal.yaml +++ b/.github/ISSUE_TEMPLATE/enhancement.yaml @@ -1,6 +1,7 @@ name: 💡 Proposal / Feature request description: "Create an issue for a feature request/proposal." labels: ["enhancement"] +type: enhancement body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/improvement.yaml b/.github/ISSUE_TEMPLATE/improvement.yaml deleted file mode 100644 index 2f68cb6170..0000000000 --- a/.github/ISSUE_TEMPLATE/improvement.yaml +++ /dev/null @@ -1,54 +0,0 @@ -name: 🛠️ Improvement -description: "Create an new issue for an improvment in ZITADEL" -labels: ["improvement"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this improvement request - - type: checkboxes - id: preflight - attributes: - label: Preflight Checklist - options: - - label: - I could not find a solution in the existing issues, docs, nor discussions - required: true - - label: - I have joined the [ZITADEL chat](https://zitadel.com/chat) - - type: textarea - id: problem - attributes: - label: Describe your problem - description: Please describe your problem this improvement is supposed to solve. - placeholder: Describe the problem you have - validations: - required: true - - type: textarea - id: solution - attributes: - label: Describe your ideal solution - description: Which solution do you propose? - placeholder: As a [type of user], I want [some goal] so that [some reason]. - validations: - required: true - - type: input - id: version - attributes: - label: Version - description: Which version of the ZITADEL are you using. - - type: dropdown - id: environment - attributes: - label: Environment - description: How do you use ZITADEL? - options: - - ZITADEL Cloud - - Self-hosted - validations: - required: true - - type: textarea - id: additional - attributes: - label: Additional Context - description: Please add any other infos that could be useful. 
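For reference, after the first hunk of the issue-template patch above is applied, the header of `.github/ISSUE_TEMPLATE/docs.yaml` would look roughly like the sketch below. This is assembled from the diff context shown above, not copied verbatim from the repository; the remainder of the form definition is elided in a comment.

```yaml
name: 📄 Documentation
description: Create an issue for missing or wrong documentation.
labels: ["docs"]
type: task          # newly added GitHub issue type
body:
  - type: markdown
    attributes:
      # ... rest of the form definition unchanged
```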
From 4ebc23aa1fffaa7502cc7be5cdf13ce600c4ef2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20M=C3=B6hlmann?= Date: Tue, 15 Oct 2024 15:26:16 +0300 Subject: [PATCH 03/30] fix(load-test): correct k6 command (#8760) --- load-test/Makefile | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/load-test/Makefile b/load-test/Makefile index 61494e27cb..672ba151fa 100644 --- a/load-test/Makefile +++ b/load-test/Makefile @@ -4,42 +4,43 @@ ZITADEL_HOST ?= ADMIN_LOGIN_NAME ?= ADMIN_PASSWORD ?= +K6 := ./../../xk6-modules/k6 + .PHONY: human_password_login human_password_login: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/human_password_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/human_password_login.js --vus ${VUS} --duration ${DURATION} .PHONY: machine_pat_login machine_pat_login: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_pat_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_pat_login.js --vus ${VUS} --duration ${DURATION} .PHONY: machine_client_credentials_login machine_client_credentials_login: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_client_credentials_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_client_credentials_login.js --vus ${VUS} --duration ${DURATION} .PHONY: user_info user_info: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/user_info.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/user_info.js --vus ${VUS} --duration ${DURATION} .PHONY: manipulate_user manipulate_user: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/manipulate_user.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/manipulate_user.js --vus ${VUS} --duration ${DURATION} .PHONY: introspect introspect: ensure_modules bundle go install go.k6.io/xk6/cmd/xk6@latest cd ../../xk6-modules && xk6 build --with xk6-zitadel=. - ./../../xk6-modules/k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/introspection.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/introspection.js --vus ${VUS} --duration ${DURATION} .PHONY: add_session add_session: bundle - k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/session.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/session.js --vus ${VUS} --duration ${DURATION} .PHONY: machine_jwt_profile_grant machine_jwt_profile_grant: ensure_modules ensure_key_pair bundle go install go.k6.io/xk6/cmd/xk6@latest cd ../../xk6-modules && xk6 build --with xk6-zitadel=. 
- ./../../xk6-modules/k6 run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant.js --iterations 1 - # --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant.js --vus ${VUS} --duration ${DURATION} .PHONY: machine_jwt_profile_grant_single_user machine_jwt_profile_grant_single_user: ensure_modules ensure_key_pair bundle @@ -64,6 +65,8 @@ endif bundle: npm i npm run bundle + go install go.k6.io/xk6/cmd/xk6@latest + cd ../../xk6-modules && xk6 build --with xk6-zitadel=. .PHONY: ensure_key_pair ensure_key_pair: @@ -75,4 +78,4 @@ ifeq (,$(wildcard $(PWD)/.keys/key.pem)) endif ifeq (,$(wildcard $(PWD)/.keys/key.pem.pub)) openssl rsa -in .keys/key.pem -outform PEM -pubout -out .keys/key.pem.pub -endif \ No newline at end of file +endif From c21e17151919cc8e843141e56069c901822a1e02 Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:09:32 +0200 Subject: [PATCH 04/30] fix: add allowRegister check for register handling in login (#8782) # Which Problems Are Solved There is currently the possibility that you can jump to the register path, even if register is disallowed through the settings. # How the Problems Are Solved Check before handling the HTTP requests if register is allowed. # Additional Changes Function to determine the resourceowner for all register related functionality in the login. # Additional Context closes #8123 --- .../api/ui/login/external_provider_handler.go | 29 ++--------- internal/api/ui/login/register_handler.go | 50 +++++++++++++------ 2 files changed, 38 insertions(+), 41 deletions(-) diff --git a/internal/api/ui/login/external_provider_handler.go b/internal/api/ui/login/external_provider_handler.go index 1f835c855f..15046d25e8 100644 --- a/internal/api/ui/login/external_provider_handler.go +++ b/internal/api/ui/login/external_provider_handler.go @@ -525,12 +525,7 @@ func (l *Login) autoLinkUser(w http.ResponseWriter, r *http.Request, authReq *do // - creation by user // - linking to existing user func (l *Login) externalUserNotExisting(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, provider *query.IDPTemplate, externalUser *domain.ExternalUser, changed bool) { - resourceOwner := authz.GetInstance(r.Context()).DefaultOrganisationID() - - if authReq.RequestedOrgID != "" && authReq.RequestedOrgID != resourceOwner { - resourceOwner = authReq.RequestedOrgID - } - + resourceOwner := determineResourceOwner(r.Context(), authReq) orgIAMPolicy, err := l.getOrgDomainPolicy(r, resourceOwner) if err != nil { l.renderExternalNotFoundOption(w, r, authReq, nil, nil, nil, err) @@ -587,35 +582,21 @@ func (l *Login) renderExternalNotFoundOption(w http.ResponseWriter, r *http.Requ if err != nil { errID, errMessage = l.getErrorMessage(r, err) } + resourceOwner := determineResourceOwner(r.Context(), authReq) if orgIAMPolicy == nil { - resourceOwner := authz.GetInstance(r.Context()).DefaultOrganisationID() - - if authReq.RequestedOrgID != "" && authReq.RequestedOrgID != resourceOwner { - resourceOwner = authReq.RequestedOrgID - } - orgIAMPolicy, err = l.getOrgDomainPolicy(r, resourceOwner) if err != nil { l.renderError(w, r, authReq, err) return } - } if human == nil || idpLink == nil { - // TODO (LS): how do we get multiple and why do we use the last of them (taken as is)? 
linkingUser := authReq.LinkingUsers[len(authReq.LinkingUsers)-1] human, idpLink, _ = mapExternalUserToLoginUser(linkingUser, orgIAMPolicy.UserLoginMustBeDomain) } - var resourceOwner string - if authReq != nil { - resourceOwner = authReq.RequestedOrgID - } - if resourceOwner == "" { - resourceOwner = authz.GetInstance(r.Context()).DefaultOrganisationID() - } labelPolicy, err := l.getLabelPolicy(r, resourceOwner) if err != nil { l.renderError(w, r, authReq, err) @@ -718,11 +699,7 @@ func (l *Login) handleExternalNotFoundOptionCheck(w http.ResponseWriter, r *http // // it is called from either the [autoCreateExternalUser] or [handleExternalNotFoundOptionCheck] func (l *Login) registerExternalUser(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, externalUser *domain.ExternalUser) { - resourceOwner := authz.GetInstance(r.Context()).DefaultOrganisationID() - - if authReq.RequestedOrgID != "" && authReq.RequestedOrgID != resourceOwner { - resourceOwner = authReq.RequestedOrgID - } + resourceOwner := determineResourceOwner(r.Context(), authReq) orgIamPolicy, err := l.getOrgDomainPolicy(r, resourceOwner) if err != nil { diff --git a/internal/api/ui/login/register_handler.go b/internal/api/ui/login/register_handler.go index d487903a85..89e0eec7b3 100644 --- a/internal/api/ui/login/register_handler.go +++ b/internal/api/ui/login/register_handler.go @@ -1,6 +1,7 @@ package login import ( + "context" "net/http" "golang.org/x/text/language" @@ -40,6 +41,13 @@ type registerData struct { OrgRegister bool } +func determineResourceOwner(ctx context.Context, authRequest *domain.AuthRequest) string { + if authRequest != nil && authRequest.RequestedOrgID != "" { + return authRequest.RequestedOrgID + } + return authz.GetInstance(ctx).DefaultOrganisationID() +} + func (l *Login) handleRegister(w http.ResponseWriter, r *http.Request) { data := new(registerFormData) authRequest, err := l.getAuthRequestAndParseData(r, data) @@ -47,9 +55,30 @@ func (l *Login) handleRegister(w http.ResponseWriter, r *http.Request) { l.renderError(w, r, authRequest, err) return } + if err := l.checkRegistrationAllowed(r, determineResourceOwner(r.Context(), authRequest), authRequest); err != nil { + l.renderError(w, r, authRequest, err) + return + } l.renderRegister(w, r, authRequest, data, nil) } +func (l *Login) checkRegistrationAllowed(r *http.Request, orgID string, authReq *domain.AuthRequest) error { + if authReq != nil { + if registrationAllowed(authReq) { + return nil + } + return zerrors.ThrowPreconditionFailed(nil, "VIEW-RRGRXz4kGw", "Errors.Org.LoginPolicy.RegistrationNotAllowed") + } + loginPolicy, err := l.getLoginPolicy(r, orgID) + if err != nil { + return err + } + if loginPolicy.AllowRegister && loginPolicy.AllowUsernamePassword { + return nil + } + return zerrors.ThrowPreconditionFailed(nil, "VIEW-Vq3bduAacD", "Errors.Org.LoginPolicy.RegistrationNotAllowed") +} + func (l *Login) handleRegisterCheck(w http.ResponseWriter, r *http.Request) { data := new(registerFormData) authRequest, err := l.getAuthRequestAndParseData(r, data) @@ -57,17 +86,16 @@ func (l *Login) handleRegisterCheck(w http.ResponseWriter, r *http.Request) { l.renderError(w, r, authRequest, err) return } + resourceOwner := determineResourceOwner(r.Context(), authRequest) + if err := l.checkRegistrationAllowed(r, resourceOwner, authRequest); err != nil { + l.renderError(w, r, authRequest, err) + return + } if data.Password != data.Password2 { err := zerrors.ThrowInvalidArgument(nil, "VIEW-KaGue", 
"Errors.User.Password.ConfirmationWrong") l.renderRegister(w, r, authRequest, data, err) return } - - resourceOwner := authz.GetInstance(r.Context()).DefaultOrganisationID() - - if authRequest != nil && authRequest.RequestedOrgID != "" && authRequest.RequestedOrgID != resourceOwner { - resourceOwner = authRequest.RequestedOrgID - } // For consistency with the external authentication flow, // the setMetadata() function is provided on the pre creation hook, for now, // like for the ExternalAuthentication flow. @@ -126,15 +154,7 @@ func (l *Login) renderRegister(w http.ResponseWriter, r *http.Request, authReque formData.Language = l.renderer.ReqLang(translator, r).String() } - var resourceOwner string - if authRequest != nil { - resourceOwner = authRequest.RequestedOrgID - } - - if resourceOwner == "" { - resourceOwner = authz.GetInstance(r.Context()).DefaultOrganisationID() - } - + resourceOwner := determineResourceOwner(r.Context(), authRequest) data := registerData{ baseData: l.getBaseData(r, authRequest, translator, "RegistrationUser.Title", "RegistrationUser.Description", errID, errMessage), registerFormData: *formData, From cc8d4fe17cd75b351eb86241dfadcc63fb0a5268 Mon Sep 17 00:00:00 2001 From: Silvan Date: Thu, 17 Oct 2024 21:15:55 +0200 Subject: [PATCH 05/30] chore(load-test): add csv output (#8783) # Which Problems Are Solved Load tests currently do not output details about the data messured. # How the Problems Are Solved Added the `--out` flag to all load tests --- .gitignore | 1 + load-test/Makefile | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index c08cbae77f..17aee6bbe9 100644 --- a/.gitignore +++ b/.gitignore @@ -87,4 +87,5 @@ go.work.sum load-test/node_modules load-test/yarn-error.log load-test/dist +load-test/output/* .vercel diff --git a/load-test/Makefile b/load-test/Makefile index 672ba151fa..3fece26aa3 100644 --- a/load-test/Makefile +++ b/load-test/Makefile @@ -3,50 +3,51 @@ DURATION ?= "200s" ZITADEL_HOST ?= ADMIN_LOGIN_NAME ?= ADMIN_PASSWORD ?= +DATE := $(shell date '+%d-%H:%M:%S') K6 := ./../../xk6-modules/k6 .PHONY: human_password_login human_password_login: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/human_password_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/human_password_login.js --vus ${VUS} --duration ${DURATION} --out csv=output/human_password_login_${DATE}.csv .PHONY: machine_pat_login machine_pat_login: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_pat_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_pat_login.js --vus ${VUS} --duration ${DURATION} --out csv=output/machine_pat_login_${DATE}.csv .PHONY: machine_client_credentials_login machine_client_credentials_login: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_client_credentials_login.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_client_credentials_login.js --vus ${VUS} --duration ${DURATION} --out csv=output/machine_client_credentials_login_${DATE}.csv .PHONY: user_info user_info: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/user_info.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/user_info.js --vus ${VUS} --duration 
${DURATION} --out csv=output/user_info_${DATE}.csv .PHONY: manipulate_user manipulate_user: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/manipulate_user.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/manipulate_user.js --vus ${VUS} --duration ${DURATION} --out csv=output/manipulate_user_${DATE}.csv .PHONY: introspect introspect: ensure_modules bundle go install go.k6.io/xk6/cmd/xk6@latest cd ../../xk6-modules && xk6 build --with xk6-zitadel=. - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/introspection.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/introspection.js --vus ${VUS} --duration ${DURATION} --out csv=output/introspect_${DATE}.csv .PHONY: add_session add_session: bundle - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/session.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/session.js --vus ${VUS} --duration ${DURATION} --out csv=output/add_session_${DATE}.csv .PHONY: machine_jwt_profile_grant machine_jwt_profile_grant: ensure_modules ensure_key_pair bundle go install go.k6.io/xk6/cmd/xk6@latest cd ../../xk6-modules && xk6 build --with xk6-zitadel=. - ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant.js --vus ${VUS} --duration ${DURATION} --out csv=output/machine_jwt_profile_grant_${DATE}.csv .PHONY: machine_jwt_profile_grant_single_user machine_jwt_profile_grant_single_user: ensure_modules ensure_key_pair bundle go install go.k6.io/xk6/cmd/xk6@latest cd ../../xk6-modules && xk6 build --with xk6-zitadel=. 
- ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant_single_user.js --vus ${VUS} --duration ${DURATION} + ${K6} run --summary-trend-stats "min,avg,max,p(50),p(95),p(99)" dist/machine_jwt_profile_grant_single_user.js --vus ${VUS} --duration ${DURATION} --out csv=output/machine_jwt_profile_grant_single_user_${DATE}.csv .PHONY: lint lint: @@ -63,6 +64,7 @@ endif .PHONY: bundle bundle: + mkdir -p output npm i npm run bundle go install go.k6.io/xk6/cmd/xk6@latest From 8d973636428484193b5c9b5fe6c9ba8f70622662 Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Thu, 17 Oct 2024 23:20:57 +0200 Subject: [PATCH 06/30] chore: improve integration tests (#8727) Improve integration tests: - spliting the tests in TokenExchange to isolated instances and in parallel - corrected some test structure so that the check for Details is no done anymore if the test already failed - replace required-calls with assert-calls to not stop the testing - add gofakeit for application, project and usernames(emails) - add eventually checks for testing in actions v2, so the request only get called when the execution is defined - check for length of results in list/search endpoints to avoid index errors --- .../admin/integration_test/iam_member_test.go | 22 +- .../integration_test/iam_settings_test.go | 26 +- .../admin/integration_test/import_test.go | 3 +- .../admin/integration_test/server_test.go | 9 +- .../v2/integration_test/feature_test.go | 12 +- .../v2beta/integration_test/feature_test.go | 6 +- .../idp/v2/integration_test/query_test.go | 25 +- .../management/integration_test/org_test.go | 11 +- .../grpc/org/v2/integration_test/org_test.go | 12 +- .../org/v2/integration_test/query_test.go | 22 +- internal/api/grpc/org/v2/org_test.go | 5 +- .../org/v2beta/integration_test/org_test.go | 12 +- .../integration_test/execution_target_test.go | 46 ++- .../integration_test/execution_test.go | 1 - .../v3alpha/integration_test/query_test.go | 64 ++- .../v3alpha/integration_test/server_test.go | 9 +- .../v3alpha/integration_test/email_test.go | 12 +- .../v3alpha/integration_test/phone_test.go | 12 +- .../v3alpha/integration_test/server_test.go | 10 +- .../v3alpha/integration_test/user_test.go | 25 +- .../v3alpha/integration_test/query_test.go | 50 +-- .../v3alpha/integration_test/server_test.go | 11 +- .../webkey_integration_test.go | 8 +- .../v2/integration_test/session_test.go | 5 +- .../v2beta/integration_test/session_test.go | 5 +- .../v2/integration_test/settings_test.go | 7 +- .../v2beta/integration_test/settings_test.go | 7 +- .../system/integration_test/instance_test.go | 2 +- .../integration_test/limits_block_test.go | 4 +- .../user/v2/integration_test/email_test.go | 23 +- .../user/v2/integration_test/idp_link_test.go | 44 +-- .../user/v2/integration_test/passkey_test.go | 18 +- .../user/v2/integration_test/password_test.go | 5 +- .../user/v2/integration_test/phone_test.go | 26 +- .../user/v2/integration_test/query_test.go | 84 ++-- .../user/v2/integration_test/user_test.go | 86 +++-- .../v2beta/integration_test/email_test.go | 23 +- .../v2beta/integration_test/password_test.go | 7 +- .../v2beta/integration_test/phone_test.go | 28 +- .../v2beta/integration_test/query_test.go | 76 ++-- .../user/v2beta/integration_test/user_test.go | 74 ++-- internal/api/idp/integration_test/idp_test.go | 21 +- .../api/oidc/integration_test/oidc_test.go | 3 +- .../integration_test/token_exchange_test.go | 365 +++++++++--------- 
.../oidc/integration_test/userinfo_test.go | 10 +- internal/integration/context.go | 30 ++ .../integration_test/telemetry_pusher_test.go | 2 + 47 files changed, 719 insertions(+), 649 deletions(-) create mode 100644 internal/integration/context.go diff --git a/internal/api/grpc/admin/integration_test/iam_member_test.go b/internal/api/grpc/admin/integration_test/iam_member_test.go index c65ede3d26..1b6440923e 100644 --- a/internal/api/grpc/admin/integration_test/iam_member_test.go +++ b/internal/api/grpc/admin/integration_test/iam_member_test.go @@ -29,7 +29,7 @@ var iamRoles = []string{ func TestServer_ListIAMMemberRoles(t *testing.T) { got, err := Client.ListIAMMemberRoles(AdminCTX, &admin_pb.ListIAMMemberRolesRequest{}) - require.NoError(t, err) + assert.NoError(t, err) assert.ElementsMatch(t, iamRoles, got.GetRoles()) } @@ -92,23 +92,23 @@ func TestServer_ListIAMMembers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, 20*time.Second) assert.EventuallyWithT(t, func(ct *assert.CollectT) { got, err := Client.ListIAMMembers(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(ct, err) + require.Error(ct, err) return } require.NoError(ct, err) wantResult := tt.want.GetResult() gotResult := got.GetResult() - if assert.Len(ct, gotResult, len(wantResult)) { - for i, want := range wantResult { - assert.Equal(ct, want.GetUserId(), gotResult[i].GetUserId()) - assert.ElementsMatch(ct, want.GetRoles(), gotResult[i].GetRoles()) - } + require.Len(ct, gotResult, len(wantResult)) + for i, want := range wantResult { + assert.Equal(ct, want.GetUserId(), gotResult[i].GetUserId()) + assert.ElementsMatch(ct, want.GetRoles(), gotResult[i].GetRoles()) } - }, time.Minute, time.Second) + }, retryDuration, tick) }) } } @@ -178,7 +178,7 @@ func TestServer_AddIAMMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.AddIAMMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -259,7 +259,7 @@ func TestServer_UpdateIAMMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.UpdateIAMMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -316,7 +316,7 @@ func TestServer_RemoveIAMMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.RemoveIAMMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/admin/integration_test/iam_settings_test.go b/internal/api/grpc/admin/integration_test/iam_settings_test.go index 2787f94755..93da4aed8a 100644 --- a/internal/api/grpc/admin/integration_test/iam_settings_test.go +++ b/internal/api/grpc/admin/integration_test/iam_settings_test.go @@ -5,6 +5,7 @@ package admin_test import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -53,16 +54,19 @@ func TestServer_GetSecurityPolicy(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp, err := instance.Client.Admin.GetSecurityPolicy(tt.ctx, &admin_pb.GetSecurityPolicyRequest{}) - if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - got, want := resp.GetPolicy(), tt.want.GetPolicy() - assert.Equal(t, want.GetEnableIframeEmbedding(), got.GetEnableIframeEmbedding(), "enable iframe 
embedding") - assert.Equal(t, want.GetAllowedOrigins(), got.GetAllowedOrigins(), "allowed origins") - assert.Equal(t, want.GetEnableImpersonation(), got.GetEnableImpersonation(), "enable impersonation") + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, 5*time.Second) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + resp, err := instance.Client.Admin.GetSecurityPolicy(tt.ctx, &admin_pb.GetSecurityPolicyRequest{}) + if tt.wantErr { + require.Error(ttt, err) + return + } + require.NoError(ttt, err) + got, want := resp.GetPolicy(), tt.want.GetPolicy() + assert.Equal(ttt, want.GetEnableIframeEmbedding(), got.GetEnableIframeEmbedding(), "enable iframe embedding") + assert.Equal(ttt, want.GetAllowedOrigins(), got.GetAllowedOrigins(), "allowed origins") + assert.Equal(ttt, want.GetEnableImpersonation(), got.GetEnableImpersonation(), "enable impersonation") + }, retryDuration, tick, "timeout waiting for expected target result") }) } } @@ -162,7 +166,7 @@ func TestServer_SetSecurityPolicy(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := instance.Client.Admin.SetSecurityPolicy(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/admin/integration_test/import_test.go b/internal/api/grpc/admin/integration_test/import_test.go index 1ee7d7d88e..7d323e5ab8 100644 --- a/internal/api/grpc/admin/integration_test/import_test.go +++ b/internal/api/grpc/admin/integration_test/import_test.go @@ -8,7 +8,6 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/google/uuid" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/zitadel/zitadel/internal/integration" @@ -474,7 +473,7 @@ func TestServer_ImportData(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.ImportData(AdminCTX, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/admin/integration_test/server_test.go b/internal/api/grpc/admin/integration_test/server_test.go index 5d751fb485..e29b4d5c78 100644 --- a/internal/api/grpc/admin/integration_test/server_test.go +++ b/internal/api/grpc/admin/integration_test/server_test.go @@ -35,19 +35,18 @@ func TestMain(m *testing.M) { } func await(t *testing.T, ctx context.Context, cb func(*assert.CollectT)) { - deadline, ok := ctx.Deadline() - require.True(t, ok, "context must have deadline") + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) require.EventuallyWithT( t, func(tt *assert.CollectT) { defer func() { // Panics are not recovered and don't mark the test as failed, so we need to do that ourselves - require.Nil(t, recover(), "panic in await callback") + assert.Nil(tt, recover(), "panic in await callback") }() cb(tt) }, - time.Until(deadline), - time.Second, + retryDuration, + tick, "awaiting successful callback failed", ) } diff --git a/internal/api/grpc/feature/v2/integration_test/feature_test.go b/internal/api/grpc/feature/v2/integration_test/feature_test.go index a97306fdae..2af4f642c4 100644 --- a/internal/api/grpc/feature/v2/integration_test/feature_test.go +++ b/internal/api/grpc/feature/v2/integration_test/feature_test.go @@ -96,7 +96,7 @@ func TestServer_SetSystemFeatures(t *testing.T) { }) got, err := Client.SetSystemFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -137,7 +137,7 @@ func 
TestServer_ResetSystemFeatures(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.ResetSystemFeatures(tt.ctx, &feature.ResetSystemFeaturesRequest{}) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -211,7 +211,7 @@ func TestServer_GetSystemFeatures(t *testing.T) { } got, err := Client.GetSystemFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -278,7 +278,7 @@ func TestServer_SetInstanceFeatures(t *testing.T) { }) got, err := Client.SetInstanceFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -319,7 +319,7 @@ func TestServer_ResetInstanceFeatures(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.ResetInstanceFeatures(tt.ctx, &feature.ResetInstanceFeaturesRequest{}) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -480,7 +480,7 @@ func TestServer_GetInstanceFeatures(t *testing.T) { } got, err := Client.GetInstanceFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/feature/v2beta/integration_test/feature_test.go b/internal/api/grpc/feature/v2beta/integration_test/feature_test.go index ecb8b5a993..69e05352d0 100644 --- a/internal/api/grpc/feature/v2beta/integration_test/feature_test.go +++ b/internal/api/grpc/feature/v2beta/integration_test/feature_test.go @@ -99,7 +99,7 @@ func TestServer_SetInstanceFeatures(t *testing.T) { }) got, err := Client.SetInstanceFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -140,7 +140,7 @@ func TestServer_ResetInstanceFeatures(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.ResetInstanceFeatures(tt.ctx, &feature.ResetInstanceFeaturesRequest{}) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -292,7 +292,7 @@ func TestServer_GetInstanceFeatures(t *testing.T) { } got, err := Client.GetInstanceFeatures(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/idp/v2/integration_test/query_test.go b/internal/api/grpc/idp/v2/integration_test/query_test.go index c1288f537e..7bfa286b5e 100644 --- a/internal/api/grpc/idp/v2/integration_test/query_test.go +++ b/internal/api/grpc/idp/v2/integration_test/query_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" @@ -67,7 +68,7 @@ func TestServer_GetIDPByID(t *testing.T) { IamCTX, &idp.GetIDPByIDRequest{}, func(ctx context.Context, request *idp.GetIDPByIDRequest) *idpAttr { - name := fmt.Sprintf("GetIDPByID%d", time.Now().UnixNano()) + name := fmt.Sprintf("GetIDPByID-%s", gofakeit.AppName()) resp := Instance.AddGenericOAuthProvider(ctx, name) request.Id = resp.Id return &idpAttr{ @@ -115,7 +116,7 @@ func TestServer_GetIDPByID(t *testing.T) { UserCTX, &idp.GetIDPByIDRequest{}, func(ctx context.Context, request *idp.GetIDPByIDRequest) *idpAttr { - name := fmt.Sprintf("GetIDPByID%d", time.Now().UnixNano()) + name := fmt.Sprintf("GetIDPByID-%s", gofakeit.AppName()) resp := 
Instance.AddGenericOAuthProvider(IamCTX, name) request.Id = resp.Id return &idpAttr{ @@ -136,7 +137,7 @@ func TestServer_GetIDPByID(t *testing.T) { CTX, &idp.GetIDPByIDRequest{}, func(ctx context.Context, request *idp.GetIDPByIDRequest) *idpAttr { - name := fmt.Sprintf("GetIDPByID%d", time.Now().UnixNano()) + name := fmt.Sprintf("GetIDPByID-%s", gofakeit.AppName()) resp := Instance.AddOrgGenericOAuthProvider(ctx, name) request.Id = resp.Id return &idpAttr{ @@ -184,7 +185,7 @@ func TestServer_GetIDPByID(t *testing.T) { UserCTX, &idp.GetIDPByIDRequest{}, func(ctx context.Context, request *idp.GetIDPByIDRequest) *idpAttr { - name := fmt.Sprintf("GetIDPByID%d", time.Now().UnixNano()) + name := fmt.Sprintf("GetIDPByID-%s", gofakeit.AppName()) resp := Instance.AddOrgGenericOAuthProvider(CTX, name) request.Id = resp.Id return &idpAttr{ @@ -203,20 +204,14 @@ func TestServer_GetIDPByID(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { idpAttr := tt.args.dep(tt.args.ctx, tt.args.req) - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(CTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, getErr := Client.GetIDPByID(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.GetIDPByID(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, getErr) - if getErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) // set provided info from creation tt.want.Idp.Details = idpAttr.Details @@ -229,7 +224,7 @@ func TestServer_GetIDPByID(t *testing.T) { tt.want.Idp.Details = got.Idp.Details // to check the rest of the content assert.Equal(ttt, tt.want.Idp, got.Idp) - }, retryDuration, time.Second) + }, retryDuration, tick) }) } } diff --git a/internal/api/grpc/management/integration_test/org_test.go b/internal/api/grpc/management/integration_test/org_test.go index 310fcf94b4..8288ceb9e9 100644 --- a/internal/api/grpc/management/integration_test/org_test.go +++ b/internal/api/grpc/management/integration_test/org_test.go @@ -97,10 +97,11 @@ func TestServer_ListOrgMembers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) assert.EventuallyWithT(t, func(ct *assert.CollectT) { got, err := Client.ListOrgMembers(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(ct, err) + require.Error(ct, err) return } require.NoError(ct, err) @@ -113,7 +114,7 @@ func TestServer_ListOrgMembers(t *testing.T) { assert.ElementsMatch(ct, want.GetRoles(), gotResult[i].GetRoles()) } } - }, time.Minute, time.Second) + }, retryDuration, tick) }) } } @@ -183,7 +184,7 @@ func TestServer_AddOrgMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.AddOrgMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -264,7 +265,7 @@ func TestServer_UpdateOrgMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.UpdateOrgMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) @@ -321,7 +322,7 @@ func TestServer_RemoveIAMMember(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.RemoveOrgMember(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, 
err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/org/v2/integration_test/org_test.go b/internal/api/grpc/org/v2/integration_test/org_test.go index e84da7a811..165eb1471f 100644 --- a/internal/api/grpc/org/v2/integration_test/org_test.go +++ b/internal/api/grpc/org/v2/integration_test/org_test.go @@ -4,11 +4,11 @@ package org_test import ( "context" - "fmt" "os" "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -75,7 +75,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "invalid admin type", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ {}, }, @@ -86,7 +86,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "admin with init", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ { UserType: &org.AddOrganizationRequest_Admin_Human{ @@ -96,7 +96,7 @@ func TestServer_AddOrganization(t *testing.T) { FamilyName: "lastname", }, Email: &user.SetHumanEmail{ - Email: fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + Email: gofakeit.Email(), Verification: &user.SetHumanEmail_ReturnCode{ ReturnCode: &user.ReturnEmailVerificationCode{}, }, @@ -121,7 +121,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "existing user and new human with idp", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ { UserType: &org.AddOrganizationRequest_Admin_UserId{UserId: User.GetUserId()}, @@ -134,7 +134,7 @@ func TestServer_AddOrganization(t *testing.T) { FamilyName: "lastname", }, Email: &user.SetHumanEmail{ - Email: fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + Email: gofakeit.Email(), Verification: &user.SetHumanEmail_IsVerified{ IsVerified: true, }, diff --git a/internal/api/grpc/org/v2/integration_test/query_test.go b/internal/api/grpc/org/v2/integration_test/query_test.go index e075ba66ea..e52ea40018 100644 --- a/internal/api/grpc/org/v2/integration_test/query_test.go +++ b/internal/api/grpc/org/v2/integration_test/query_test.go @@ -83,10 +83,10 @@ func TestServer_ListOrganizations(t *testing.T) { func(ctx context.Context, request *org.ListOrganizationsRequest) ([]orgAttr, error) { count := 3 orgs := make([]orgAttr, count) - prefix := fmt.Sprintf("ListOrgs%d", time.Now().UnixNano()) + prefix := fmt.Sprintf("ListOrgs-%s", gofakeit.AppName()) for i := 0; i < count; i++ { name := prefix + strconv.Itoa(i) - orgResp := Instance.CreateOrganization(ctx, name, fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(ctx, name, gofakeit.Email()) orgs[i] = orgAttr{ ID: orgResp.GetOrganizationId(), Name: name, @@ -399,25 +399,19 @@ func TestServer_ListOrganizations(t *testing.T) { } } - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(CTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, listErr := Client.ListOrganizations(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.ListOrganizations(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - 
assertErr(ttt, listErr) - if listErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions tt.want.Details.TotalResult = got.Details.TotalResult // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) for i := range tt.want.Result { // domain from result, as it is generated though the create @@ -430,7 +424,7 @@ func TestServer_ListOrganizations(t *testing.T) { assert.Contains(ttt, got.Result, tt.want.Result[i]) } integration.AssertListDetails(t, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected user result") + }, retryDuration, tick, "timeout waiting for expected user result") }) } } diff --git a/internal/api/grpc/org/v2/org_test.go b/internal/api/grpc/org/v2/org_test.go index 451c4006b3..b384f858de 100644 --- a/internal/api/grpc/org/v2/org_test.go +++ b/internal/api/grpc/org/v2/org_test.go @@ -6,7 +6,6 @@ import ( "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" "github.com/zitadel/zitadel/internal/command" @@ -110,7 +109,7 @@ func Test_addOrganizationRequestToCommand(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := addOrganizationRequestToCommand(tt.args.request) - require.ErrorIs(t, err, tt.wantErr) + assert.ErrorIs(t, err, tt.wantErr) assert.Equal(t, tt.want, got) }) } @@ -165,7 +164,7 @@ func Test_createdOrganizationToPb(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := createdOrganizationToPb(tt.args.createdOrg) - require.ErrorIs(t, err, tt.wantErr) + assert.ErrorIs(t, err, tt.wantErr) assert.Equal(t, tt.want, got) }) } diff --git a/internal/api/grpc/org/v2beta/integration_test/org_test.go b/internal/api/grpc/org/v2beta/integration_test/org_test.go index 95b2bed3b2..5998b17a71 100644 --- a/internal/api/grpc/org/v2beta/integration_test/org_test.go +++ b/internal/api/grpc/org/v2beta/integration_test/org_test.go @@ -4,11 +4,11 @@ package org_test import ( "context" - "fmt" "os" "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -72,7 +72,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "invalid admin type", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ {}, }, @@ -83,7 +83,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "admin with init", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ { UserType: &org.AddOrganizationRequest_Admin_Human{ @@ -93,7 +93,7 @@ func TestServer_AddOrganization(t *testing.T) { FamilyName: "lastname", }, Email: &user_v2beta.SetHumanEmail{ - Email: fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + Email: gofakeit.Email(), Verification: &user_v2beta.SetHumanEmail_ReturnCode{ ReturnCode: &user_v2beta.ReturnEmailVerificationCode{}, }, @@ -118,7 +118,7 @@ func TestServer_AddOrganization(t *testing.T) { name: "existing user and new human with idp", ctx: CTX, req: &org.AddOrganizationRequest{ - Name: fmt.Sprintf("%d", time.Now().UnixNano()), + 
Name: gofakeit.AppName(), Admins: []*org.AddOrganizationRequest_Admin{ { UserType: &org.AddOrganizationRequest_Admin_UserId{UserId: User.GetUserId()}, @@ -131,7 +131,7 @@ func TestServer_AddOrganization(t *testing.T) { FamilyName: "lastname", }, Email: &user_v2beta.SetHumanEmail{ - Email: fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + Email: gofakeit.Email(), Verification: &user_v2beta.SetHumanEmail_IsVerified{ IsVerified: true, }, diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go index 169ee0e5d2..042b7a416e 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go @@ -65,6 +65,8 @@ func TestServer_ExecutionTarget(t *testing.T) { targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, false) instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) + waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod)) + // expected response from the GetTarget expectedResponse := &action.GetTargetResponse{ Target: &action.GetTarget{ @@ -120,6 +122,7 @@ func TestServer_ExecutionTarget(t *testing.T) { targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, false) instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) + waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) return func() { closeRequest() closeResponse() @@ -163,6 +166,7 @@ func TestServer_ExecutionTarget(t *testing.T) { // GetTarget with used target request.Id = targetRequest.GetDetails().GetId() + waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod)) return func() { closeRequest() }, nil @@ -232,6 +236,7 @@ func TestServer_ExecutionTarget(t *testing.T) { targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, true) instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) + waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) return func() { closeResponse() }, nil @@ -250,25 +255,20 @@ func TestServer_ExecutionTarget(t *testing.T) { require.NoError(t, err) defer close() } - retryDuration := 5 * time.Second - if ctxDeadline, ok := isolatedIAMOwnerCTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } - + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.ActionV3Alpha.GetTarget(tt.ctx, tt.req) if tt.wantErr { - assert.Error(ttt, err, "Error: "+err.Error()) - } else { - assert.NoError(ttt, err) - } - if err != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) - integration.AssertResourceDetails(t, tt.want.GetTarget().GetDetails(), got.GetTarget().GetDetails()) - assert.Equal(t, tt.want.GetTarget().GetConfig(), got.GetTarget().GetConfig()) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected execution result") + integration.AssertResourceDetails(ttt, tt.want.GetTarget().GetDetails(), got.GetTarget().GetDetails()) + 
tt.want.Target.Details = got.GetTarget().GetDetails() + assert.EqualExportedValues(ttt, tt.want.GetTarget().GetConfig(), got.GetTarget().GetConfig()) + + }, retryDuration, tick, "timeout waiting for expected execution result") if tt.clean != nil { tt.clean(tt.ctx) @@ -277,6 +277,26 @@ func TestServer_ExecutionTarget(t *testing.T) { } } +func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Second) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{ + Filters: []*action.ExecutionSearchFilter{ + {Filter: &action.ExecutionSearchFilter_InConditionsFilter{ + InConditionsFilter: &action.InConditionsFilter{Conditions: []*action.Condition{condition}}, + }}, + }, + }) + if !assert.NoError(ttt, err) { + return + } + if assert.Len(ttt, got.GetResult(), 1) { + return + } + }, retryDuration, tick, "timeout waiting for expected execution result") + return +} + func conditionRequestFullMethod(fullMethod string) *action.Condition { return &action.Condition{ ConditionType: &action.Condition_Request{ diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_test.go index 9ab8ebc7ef..19d97c3857 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_test.go @@ -196,7 +196,6 @@ func TestServer_SetExecution_Request(t *testing.T) { require.Error(t, err) return } - require.NoError(t, err) integration.AssertResourceDetails(t, tt.want.Details, got.Details) diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go index 37d84eec05..3756a144d8 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go @@ -216,16 +216,20 @@ func TestServer_GetTarget(t *testing.T) { err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want) require.NoError(t, err) } - got, getErr := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req) - if tt.wantErr { - assert.Error(t, getErr, "Error: "+getErr.Error()) - } else { - assert.NoError(t, getErr) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req) + if tt.wantErr { + require.Error(ttt, err, "Error: "+err.Error()) + return + } + require.NoError(ttt, err) + wantTarget := tt.want.GetTarget() gotTarget := got.GetTarget() - integration.AssertResourceDetails(t, wantTarget.GetDetails(), gotTarget.GetDetails()) - assert.Equal(t, wantTarget.GetConfig(), gotTarget.GetConfig()) - } + integration.AssertResourceDetails(ttt, wantTarget.GetDetails(), gotTarget.GetDetails()) + assert.EqualExportedValues(ttt, wantTarget.GetConfig(), gotTarget.GetConfig()) + }, retryDuration, tick, "timeout waiting for expected target result") }) } } @@ -474,31 +478,24 @@ func TestServer_ListTargets(t *testing.T) { require.NoError(t, err) } - retryDuration := 5 * time.Second - if ctxDeadline, ok := isolatedIAMOwnerCTX.Deadline(); ok { - retryDuration = 
time.Until(ctxDeadline) - } - + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, listErr := instance.Client.ActionV3Alpha.SearchTargets(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(ttt, listErr, "Error: "+listErr.Error()) - } else { - assert.NoError(ttt, listErr) - } - if listErr != nil { + require.Error(ttt, listErr, "Error: "+listErr.Error()) return } + require.NoError(ttt, listErr) + // always first check length, otherwise its failed anyway - if !assert.Len(ttt, got.Result, len(tt.want.Result)) { - return - } + require.Len(ttt, got.Result, len(tt.want.Result)) + for i := range tt.want.Result { integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[i].GetDetails()) - assert.Equal(ttt, tt.want.Result[i].GetConfig(), got.Result[i].GetConfig()) + assert.EqualExportedValues(ttt, tt.want.Result[i].GetConfig(), got.Result[i].GetConfig()) } integration.AssertResourceListDetails(ttt, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected execution result") + }, retryDuration, tick, "timeout waiting for expected execution result") }) } } @@ -866,32 +863,27 @@ func TestServer_SearchExecutions(t *testing.T) { require.NoError(t, err) } - retryDuration := 5 * time.Second - if ctxDeadline, ok := isolatedIAMOwnerCTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } - + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, listErr := instance.Client.ActionV3Alpha.SearchExecutions(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(ttt, listErr, "Error: "+listErr.Error()) - } else { - assert.NoError(ttt, listErr) - } - if listErr != nil { + require.Error(ttt, listErr, "Error: "+listErr.Error()) return } + require.NoError(ttt, listErr) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) for i := range tt.want.Result { // as not sorted, all elements have to be checked // workaround as oneof elements can only be checked with assert.EqualExportedValues() if j, found := containExecution(got.Result, tt.want.Result[i]); found { - assert.EqualExportedValues(t, tt.want.Result[i], got.Result[j]) + integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[j].GetDetails()) + got.Result[j].Details = tt.want.Result[i].GetDetails() + assert.EqualExportedValues(ttt, tt.want.Result[i], got.Result[j]) } } integration.AssertResourceListDetails(ttt, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected execution result") + }, retryDuration, tick, "timeout waiting for expected execution result") }) } } diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go index 357f532356..3c1c32062d 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go @@ -43,10 +43,8 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { Actions: gu.Ptr(true), }) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := ctx.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := 
integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { f, err := instance.Client.FeatureV2.GetInstanceFeatures(ctx, &feature.GetInstanceFeaturesRequest{ @@ -59,12 +57,13 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { time.Second, "timed out waiting for ensuring instance feature") + retryDuration, tick = integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { _, err := instance.Client.ActionV3Alpha.ListExecutionMethods(ctx, &action.ListExecutionMethodsRequest{}) assert.NoError(ttt, err) }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature call") } diff --git a/internal/api/grpc/resources/user/v3alpha/integration_test/email_test.go b/internal/api/grpc/resources/user/v3alpha/integration_test/email_test.go index c5bec9008e..4b5a342905 100644 --- a/internal/api/grpc/resources/user/v3alpha/integration_test/email_test.go +++ b/internal/api/grpc/resources/user/v3alpha/integration_test/email_test.go @@ -350,10 +350,10 @@ func TestServer_SetContactEmail(t *testing.T) { } got, err := instance.Client.UserV3Alpha.SetContactEmail(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) if tt.res.returnCode { assert.NotNil(t, got.VerificationCode) @@ -545,10 +545,10 @@ func TestServer_VerifyContactEmail(t *testing.T) { } got, err := instance.Client.UserV3Alpha.VerifyContactEmail(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) }) } @@ -757,10 +757,10 @@ func TestServer_ResendContactEmailCode(t *testing.T) { } got, err := instance.Client.UserV3Alpha.ResendContactEmailCode(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) if tt.res.returnCode { assert.NotNil(t, got.VerificationCode) diff --git a/internal/api/grpc/resources/user/v3alpha/integration_test/phone_test.go b/internal/api/grpc/resources/user/v3alpha/integration_test/phone_test.go index d61135d30d..9fcacd7457 100644 --- a/internal/api/grpc/resources/user/v3alpha/integration_test/phone_test.go +++ b/internal/api/grpc/resources/user/v3alpha/integration_test/phone_test.go @@ -277,10 +277,10 @@ func TestServer_SetContactPhone(t *testing.T) { } got, err := instance.Client.UserV3Alpha.SetContactPhone(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) if tt.res.returnCode { assert.NotNil(t, got.VerificationCode) @@ -474,10 +474,10 @@ func TestServer_VerifyContactPhone(t *testing.T) { } got, err := instance.Client.UserV3Alpha.VerifyContactPhone(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) }) } @@ -686,10 +686,10 @@ func TestServer_ResendContactPhoneCode(t *testing.T) { } got, err := instance.Client.UserV3Alpha.ResendContactPhoneCode(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - 
assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) if tt.res.returnCode { assert.NotNil(t, got.VerificationCode) diff --git a/internal/api/grpc/resources/user/v3alpha/integration_test/server_test.go b/internal/api/grpc/resources/user/v3alpha/integration_test/server_test.go index fee1a38430..467a563dcf 100644 --- a/internal/api/grpc/resources/user/v3alpha/integration_test/server_test.go +++ b/internal/api/grpc/resources/user/v3alpha/integration_test/server_test.go @@ -43,10 +43,7 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { UserSchema: gu.Ptr(true), }) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := ctx.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { f, err := instance.Client.FeatureV2.GetInstanceFeatures(ctx, &feature.GetInstanceFeaturesRequest{ @@ -58,15 +55,16 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { } }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature") + retryDuration, tick = integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { _, err := instance.Client.UserV3Alpha.SearchUsers(ctx, &user.SearchUsersRequest{}) assert.NoError(ttt, err) }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature call") } diff --git a/internal/api/grpc/resources/user/v3alpha/integration_test/user_test.go b/internal/api/grpc/resources/user/v3alpha/integration_test/user_test.go index 95e98b1a9e..34d12446c5 100644 --- a/internal/api/grpc/resources/user/v3alpha/integration_test/user_test.go +++ b/internal/api/grpc/resources/user/v3alpha/integration_test/user_test.go @@ -224,6 +224,7 @@ func TestServer_CreateUser(t *testing.T) { if tt.res.returnCodePhone { require.NotNil(t, got.PhoneCode) } + }) } } @@ -629,10 +630,10 @@ func TestServer_PatchUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.PatchUser(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.res.want, got.Details) if tt.res.returnCodeEmail { assert.NotNil(t, got.EmailCode) @@ -848,10 +849,10 @@ func TestServer_DeleteUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.DeleteUser(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.want, got.Details) }) } @@ -1059,10 +1060,10 @@ func TestServer_LockUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.LockUser(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.want, got.Details) }) } @@ -1242,10 +1243,10 @@ func TestServer_UnlockUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.UnlockUser(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.want, got.Details) }) } @@ -1444,10 +1445,10 @@ func TestServer_DeactivateUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.DeactivateUser(tt.ctx, tt.req) if 
tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.want, got.Details) }) } @@ -1627,10 +1628,10 @@ func TestServer_ActivateUser(t *testing.T) { } got, err := instance.Client.UserV3Alpha.ActivateUser(tt.ctx, tt.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } - assert.NoError(t, err) + require.NoError(t, err) integration.AssertResourceDetails(t, tt.want, got.Details) }) } diff --git a/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go index 36cf032660..436af3bc6f 100644 --- a/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go @@ -188,31 +188,26 @@ func TestServer_ListUserSchemas(t *testing.T) { require.NoError(t, err) } - retryDuration := 20 * time.Second - if ctxDeadline, ok := isolatedIAMOwnerCTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } - + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 20*time.Second) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.UserSchemaV3.SearchUserSchemas(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(ttt, err) return } - assert.NoError(ttt, err) - + require.NoError(ttt, err) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) for i := range tt.want.Result { - want := tt.want.Result[i] - got := got.Result[i] + wantSchema := tt.want.Result[i] + gotSchema := got.Result[i] - integration.AssertResourceDetails(t, want.GetDetails(), got.GetDetails()) - want.Details = got.Details - grpc.AllFieldsEqual(t, want.ProtoReflect(), got.ProtoReflect(), grpc.CustomMappers) + integration.AssertResourceDetails(ttt, wantSchema.GetDetails(), gotSchema.GetDetails()) + wantSchema.Details = gotSchema.GetDetails() + grpc.AllFieldsEqual(ttt, wantSchema.ProtoReflect(), gotSchema.ProtoReflect(), grpc.CustomMappers) } - integration.AssertListDetails(t, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected user schema result") + integration.AssertListDetails(ttt, tt.want, got) + }, retryDuration, tick, "timeout waiting for expected user schema result") }) } } @@ -300,24 +295,21 @@ func TestServer_GetUserSchema(t *testing.T) { require.NoError(t, err) } - retryDuration := 5 * time.Second - if ctxDeadline, ok := isolatedIAMOwnerCTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } - + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.UserSchemaV3.GetUserSchema(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err, "Error: "+err.Error()) - } else { - assert.NoError(t, err) - wantSchema := tt.want.GetUserSchema() - gotSchema := got.GetUserSchema() - integration.AssertResourceDetails(t, wantSchema.GetDetails(), gotSchema.GetDetails()) - tt.want.UserSchema.Details = got.GetUserSchema().GetDetails() - grpc.AllFieldsEqual(t, tt.want.ProtoReflect(), got.ProtoReflect(), grpc.CustomMappers) + require.Error(ttt, err, "Error: "+err.Error()) + return } - }, retryDuration, time.Millisecond*100, "timeout waiting for expected user schema result") + 
require.NoError(ttt, err) + + wantSchema := tt.want.GetUserSchema() + gotSchema := got.GetUserSchema() + integration.AssertResourceDetails(ttt, wantSchema.GetDetails(), gotSchema.GetDetails()) + wantSchema.Details = got.GetUserSchema().GetDetails() + grpc.AllFieldsEqual(ttt, wantSchema.ProtoReflect(), gotSchema.ProtoReflect(), grpc.CustomMappers) + }, retryDuration, tick, "timeout waiting for expected user schema result") }) } } diff --git a/internal/api/grpc/resources/userschema/v3alpha/integration_test/server_test.go b/internal/api/grpc/resources/userschema/v3alpha/integration_test/server_test.go index c562f9613a..3bab8d8c05 100644 --- a/internal/api/grpc/resources/userschema/v3alpha/integration_test/server_test.go +++ b/internal/api/grpc/resources/userschema/v3alpha/integration_test/server_test.go @@ -43,10 +43,8 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { UserSchema: gu.Ptr(true), }) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := ctx.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { f, err := instance.Client.FeatureV2.GetInstanceFeatures(ctx, &feature.GetInstanceFeaturesRequest{ @@ -58,15 +56,16 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { } }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature") + retryDuration, tick = integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { _, err := instance.Client.UserSchemaV3.SearchUserSchemas(ctx, &schema.SearchUserSchemasRequest{}) assert.NoError(ttt, err) }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature call") } diff --git a/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go b/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go index a545f1fb06..2a178ea285 100644 --- a/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go +++ b/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go @@ -191,6 +191,8 @@ func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, co }) require.NoError(t, err) } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(iamCTX, time.Minute) assert.EventuallyWithT(t, func(collect *assert.CollectT) { resp, err := instance.Client.WebKeyV3Alpha.ListWebKeys(iamCTX, &webkey.ListWebKeysRequest{}) if enableFeature { @@ -199,7 +201,7 @@ func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, co } else { assert.Error(collect, err) } - }, time.Minute, time.Second) + }, retryDuration, tick) return instance, iamCTX } @@ -213,6 +215,8 @@ func assertFeatureDisabledError(t *testing.T, err error) { } func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any) { + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) assert.EventuallyWithT(t, func(collect *assert.CollectT) { resp, err := instance.Client.WebKeyV3Alpha.ListWebKeys(ctx, &webkey.ListWebKeysRequest{}) require.NoError(collect, err) @@ -243,5 +247,5 @@ func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integrati if expectActiveKeyID != "" { assert.Equal(collect, expectActiveKeyID, 
gotActiveKeyID) } - }, time.Minute, time.Second) + }, retryDuration, tick) } diff --git a/internal/api/grpc/session/v2/integration_test/session_test.go b/internal/api/grpc/session/v2/integration_test/session_test.go index 32aa1adf55..ccd08f3471 100644 --- a/internal/api/grpc/session/v2/integration_test/session_test.go +++ b/internal/api/grpc/session/v2/integration_test/session_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/pquerna/otp/totp" "github.com/stretchr/testify/assert" @@ -348,8 +349,8 @@ func TestServer_CreateSession(t *testing.T) { func TestServer_CreateSession_lock_user(t *testing.T) { // create a separate org so we don't interfere with any other test org := Instance.CreateOrganization(IAMOwnerCTX, - fmt.Sprintf("TestServer_CreateSession_lock_user_%d", time.Now().UnixNano()), - fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + fmt.Sprintf("TestServer_CreateSession_lock_user_%s", gofakeit.AppName()), + gofakeit.Email(), ) userID := org.CreatedAdmins[0].GetUserId() Instance.SetUserPassword(IAMOwnerCTX, userID, integration.UserPassword, false) diff --git a/internal/api/grpc/session/v2beta/integration_test/session_test.go b/internal/api/grpc/session/v2beta/integration_test/session_test.go index 11ed9f7ed4..52e355204d 100644 --- a/internal/api/grpc/session/v2beta/integration_test/session_test.go +++ b/internal/api/grpc/session/v2beta/integration_test/session_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/pquerna/otp/totp" "github.com/stretchr/testify/assert" @@ -348,8 +349,8 @@ func TestServer_CreateSession(t *testing.T) { func TestServer_CreateSession_lock_user(t *testing.T) { // create a separate org so we don't interfere with any other test org := Instance.CreateOrganization(IAMOwnerCTX, - fmt.Sprintf("TestServer_CreateSession_lock_user_%d", time.Now().UnixNano()), - fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()), + fmt.Sprintf("TestServer_CreateSession_lock_user_%s", gofakeit.AppName()), + gofakeit.Email(), ) userID := org.CreatedAdmins[0].GetUserId() Instance.SetUserPassword(IAMOwnerCTX, userID, integration.UserPassword, false) diff --git a/internal/api/grpc/settings/v2/integration_test/settings_test.go b/internal/api/grpc/settings/v2/integration_test/settings_test.go index 99af36a004..8ae576d104 100644 --- a/internal/api/grpc/settings/v2/integration_test/settings_test.go +++ b/internal/api/grpc/settings/v2/integration_test/settings_test.go @@ -53,10 +53,11 @@ func TestServer_GetSecuritySettings(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, 20*time.Second) assert.EventuallyWithT(t, func(ct *assert.CollectT) { resp, err := Client.GetSecuritySettings(tt.ctx, &settings.GetSecuritySettingsRequest{}) if tt.wantErr { - assert.Error(ct, err) + require.Error(ct, err) return } require.NoError(ct, err) @@ -64,7 +65,7 @@ func TestServer_GetSecuritySettings(t *testing.T) { assert.Equal(ct, want.GetEmbeddedIframe().GetEnabled(), got.GetEmbeddedIframe().GetEnabled(), "enable iframe embedding") assert.Equal(ct, want.GetEmbeddedIframe().GetAllowedOrigins(), got.GetEmbeddedIframe().GetAllowedOrigins(), "allowed origins") assert.Equal(ct, want.GetEnableImpersonation(), got.GetEnableImpersonation(), "enable impersonation") - }, time.Minute, time.Second/10) + }, retryDuration, tick) }) } } @@ -167,7 +168,7 @@ func 
TestServer_SetSecuritySettings(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.SetSecuritySettings(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/settings/v2beta/integration_test/settings_test.go b/internal/api/grpc/settings/v2beta/integration_test/settings_test.go index d53dec1a1c..9f6968f5e0 100644 --- a/internal/api/grpc/settings/v2beta/integration_test/settings_test.go +++ b/internal/api/grpc/settings/v2beta/integration_test/settings_test.go @@ -53,10 +53,11 @@ func TestServer_GetSecuritySettings(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, time.Minute) assert.EventuallyWithT(t, func(ct *assert.CollectT) { resp, err := Client.GetSecuritySettings(tt.ctx, &settings.GetSecuritySettingsRequest{}) if tt.wantErr { - assert.Error(ct, err) + require.Error(ct, err) return } require.NoError(ct, err) @@ -64,7 +65,7 @@ func TestServer_GetSecuritySettings(t *testing.T) { assert.Equal(ct, want.GetEmbeddedIframe().GetEnabled(), got.GetEmbeddedIframe().GetEnabled(), "enable iframe embedding") assert.Equal(ct, want.GetEmbeddedIframe().GetAllowedOrigins(), got.GetEmbeddedIframe().GetAllowedOrigins(), "allowed origins") assert.Equal(ct, want.GetEnableImpersonation(), got.GetEnableImpersonation(), "enable impersonation") - }, time.Minute, time.Second/10) + }, retryDuration, tick) }) } } @@ -167,7 +168,7 @@ func TestServer_SetSecuritySettings(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := Client.SetSecuritySettings(tt.args.ctx, tt.args.req) if tt.wantErr { - assert.Error(t, err) + require.Error(t, err) return } require.NoError(t, err) diff --git a/internal/api/grpc/system/integration_test/instance_test.go b/internal/api/grpc/system/integration_test/instance_test.go index c90201a00b..18d2f68bb9 100644 --- a/internal/api/grpc/system/integration_test/instance_test.go +++ b/internal/api/grpc/system/integration_test/instance_test.go @@ -104,7 +104,7 @@ func TestServer_ListInstances(t *testing.T) { } require.NoError(t, err) got := resp.GetResult() - assert.Len(t, got, len(tt.want)) + require.Len(t, got, len(tt.want)) for i := 0; i < len(tt.want); i++ { assert.Equalf(t, tt.want[i].GetId(), got[i].GetId(), "instance[%d] id", i) } diff --git a/internal/api/grpc/system/integration_test/limits_block_test.go b/internal/api/grpc/system/integration_test/limits_block_test.go index b8d17a1167..46b213f603 100644 --- a/internal/api/grpc/system/integration_test/limits_block_test.go +++ b/internal/api/grpc/system/integration_test/limits_block_test.go @@ -140,13 +140,13 @@ func TestServer_Limits_Block(t *testing.T) { InstanceId: isoInstance.ID(), Block: gu.Ptr(true), }) - require.NoError(t, err) + assert.NoError(t, err) // The following call ensures that an undefined bool is not deserialized to false _, err = integration.SystemClient().SetLimits(CTX, &system.SetLimitsRequest{ InstanceId: isoInstance.ID(), AuditLogRetention: durationpb.New(time.Hour), }) - require.NoError(t, err) + assert.NoError(t, err) for _, tt := range tests { var isFirst bool t.Run(tt.name+" with blocking", func(t *testing.T) { diff --git a/internal/api/grpc/user/v2/integration_test/email_test.go b/internal/api/grpc/user/v2/integration_test/email_test.go index cbb82d2636..5092dbf40d 100644 --- a/internal/api/grpc/user/v2/integration_test/email_test.go +++ 
b/internal/api/grpc/user/v2/integration_test/email_test.go @@ -3,10 +3,9 @@ package user_test import ( - "fmt" "testing" - "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -134,12 +133,15 @@ func TestServer_SetEmail(t *testing.T) { got, err := Client.SetEmail(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -149,7 +151,7 @@ func TestServer_ResendEmailCode(t *testing.T) { t.Parallel() userID := Instance.CreateHumanUser(CTX).GetUserId() - verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())).GetUserId() + verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId() tests := []struct { name string @@ -237,12 +239,15 @@ func TestServer_ResendEmailCode(t *testing.T) { got, err := Client.ResendEmailCode(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -294,9 +299,9 @@ func TestServer_VerifyEmail(t *testing.T) { got, err := Client.VerifyEmail(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } diff --git a/internal/api/grpc/user/v2/integration_test/idp_link_test.go b/internal/api/grpc/user/v2/integration_test/idp_link_test.go index cee42c2ae6..e9022f31f8 100644 --- a/internal/api/grpc/user/v2/integration_test/idp_link_test.go +++ b/internal/api/grpc/user/v2/integration_test/idp_link_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/metadata" @@ -91,10 +92,9 @@ func TestServer_AddIDPLink(t *testing.T) { got, err := Client.AddIDPLink(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } - + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -103,20 +103,20 @@ func TestServer_AddIDPLink(t *testing.T) { func TestServer_ListIDPLinks(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks-%s", gofakeit.AppName()), gofakeit.Email()) instanceIdpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id) - userInstanceResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userInstanceResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) _, err := Instance.CreateUserIDPlink(IamCTX, userInstanceResp.GetUserId(), "external_instance", instanceIdpResp.Id, "externalUsername_instance") require.NoError(t, err) ctxOrg := metadata.AppendToOutgoingContext(IamCTX, "x-zitadel-orgid", 
orgResp.GetOrganizationId()) orgIdpResp := Instance.AddOrgGenericOAuthProvider(ctxOrg, orgResp.OrganizationId) - userOrgResp := Instance.CreateHumanUserVerified(ctxOrg, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userOrgResp := Instance.CreateHumanUserVerified(ctxOrg, orgResp.OrganizationId, gofakeit.Email()) _, err = Instance.CreateUserIDPlink(ctxOrg, userOrgResp.GetUserId(), "external_org", orgIdpResp.Id, "externalUsername_org") require.NoError(t, err) - userMultipleResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userMultipleResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) _, err = Instance.CreateUserIDPlink(IamCTX, userMultipleResp.GetUserId(), "external_multi", instanceIdpResp.Id, "externalUsername_multi") require.NoError(t, err) _, err = Instance.CreateUserIDPlink(ctxOrg, userMultipleResp.GetUserId(), "external_multi", orgIdpResp.Id, "externalUsername_multi") @@ -236,27 +236,21 @@ func TestServer_ListIDPLinks(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(CTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, listErr := Client.ListIDPLinks(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.ListIDPLinks(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, listErr) - if listErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) for i := range tt.want.Result { assert.Contains(ttt, got.Result, tt.want.Result[i]) } integration.AssertListDetails(t, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected idplinks result") + }, retryDuration, tick, "timeout waiting for expected idplinks result") }) } } @@ -264,20 +258,20 @@ func TestServer_ListIDPLinks(t *testing.T) { func TestServer_RemoveIDPLink(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks-%s", gofakeit.AppName()), gofakeit.Email()) instanceIdpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id) - userInstanceResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userInstanceResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) _, err := Instance.CreateUserIDPlink(IamCTX, userInstanceResp.GetUserId(), "external_instance", instanceIdpResp.Id, "externalUsername_instance") require.NoError(t, err) ctxOrg := metadata.AppendToOutgoingContext(IamCTX, "x-zitadel-orgid", orgResp.GetOrganizationId()) orgIdpResp := Instance.AddOrgGenericOAuthProvider(ctxOrg, orgResp.OrganizationId) - userOrgResp := Instance.CreateHumanUserVerified(ctxOrg, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userOrgResp := Instance.CreateHumanUserVerified(ctxOrg, orgResp.OrganizationId, gofakeit.Email()) _, err = 
Instance.CreateUserIDPlink(ctxOrg, userOrgResp.GetUserId(), "external_org", orgIdpResp.Id, "externalUsername_org") require.NoError(t, err) - userNoLinkResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listidplinks.com", time.Now().UnixNano())) + userNoLinkResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) type args struct { ctx context.Context @@ -363,9 +357,9 @@ func TestServer_RemoveIDPLink(t *testing.T) { got, err := Client.RemoveIDPLink(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) diff --git a/internal/api/grpc/user/v2/integration_test/passkey_test.go b/internal/api/grpc/user/v2/integration_test/passkey_test.go index bba548dfca..585e5ba413 100644 --- a/internal/api/grpc/user/v2/integration_test/passkey_test.go +++ b/internal/api/grpc/user/v2/integration_test/passkey_test.go @@ -584,27 +584,21 @@ func TestServer_ListPasskeys(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, listErr := Client.ListPasskeys(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.ListPasskeys(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, listErr) - if listErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) // always first check length, otherwise its failed anyway assert.Len(ttt, got.Result, len(tt.want.Result)) for i := range tt.want.Result { assert.Contains(ttt, got.Result, tt.want.Result[i]) } - integration.AssertListDetails(t, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected idplinks result") + integration.AssertListDetails(ttt, tt.want, got) + }, retryDuration, tick, "timeout waiting for expected idplinks result") }) } } diff --git a/internal/api/grpc/user/v2/integration_test/password_test.go b/internal/api/grpc/user/v2/integration_test/password_test.go index 00ac0bda8d..7707537653 100644 --- a/internal/api/grpc/user/v2/integration_test/password_test.go +++ b/internal/api/grpc/user/v2/integration_test/password_test.go @@ -94,9 +94,10 @@ func TestServer_RequestPasswordReset(t *testing.T) { got, err := Client.PasswordReset(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) diff --git a/internal/api/grpc/user/v2/integration_test/phone_test.go b/internal/api/grpc/user/v2/integration_test/phone_test.go index f757b0acb4..47590b6d67 100644 --- a/internal/api/grpc/user/v2/integration_test/phone_test.go +++ b/internal/api/grpc/user/v2/integration_test/phone_test.go @@ -4,10 +4,9 @@ package user_test import ( "context" - "fmt" "testing" - "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -112,9 +111,10 @@ func TestServer_SetPhone(t *testing.T) { got, err := Client.SetPhone(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + 
require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) @@ -127,7 +127,7 @@ func TestServer_ResendPhoneCode(t *testing.T) { t.Parallel() userID := Instance.CreateHumanUser(CTX).GetUserId() - verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())).GetUserId() + verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId() tests := []struct { name string @@ -188,9 +188,10 @@ func TestServer_ResendPhoneCode(t *testing.T) { got, err := Client.ResendPhoneCode(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) @@ -245,9 +246,10 @@ func TestServer_VerifyPhone(t *testing.T) { got, err := Client.VerifyPhone(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) }) } @@ -340,12 +342,12 @@ func TestServer_RemovePhone(t *testing.T) { require.NoError(t, depErr) got, err := Client.RemovePhone(tt.ctx, tt.req) - if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) }) } diff --git a/internal/api/grpc/user/v2/integration_test/query_test.go b/internal/api/grpc/user/v2/integration_test/query_test.go index b4d332e6c5..fc2104d62e 100644 --- a/internal/api/grpc/user/v2/integration_test/query_test.go +++ b/internal/api/grpc/user/v2/integration_test/query_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +22,7 @@ import ( func TestServer_GetUserByID(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg-%s", gofakeit.AppName()), gofakeit.Email()) type args struct { ctx context.Context req *user.GetUserByIDRequest @@ -153,23 +154,19 @@ func TestServer_GetUserByID(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - username := fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()) + username := gofakeit.Email() userAttr, err := tt.args.dep(tt.args.ctx, username, tt.args.req) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, getErr := Client.GetUserByID(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, getErr) - if getErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) + tt.want.User.Details = userAttr.Details tt.want.User.UserId = userAttr.UserID tt.want.User.Username = userAttr.Username @@ -183,7 +180,7 @@ func TestServer_GetUserByID(t *testing.T) { } assert.Equal(ttt, tt.want.User, got.User) 
integration.AssertDetails(ttt, tt.want, got) - }, retryDuration, time.Second) + }, retryDuration, tick) }) } } @@ -192,8 +189,8 @@ func TestServer_GetUserByID_Permission(t *testing.T) { t.Parallel() timeNow := time.Now().UTC() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.get.com", timeNow.UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrgOwnerEmail := gofakeit.Email() + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) newUserID := newOrg.CreatedAdmins[0].GetUserId() type args struct { ctx context.Context @@ -307,20 +304,21 @@ func TestServer_GetUserByID_Permission(t *testing.T) { got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) - tt.want.User.UserId = tt.args.req.GetUserId() - tt.want.User.Username = newOrgOwnerEmail - tt.want.User.PreferredLoginName = newOrgOwnerEmail - tt.want.User.LoginNames = []string{newOrgOwnerEmail} - if human := tt.want.User.GetHuman(); human != nil { - human.Email.Email = newOrgOwnerEmail - } - // details tested in GetUserByID - tt.want.User.Details = got.User.GetDetails() - - assert.Equal(t, tt.want.User, got.User) + return } + require.NoError(t, err) + + tt.want.User.UserId = tt.args.req.GetUserId() + tt.want.User.Username = newOrgOwnerEmail + tt.want.User.PreferredLoginName = newOrgOwnerEmail + tt.want.User.LoginNames = []string{newOrgOwnerEmail} + if human := tt.want.User.GetHuman(); human != nil { + human.Email.Email = newOrgOwnerEmail + } + // details tested in GetUserByID + tt.want.User.Details = got.User.GetDetails() + + assert.Equal(t, tt.want.User, got.User) }) } } @@ -335,8 +333,8 @@ type userAttr struct { func TestServer_ListUsers(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) - userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listusers.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg-%s", gofakeit.AppName()), gofakeit.Email()) + userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) type args struct { ctx context.Context count int @@ -806,7 +804,7 @@ func TestServer_ListUsers(t *testing.T) { 3, &user.ListUsersRequest{}, func(ctx context.Context, usernames []string, request *user.ListUsersRequest) ([]userAttr, error) { - orgResp := Instance.CreateOrganization(ctx, fmt.Sprintf("ListUsersResourceowner%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(ctx, fmt.Sprintf("ListUsersResourceowner-%s", gofakeit.AppName()), gofakeit.Email()) infos := make([]userAttr, len(usernames)) for i, username := range usernames { @@ -897,28 +895,24 @@ func TestServer_ListUsers(t *testing.T) { t.Run(tt.name, func(t *testing.T) { usernames := make([]string, tt.args.count) for i := 0; i < tt.args.count; i++ { - usernames[i] = fmt.Sprintf("%d%d@mouse.com", time.Now().UnixNano(), i) + usernames[i] = gofakeit.Email() } infos, err := tt.args.dep(tt.args.ctx, usernames, tt.args.req) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, 
time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, listErr := Client.ListUsers(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.ListUsers(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, listErr) - if listErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) + // always only give back dependency infos which are required for the response - assert.Len(ttt, tt.want.Result, len(infos)) + require.Len(ttt, tt.want.Result, len(infos)) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions tt.want.Details.TotalResult = got.Details.TotalResult @@ -941,7 +935,7 @@ func TestServer_ListUsers(t *testing.T) { assert.Contains(ttt, got.Result, tt.want.Result[i]) } integration.AssertListDetails(ttt, tt.want, got) - }, retryDuration, time.Millisecond*100, "timeout waiting for expected user result") + }, retryDuration, tick, "timeout waiting for expected user result") }) } } diff --git a/internal/api/grpc/user/v2/integration_test/user_test.go b/internal/api/grpc/user/v2/integration_test/user_test.go index 8790030b54..3ebe760d91 100644 --- a/internal/api/grpc/user/v2/integration_test/user_test.go +++ b/internal/api/grpc/user/v2/integration_test/user_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -659,16 +660,20 @@ func TestServer_AddHumanUser(t *testing.T) { got, err := Client.AddHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) assert.Equal(t, tt.want.GetUserId(), got.GetUserId()) if tt.want.GetEmailCode() != "" { assert.NotEmpty(t, got.GetEmailCode()) + } else { + assert.Empty(t, got.GetEmailCode()) } if tt.want.GetPhoneCode() != "" { assert.NotEmpty(t, got.GetPhoneCode()) + } else { + assert.Empty(t, got.GetPhoneCode()) } integration.AssertDetails(t, tt.want, got) }) @@ -678,8 +683,8 @@ func TestServer_AddHumanUser(t *testing.T) { func TestServer_AddHumanUser_Permission(t *testing.T) { t.Parallel() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.com", time.Now().UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrgOwnerEmail := gofakeit.Email() + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) type args struct { ctx context.Context req *user.AddHumanUserRequest @@ -860,9 +865,9 @@ func TestServer_AddHumanUser_Permission(t *testing.T) { got, err := Client.AddHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) assert.Equal(t, tt.want.GetUserId(), got.GetUserId()) integration.AssertDetails(t, tt.want, got) @@ -908,7 +913,7 @@ func TestServer_UpdateHumanUser(t *testing.T) { args: args{ CTX, &user.UpdateHumanUserRequest{ - Username: gu.Ptr(fmt.Sprint(time.Now().UnixNano() + 1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1214,14 +1219,19 @@ func TestServer_UpdateHumanUser(t *testing.T) { got, err := Client.UpdateHumanUser(tt.args.ctx, tt.args.req) if 
tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + if tt.want.GetEmailCode() != "" { assert.NotEmpty(t, got.GetEmailCode()) + } else { + assert.Empty(t, got.GetEmailCode()) } if tt.want.GetPhoneCode() != "" { assert.NotEmpty(t, got.GetPhoneCode()) + } else { + assert.Empty(t, got.GetPhoneCode()) } integration.AssertDetails(t, tt.want, got) }) @@ -1231,8 +1241,8 @@ func TestServer_UpdateHumanUser(t *testing.T) { func TestServer_UpdateHumanUser_Permission(t *testing.T) { t.Parallel() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.update.com", time.Now().UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrgOwnerEmail := gofakeit.Email() + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) newUserID := newOrg.CreatedAdmins[0].GetUserId() type args struct { ctx context.Context @@ -1250,7 +1260,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { SystemCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("system", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1266,7 +1276,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { IamCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("instance", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1282,7 +1292,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { CTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("org", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, wantErr: true, @@ -1293,7 +1303,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { UserCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("user", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, wantErr: true, @@ -1415,9 +1425,9 @@ func TestServer_LockUser(t *testing.T) { got, err := Client.LockUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1525,9 +1535,9 @@ func TestServer_UnLockUser(t *testing.T) { got, err := Client.UnlockUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1635,9 +1645,10 @@ func TestServer_DeactivateUser(t *testing.T) { got, err := Client.DeactivateUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) }) } @@ -1745,9 +1756,9 @@ func TestServer_ReactivateUser(t *testing.T) { got, err := Client.ReactivateUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1846,9 +1857,9 @@ func TestServer_DeleteUser(t *testing.T) { got, err := Client.DeleteUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1859,7 +1870,7 @@ func 
TestServer_StartIdentityProviderIntent(t *testing.T) { idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id) orgIdpResp := Instance.AddOrgGenericOAuthProvider(CTX, Instance.DefaultOrg.Id) - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg-%s", gofakeit.AppName()), gofakeit.Email()) notDefaultOrgIdpResp := Instance.AddOrgGenericOAuthProvider(IamCTX, orgResp.OrganizationId) samlIdpID := Instance.AddSAMLProvider(IamCTX) samlRedirectIdpID := Instance.AddSAMLRedirectProvider(IamCTX, "") @@ -2092,15 +2103,14 @@ func TestServer_StartIdentityProviderIntent(t *testing.T) { got, err := Client.StartIdentityProviderIntent(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) if tt.want.url != "" { authUrl, err := url.Parse(got.GetAuthUrl()) - assert.NoError(t, err) - - assert.Len(t, authUrl.Query(), len(tt.want.parametersEqual)+len(tt.want.parametersExisting)) + require.NoError(t, err) + require.Len(t, authUrl.Query(), len(tt.want.parametersEqual)+len(tt.want.parametersExisting)) for _, existing := range tt.want.parametersExisting { assert.True(t, authUrl.Query().Has(existing)) @@ -2771,9 +2781,10 @@ func TestServer_CreateInviteCode(t *testing.T) { got, err := Client.CreateInviteCode(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetInviteCode() != "" { assert.NotEmpty(t, got.GetInviteCode()) @@ -2866,9 +2877,10 @@ func TestServer_ResendInviteCode(t *testing.T) { got, err := Client.ResendInviteCode(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) }) } @@ -2957,9 +2969,9 @@ func TestServer_VerifyInviteCode(t *testing.T) { got, err := Client.VerifyInviteCode(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } diff --git a/internal/api/grpc/user/v2beta/integration_test/email_test.go b/internal/api/grpc/user/v2beta/integration_test/email_test.go index 073a432078..ebace6daa2 100644 --- a/internal/api/grpc/user/v2beta/integration_test/email_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/email_test.go @@ -3,10 +3,9 @@ package user_test import ( - "fmt" "testing" - "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -134,12 +133,15 @@ func TestServer_SetEmail(t *testing.T) { got, err := Client.SetEmail(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -149,7 +151,7 @@ func TestServer_ResendEmailCode(t *testing.T) { t.Parallel() userID := Instance.CreateHumanUser(CTX).GetUserId() - verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())).GetUserId() + verifiedUserID := 
Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId() tests := []struct { name string @@ -237,12 +239,15 @@ func TestServer_ResendEmailCode(t *testing.T) { got, err := Client.ResendEmailCode(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -294,9 +299,9 @@ func TestServer_VerifyEmail(t *testing.T) { got, err := Client.VerifyEmail(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } diff --git a/internal/api/grpc/user/v2beta/integration_test/password_test.go b/internal/api/grpc/user/v2beta/integration_test/password_test.go index 254a13a53e..5995f87c7f 100644 --- a/internal/api/grpc/user/v2beta/integration_test/password_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/password_test.go @@ -94,12 +94,15 @@ func TestServer_RequestPasswordReset(t *testing.T) { got, err := Client.PasswordReset(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } diff --git a/internal/api/grpc/user/v2beta/integration_test/phone_test.go b/internal/api/grpc/user/v2beta/integration_test/phone_test.go index afcbc35c4a..03567f4023 100644 --- a/internal/api/grpc/user/v2beta/integration_test/phone_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/phone_test.go @@ -4,10 +4,9 @@ package user_test import ( "context" - "fmt" "testing" - "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -112,12 +111,15 @@ func TestServer_SetPhone(t *testing.T) { got, err := Client.SetPhone(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -127,7 +129,7 @@ func TestServer_ResendPhoneCode(t *testing.T) { t.Parallel() userID := Instance.CreateHumanUser(CTX).GetUserId() - verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())).GetUserId() + verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId() tests := []struct { name string @@ -188,12 +190,14 @@ func TestServer_ResendPhoneCode(t *testing.T) { got, err := Client.ResendPhoneCode(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) if tt.want.GetVerificationCode() != "" { assert.NotEmpty(t, got.GetVerificationCode()) + } else { + assert.Empty(t, got.GetVerificationCode()) } }) } @@ -245,9 +249,9 @@ func TestServer_VerifyPhone(t *testing.T) { got, err := Client.VerifyPhone(CTX, tt.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + 
require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -340,12 +344,12 @@ func TestServer_RemovePhone(t *testing.T) { require.NoError(t, depErr) got, err := Client.RemovePhone(tt.ctx, tt.req) - if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + integration.AssertDetails(t, tt.want, got) }) } diff --git a/internal/api/grpc/user/v2beta/integration_test/query_test.go b/internal/api/grpc/user/v2beta/integration_test/query_test.go index f7c12fd03a..4ea911727c 100644 --- a/internal/api/grpc/user/v2beta/integration_test/query_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/query_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,7 +31,7 @@ func detailsV2ToV2beta(obj *object.Details) *object_v2beta.Details { func TestServer_GetUserByID(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg-%s", gofakeit.AppName()), gofakeit.Email()) type args struct { ctx context.Context req *user.GetUserByIDRequest @@ -162,23 +163,19 @@ func TestServer_GetUserByID(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - username := fmt.Sprintf("%d@mouse.com", time.Now().UnixNano()) + username := gofakeit.Email() userAttr, err := tt.args.dep(tt.args.ctx, username, tt.args.req) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, getErr := Client.GetUserByID(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, getErr) - if getErr != nil { + require.Error(t, err) return } + require.NoError(t, err) + tt.want.User.Details = detailsV2ToV2beta(userAttr.Details) tt.want.User.UserId = userAttr.UserID tt.want.User.Username = userAttr.Username @@ -192,7 +189,7 @@ func TestServer_GetUserByID(t *testing.T) { } assert.Equal(ttt, tt.want.User, got.User) integration.AssertDetails(t, tt.want, got) - }, retryDuration, time.Second) + }, retryDuration, tick) }) } } @@ -201,8 +198,8 @@ func TestServer_GetUserByID_Permission(t *testing.T) { t.Parallel() timeNow := time.Now().UTC() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.get.com", timeNow.UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrgOwnerEmail := gofakeit.Email() + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) newUserID := newOrg.CreatedAdmins[0].GetUserId() type args struct { ctx context.Context @@ -313,11 +310,14 @@ func TestServer_GetUserByID_Permission(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) + 
require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) + if tt.wantErr { + require.Error(ttt, err) + return + } + require.NoError(ttt, err) tt.want.User.UserId = tt.args.req.GetUserId() tt.want.User.Username = newOrgOwnerEmail tt.want.User.PreferredLoginName = newOrgOwnerEmail @@ -328,8 +328,8 @@ func TestServer_GetUserByID_Permission(t *testing.T) { // details tested in GetUserByID tt.want.User.Details = got.User.GetDetails() - assert.Equal(t, tt.want.User, got.User) - } + assert.Equal(ttt, tt.want.User, got.User) + }, retryDuration, tick, "timeout waiting for expected user result") }) } } @@ -344,8 +344,8 @@ type userAttr struct { func TestServer_ListUsers(t *testing.T) { t.Parallel() - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) - userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, fmt.Sprintf("%d@listusers.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg-%s", gofakeit.AppName()), gofakeit.Email()) + userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email()) type args struct { ctx context.Context count int @@ -815,7 +815,7 @@ func TestServer_ListUsers(t *testing.T) { 3, &user.ListUsersRequest{}, func(ctx context.Context, usernames []string, request *user.ListUsersRequest) ([]userAttr, error) { - orgResp := Instance.CreateOrganization(ctx, fmt.Sprintf("ListUsersResourceowner%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(ctx, fmt.Sprintf("ListUsersResourceowner-%s", gofakeit.AppName()), gofakeit.Email()) infos := make([]userAttr, len(usernames)) for i, username := range usernames { @@ -906,28 +906,24 @@ func TestServer_ListUsers(t *testing.T) { t.Run(tt.name, func(t *testing.T) { usernames := make([]string, tt.args.count) for i := 0; i < tt.args.count; i++ { - usernames[i] = fmt.Sprintf("%d%d@mouse.com", time.Now().UnixNano(), i) + usernames[i] = gofakeit.Email() } infos, err := tt.args.dep(tt.args.ctx, usernames, tt.args.req) require.NoError(t, err) - retryDuration := time.Minute - if ctxDeadline, ok := CTX.Deadline(); ok { - retryDuration = time.Until(ctxDeadline) - } + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { - got, listErr := Client.ListUsers(tt.args.ctx, tt.args.req) - assertErr := assert.NoError + got, err := Client.ListUsers(tt.args.ctx, tt.args.req) if tt.wantErr { - assertErr = assert.Error - } - assertErr(ttt, listErr) - if listErr != nil { + require.Error(ttt, err) return } + require.NoError(ttt, err) + // always only give back dependency infos which are required for the response - assert.Len(ttt, tt.want.Result, len(infos)) + require.Len(ttt, tt.want.Result, len(infos)) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) + require.Len(ttt, got.Result, len(tt.want.Result)) // fill in userid and username as it is generated // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions @@ -949,8 +945,8 @@ func TestServer_ListUsers(t *testing.T) { for i := range tt.want.Result { assert.Contains(ttt, got.Result, tt.want.Result[i]) } - integration.AssertListDetails(t, tt.want, got) - }, retryDuration, 
time.Millisecond*100, "timeout waiting for expected user result") + integration.AssertListDetails(ttt, tt.want, got) + }, retryDuration, tick, "timeout waiting for expected user result") }) } } diff --git a/internal/api/grpc/user/v2beta/integration_test/user_test.go b/internal/api/grpc/user/v2beta/integration_test/user_test.go index 587d6b5fe3..7b158f0d68 100644 --- a/internal/api/grpc/user/v2beta/integration_test/user_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/user_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -616,16 +617,20 @@ func TestServer_AddHumanUser(t *testing.T) { got, err := Client.AddHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) assert.Equal(t, tt.want.GetUserId(), got.GetUserId()) if tt.want.GetEmailCode() != "" { assert.NotEmpty(t, got.GetEmailCode()) + } else { + assert.Empty(t, got.GetEmailCode()) } if tt.want.GetPhoneCode() != "" { assert.NotEmpty(t, got.GetPhoneCode()) + } else { + assert.Empty(t, got.GetPhoneCode()) } integration.AssertDetails(t, tt.want, got) }) @@ -635,8 +640,7 @@ func TestServer_AddHumanUser(t *testing.T) { func TestServer_AddHumanUser_Permission(t *testing.T) { t.Parallel() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.com", time.Now().UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman-%s", gofakeit.AppName()), gofakeit.Email()) type args struct { ctx context.Context req *user.AddHumanUserRequest @@ -817,9 +821,9 @@ func TestServer_AddHumanUser_Permission(t *testing.T) { got, err := Client.AddHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) assert.Equal(t, tt.want.GetUserId(), got.GetUserId()) integration.AssertDetails(t, tt.want, got) @@ -865,7 +869,7 @@ func TestServer_UpdateHumanUser(t *testing.T) { args: args{ CTX, &user.UpdateHumanUserRequest{ - Username: gu.Ptr(fmt.Sprint(time.Now().UnixNano() + 1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1171,14 +1175,19 @@ func TestServer_UpdateHumanUser(t *testing.T) { got, err := Client.UpdateHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) + if tt.want.GetEmailCode() != "" { assert.NotEmpty(t, got.GetEmailCode()) + } else { + assert.Empty(t, got.GetEmailCode()) } if tt.want.GetPhoneCode() != "" { assert.NotEmpty(t, got.GetPhoneCode()) + } else { + assert.Empty(t, got.GetPhoneCode()) } integration.AssertDetails(t, tt.want, got) }) @@ -1188,8 +1197,7 @@ func TestServer_UpdateHumanUser(t *testing.T) { func TestServer_UpdateHumanUser_Permission(t *testing.T) { t.Parallel() - newOrgOwnerEmail := fmt.Sprintf("%d@permission.update.com", time.Now().UnixNano()) - newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman%d", time.Now().UnixNano()), newOrgOwnerEmail) + newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman-%s", gofakeit.AppName()), gofakeit.Email()) newUserID := newOrg.CreatedAdmins[0].GetUserId() type args struct { ctx context.Context @@ -1207,7 +1215,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { 
SystemCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("system", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1223,7 +1231,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { IamCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("instance", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, want: &user.UpdateHumanUserResponse{ @@ -1239,7 +1247,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { CTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("org", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, wantErr: true, @@ -1250,7 +1258,7 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { UserCTX, &user.UpdateHumanUserRequest{ UserId: newUserID, - Username: gu.Ptr(fmt.Sprint("user", time.Now().UnixNano()+1)), + Username: gu.Ptr(gofakeit.Username()), }, }, wantErr: true, @@ -1262,9 +1270,9 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) { got, err := Client.UpdateHumanUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1482,9 +1490,9 @@ func TestServer_UnLockUser(t *testing.T) { got, err := Client.UnlockUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1592,9 +1600,9 @@ func TestServer_DeactivateUser(t *testing.T) { got, err := Client.DeactivateUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1702,9 +1710,9 @@ func TestServer_ReactivateUser(t *testing.T) { got, err := Client.ReactivateUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1803,9 +1811,9 @@ func TestServer_DeleteUser(t *testing.T) { got, err := Client.DeleteUser(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1884,10 +1892,9 @@ func TestServer_AddIDPLink(t *testing.T) { got, err := Client.AddIDPLink(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } - + require.NoError(t, err) integration.AssertDetails(t, tt.want, got) }) } @@ -1898,7 +1905,7 @@ func TestServer_StartIdentityProviderIntent(t *testing.T) { idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id) orgIdpID := Instance.AddOrgGenericOAuthProvider(CTX, Instance.DefaultOrg.Id) - orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg-%s", gofakeit.AppName()), gofakeit.Email()) notDefaultOrgIdpID := Instance.AddOrgGenericOAuthProvider(IamCTX, orgResp.OrganizationId) samlIdpID := Instance.AddSAMLProvider(IamCTX) samlRedirectIdpID := Instance.AddSAMLRedirectProvider(IamCTX, "") @@ -2131,15 +2138,14 @@ func TestServer_StartIdentityProviderIntent(t *testing.T) { 
got, err := Client.StartIdentityProviderIntent(tt.args.ctx, tt.args.req) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) + return } + require.NoError(t, err) if tt.want.url != "" { authUrl, err := url.Parse(got.GetAuthUrl()) - assert.NoError(t, err) - - assert.Len(t, authUrl.Query(), len(tt.want.parametersEqual)+len(tt.want.parametersExisting)) + require.NoError(t, err) + require.Len(t, authUrl.Query(), len(tt.want.parametersEqual)+len(tt.want.parametersExisting)) for _, existing := range tt.want.parametersExisting { assert.True(t, authUrl.Query().Has(existing)) diff --git a/internal/api/idp/integration_test/idp_test.go b/internal/api/idp/integration_test/idp_test.go index 609b98262b..8e7141271a 100644 --- a/internal/api/idp/integration_test/idp_test.go +++ b/internal/api/idp/integration_test/idp_test.go @@ -335,17 +335,18 @@ func TestServer_SAMLACS(t *testing.T) { location, err := integration.CheckPost(callbackURL, httpPostFormRequest(relayState, response)) if tt.wantErr { require.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, relayState, location.Query().Get("id")) - if tt.want.successful { - assert.True(t, strings.HasPrefix(location.String(), tt.args.successURL)) - assert.NotEmpty(t, location.Query().Get("token")) - assert.Equal(t, tt.want.user, location.Query().Get("user")) - } else { - assert.True(t, strings.HasPrefix(location.String(), tt.args.failureURL)) - } + return } + require.NoError(t, err) + assert.Equal(t, relayState, location.Query().Get("id")) + if tt.want.successful { + assert.True(t, strings.HasPrefix(location.String(), tt.args.successURL)) + assert.NotEmpty(t, location.Query().Get("token")) + assert.Equal(t, tt.want.user, location.Query().Get("user")) + } else { + assert.True(t, strings.HasPrefix(location.String(), tt.args.failureURL)) + } + }) } } diff --git a/internal/api/oidc/integration_test/oidc_test.go b/internal/api/oidc/integration_test/oidc_test.go index 016745cf56..86754aab0e 100644 --- a/internal/api/oidc/integration_test/oidc_test.go +++ b/internal/api/oidc/integration_test/oidc_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/brianvoe/gofakeit/v6" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/zitadel/oidc/v3/pkg/client/rp" @@ -121,7 +122,7 @@ func Test_ZITADEL_API_missing_authentication(t *testing.T) { func Test_ZITADEL_API_missing_mfa_policy(t *testing.T) { clientID, _ := createClient(t, Instance) - org := Instance.CreateOrganization(CTXIAM, fmt.Sprintf("ZITADEL_API_MISSING_MFA_%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + org := Instance.CreateOrganization(CTXIAM, fmt.Sprintf("ZITADEL_API_MISSING_MFA_%s", gofakeit.AppName()), gofakeit.Email()) userID := org.CreatedAdmins[0].GetUserId() Instance.SetUserPassword(CTXIAM, userID, integration.UserPassword, false) authRequestID := createAuthRequest(t, Instance, clientID, redirectURI, oidc.ScopeOpenID, zitadelAudienceScope) diff --git a/internal/api/oidc/integration_test/token_exchange_test.go b/internal/api/oidc/integration_test/token_exchange_test.go index 26c4475024..5b0b86f0ec 100644 --- a/internal/api/oidc/integration_test/token_exchange_test.go +++ b/internal/api/oidc/integration_test/token_exchange_test.go @@ -25,40 +25,71 @@ import ( "github.com/zitadel/zitadel/pkg/grpc/feature/v2" ) -func setTokenExchangeFeature(t *testing.T, value bool) { - iamCTX := Instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) +func setTokenExchangeFeature(t *testing.T, instance 
*integration.Instance, value bool) { + iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) - _, err := Instance.Client.FeatureV2.SetInstanceFeatures(iamCTX, &feature.SetInstanceFeaturesRequest{ + _, err := instance.Client.FeatureV2.SetInstanceFeatures(iamCTX, &feature.SetInstanceFeaturesRequest{ OidcTokenExchange: proto.Bool(value), }) require.NoError(t, err) + retryDuration := time.Minute + if ctxDeadline, ok := iamCTX.Deadline(); ok { + retryDuration = time.Until(ctxDeadline) + } + require.EventuallyWithT(t, + func(ttt *assert.CollectT) { + f, err := instance.Client.FeatureV2.GetInstanceFeatures(iamCTX, &feature.GetInstanceFeaturesRequest{ + Inheritance: true, + }) + assert.NoError(ttt, err) + if f.OidcTokenExchange.GetEnabled() { + return + } + }, + retryDuration, + time.Second, + "timed out waiting for ensuring instance feature") time.Sleep(time.Second) } -func resetFeatures(t *testing.T) { - iamCTX := Instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) - _, err := Instance.Client.FeatureV2.ResetInstanceFeatures(iamCTX, &feature.ResetInstanceFeaturesRequest{}) +func resetFeatures(t *testing.T, instance *integration.Instance) { + iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) + _, err := instance.Client.FeatureV2.ResetInstanceFeatures(iamCTX, &feature.ResetInstanceFeaturesRequest{}) require.NoError(t, err) time.Sleep(time.Second) } -func setImpersonationPolicy(t *testing.T, value bool) { - iamCTX := Instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) +func setImpersonationPolicy(t *testing.T, instance *integration.Instance, value bool) { + iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) - policy, err := Instance.Client.Admin.GetSecurityPolicy(iamCTX, &admin.GetSecurityPolicyRequest{}) + policy, err := instance.Client.Admin.GetSecurityPolicy(iamCTX, &admin.GetSecurityPolicyRequest{}) require.NoError(t, err) if policy.GetPolicy().GetEnableImpersonation() != value { - _, err = Instance.Client.Admin.SetSecurityPolicy(iamCTX, &admin.SetSecurityPolicyRequest{ + _, err = instance.Client.Admin.SetSecurityPolicy(iamCTX, &admin.SetSecurityPolicyRequest{ EnableImpersonation: value, }) require.NoError(t, err) } - time.Sleep(time.Second) + + retryDuration := time.Minute + if ctxDeadline, ok := iamCTX.Deadline(); ok { + retryDuration = time.Until(ctxDeadline) + } + require.EventuallyWithT(t, + func(ttt *assert.CollectT) { + f, err := instance.Client.Admin.GetSecurityPolicy(iamCTX, &admin.GetSecurityPolicyRequest{}) + assert.NoError(ttt, err) + if f.GetPolicy().GetEnableImpersonation() != value { + return + } + }, + retryDuration, + time.Second, + "timed out waiting for ensuring impersonation policy") } -func createMachineUserPATWithMembership(t *testing.T, roles ...string) (userID, pat string) { - iamCTX := Instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) - userID, pat, err := Instance.CreateMachineUserPATWithMembership(iamCTX, roles...) +func createMachineUserPATWithMembership(ctx context.Context, t *testing.T, instance *integration.Instance, roles ...string) (userID, pat string) { + userID, pat, err := instance.CreateMachineUserPATWithMembership(ctx, roles...) 
require.NoError(t, err) return userID, pat } @@ -114,40 +145,34 @@ func refreshTokenVerifier(ctx context.Context, provider rp.RelyingParty, subject func TestServer_TokenExchange(t *testing.T) { t.Parallel() - t.Cleanup(func() { - resetFeatures(t) - setImpersonationPolicy(t, false) - }) + instance := integration.NewInstance(CTX) + ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) + userResp := instance.CreateHumanUser(ctx) - client, keyData, err := Instance.CreateOIDCTokenExchangeClient(CTX) + client, keyData, err := instance.CreateOIDCTokenExchangeClient(ctx) require.NoError(t, err) signer, err := rp.SignerFromKeyFile(keyData)() require.NoError(t, err) - exchanger, err := tokenexchange.NewTokenExchangerJWTProfile(CTX, Instance.OIDCIssuer(), client.GetClientId(), signer) + exchanger, err := tokenexchange.NewTokenExchangerJWTProfile(ctx, instance.OIDCIssuer(), client.GetClientId(), signer) require.NoError(t, err) - time.Sleep(time.Second) + _, orgImpersonatorPAT := createMachineUserPATWithMembership(ctx, t, instance, "ORG_ADMIN_IMPERSONATOR") + serviceUserID, noPermPAT := createMachineUserPATWithMembership(ctx, t, instance) - iamUserID, iamImpersonatorPAT := createMachineUserPATWithMembership(t, "IAM_ADMIN_IMPERSONATOR") - orgUserID, orgImpersonatorPAT := createMachineUserPATWithMembership(t, "ORG_ADMIN_IMPERSONATOR") - serviceUserID, noPermPAT := createMachineUserPATWithMembership(t) - - // exchange some tokens for later use - setTokenExchangeFeature(t, true) - teResp, err := tokenexchange.ExchangeToken(CTX, exchanger, noPermPAT, oidc.AccessTokenType, "", "", nil, nil, nil, oidc.AccessTokenType) + // test that feature is disabled per default + teResp, err := tokenexchange.ExchangeToken(ctx, exchanger, noPermPAT, oidc.AccessTokenType, "", "", nil, nil, nil, oidc.AccessTokenType) + require.Error(t, err) + setTokenExchangeFeature(t, instance, true) + teResp, err = tokenexchange.ExchangeToken(ctx, exchanger, noPermPAT, oidc.AccessTokenType, "", "", nil, nil, nil, oidc.AccessTokenType) require.NoError(t, err) patScopes := oidc.SpaceDelimitedArray{"openid", "profile", "urn:zitadel:iam:user:metadata", "urn:zitadel:iam:user:resourceowner"} - relyingParty, err := rp.NewRelyingPartyOIDC(CTX, Instance.OIDCIssuer(), client.GetClientId(), "", "", []string{"openid"}, rp.WithJWTProfile(rp.SignerFromKeyFile(keyData))) + relyingParty, err := rp.NewRelyingPartyOIDC(ctx, instance.OIDCIssuer(), client.GetClientId(), "", "", []string{"openid"}, rp.WithJWTProfile(rp.SignerFromKeyFile(keyData))) require.NoError(t, err) - resourceServer, err := Instance.CreateResourceServerJWTProfile(CTX, keyData) + resourceServer, err := instance.CreateResourceServerJWTProfile(ctx, keyData) require.NoError(t, err) - type settings struct { - tokenExchangeFeature bool - impersonationPolicy bool - } type args struct { SubjectToken string SubjectTokenType oidc.TokenType @@ -168,30 +193,13 @@ func TestServer_TokenExchange(t *testing.T) { verifyIDToken func(t *testing.T, token string) } tests := []struct { - name string - settings settings - args args - want result - wantErr bool + name string + args args + want result + wantErr bool }{ - { - name: "feature disabled error", - settings: settings{ - tokenExchangeFeature: false, - impersonationPolicy: false, - }, - args: args{ - SubjectToken: noPermPAT, - SubjectTokenType: oidc.AccessTokenType, - }, - wantErr: true, - }, { name: "unsupported resource parameter", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ 
SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -201,10 +209,6 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "invalid subject token", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: "foo", SubjectTokenType: oidc.AccessTokenType, @@ -213,10 +217,6 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "EXCHANGE: access token to default", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -226,16 +226,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, ""), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, ""), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, ""), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, ""), }, }, { name: "EXCHANGE: access token to access token", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -246,16 +242,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, ""), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, ""), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, ""), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, ""), }, }, { name: "EXCHANGE: access token to JWT", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -266,16 +258,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, ""), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, ""), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, ""), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, ""), }, }, { name: "EXCHANGE: access token to ID Token", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -286,7 +274,7 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: "N_A", expiresIn: 43100, scopes: patScopes, - verifyAccessToken: idTokenVerifier(CTX, relyingParty, serviceUserID, ""), + verifyAccessToken: idTokenVerifier(ctx, relyingParty, serviceUserID, ""), verifyIDToken: func(t *testing.T, token string) { assert.Empty(t, token) }, @@ -294,10 +282,6 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "EXCHANGE: refresh token not allowed", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: teResp.RefreshToken, SubjectTokenType: oidc.RefreshTokenType, @@ -307,10 +291,6 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "EXCHANGE: alternate scope for refresh token", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -322,17 +302,13 @@ func 
TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, "profile"}, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, ""), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, ""), - verifyRefreshToken: refreshTokenVerifier(CTX, relyingParty, "", ""), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, ""), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, ""), + verifyRefreshToken: refreshTokenVerifier(ctx, relyingParty, "", ""), }, }, { name: "EXCHANGE: access token, requested token type not supported error", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -342,10 +318,6 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "EXCHANGE: access token, invalid audience", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ SubjectToken: noPermPAT, SubjectTokenType: oidc.AccessTokenType, @@ -356,12 +328,8 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "IMPERSONATION: subject: userID, actor: access token, policy disabled error", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: false, - }, args: args{ - SubjectToken: User.GetUserId(), + SubjectToken: userResp.GetUserId(), SubjectTokenType: oidc_api.UserIDTokenType, RequestedTokenType: oidc.AccessTokenType, ActorToken: orgImpersonatorPAT, @@ -369,14 +337,94 @@ func TestServer_TokenExchange(t *testing.T) { }, wantErr: true, }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tokenexchange.ExchangeToken(ctx, exchanger, tt.args.SubjectToken, tt.args.SubjectTokenType, tt.args.ActorToken, tt.args.ActorTokenType, tt.args.Resource, tt.args.Audience, tt.args.Scopes, tt.args.RequestedTokenType) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.want.issuedTokenType, got.IssuedTokenType) + assert.Equal(t, tt.want.tokenType, got.TokenType) + assert.Greater(t, got.ExpiresIn, tt.want.expiresIn) + assert.Equal(t, tt.want.scopes, got.Scopes) + if tt.want.verifyAccessToken != nil { + tt.want.verifyAccessToken(t, got.AccessToken) + } + if tt.want.verifyRefreshToken != nil { + tt.want.verifyRefreshToken(t, got.RefreshToken) + } + if tt.want.verifyIDToken != nil { + tt.want.verifyIDToken(t, got.IDToken) + } + }) + } +} + +func TestServer_TokenExchangeImpersonation(t *testing.T) { + t.Parallel() + + instance := integration.NewInstance(CTX) + ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) + userResp := instance.CreateHumanUser(ctx) + + // exchange some tokens for later use + setTokenExchangeFeature(t, instance, true) + setImpersonationPolicy(t, instance, true) + + client, keyData, err := instance.CreateOIDCTokenExchangeClient(ctx) + require.NoError(t, err) + signer, err := rp.SignerFromKeyFile(keyData)() + require.NoError(t, err) + exchanger, err := tokenexchange.NewTokenExchangerJWTProfile(ctx, instance.OIDCIssuer(), client.GetClientId(), signer) + require.NoError(t, err) + + iamUserID, iamImpersonatorPAT := createMachineUserPATWithMembership(ctx, t, instance, "IAM_ADMIN_IMPERSONATOR") + orgUserID, orgImpersonatorPAT := createMachineUserPATWithMembership(ctx, t, instance, "ORG_ADMIN_IMPERSONATOR") + serviceUserID, noPermPAT := createMachineUserPATWithMembership(ctx, t, instance) 
+ + teResp, err := tokenexchange.ExchangeToken(ctx, exchanger, noPermPAT, oidc.AccessTokenType, "", "", nil, nil, nil, oidc.AccessTokenType) + require.NoError(t, err) + + patScopes := oidc.SpaceDelimitedArray{"openid", "profile", "urn:zitadel:iam:user:metadata", "urn:zitadel:iam:user:resourceowner"} + + relyingParty, err := rp.NewRelyingPartyOIDC(ctx, instance.OIDCIssuer(), client.GetClientId(), "", "", []string{"openid"}, rp.WithJWTProfile(rp.SignerFromKeyFile(keyData))) + require.NoError(t, err) + resourceServer, err := instance.CreateResourceServerJWTProfile(ctx, keyData) + require.NoError(t, err) + + type args struct { + SubjectToken string + SubjectTokenType oidc.TokenType + ActorToken string + ActorTokenType oidc.TokenType + Resource []string + Audience []string + Scopes []string + RequestedTokenType oidc.TokenType + } + type result struct { + issuedTokenType oidc.TokenType + tokenType string + expiresIn uint64 + scopes oidc.SpaceDelimitedArray + verifyAccessToken func(t *testing.T, token string) + verifyRefreshToken func(t *testing.T, token string) + verifyIDToken func(t *testing.T, token string) + } + tests := []struct { + name string + args args + want result + wantErr bool + }{ { name: "IMPERSONATION: subject: userID, actor: access token, membership not found error", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ - SubjectToken: User.GetUserId(), + SubjectToken: userResp.GetUserId(), SubjectTokenType: oidc_api.UserIDTokenType, RequestedTokenType: oidc.AccessTokenType, ActorToken: noPermPAT, @@ -386,12 +434,8 @@ func TestServer_TokenExchange(t *testing.T) { }, { name: "IAM IMPERSONATION: subject: userID, actor: access token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ - SubjectToken: User.GetUserId(), + SubjectToken: userResp.GetUserId(), SubjectTokenType: oidc_api.UserIDTokenType, RequestedTokenType: oidc.AccessTokenType, ActorToken: iamImpersonatorPAT, @@ -402,18 +446,14 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, User.GetUserId(), iamUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, User.GetUserId(), iamUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, userResp.GetUserId(), iamUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, userResp.GetUserId(), iamUserID), }, }, { name: "ORG IMPERSONATION: subject: userID, actor: access token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ - SubjectToken: User.GetUserId(), + SubjectToken: userResp.GetUserId(), SubjectTokenType: oidc_api.UserIDTokenType, RequestedTokenType: oidc.AccessTokenType, ActorToken: orgImpersonatorPAT, @@ -424,16 +464,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, User.GetUserId(), orgUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, User.GetUserId(), orgUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, userResp.GetUserId(), orgUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, userResp.GetUserId(), orgUserID), }, }, { name: "ORG IMPERSONATION: subject: access token, actor: access token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ 
SubjectToken: teResp.AccessToken, SubjectTokenType: oidc.AccessTokenType, @@ -446,16 +482,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, orgUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, orgUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, orgUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, orgUserID), }, }, { name: "ORG IMPERSONATION: subject: ID token, actor: access token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ SubjectToken: teResp.IDToken, SubjectTokenType: oidc.IDTokenType, @@ -468,22 +500,18 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, orgUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, orgUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, orgUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, orgUserID), }, }, { name: "ORG IMPERSONATION: subject: JWT, actor: access token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ SubjectToken: func() string { token, err := crypto.Sign(&oidc.JWTTokenRequest{ Issuer: client.GetClientId(), - Subject: User.GetUserId(), - Audience: oidc.Audience{Instance.OIDCIssuer()}, + Subject: userResp.GetUserId(), + Audience: oidc.Audience{instance.OIDCIssuer()}, ExpiresAt: oidc.FromTime(time.Now().Add(time.Hour)), IssuedAt: oidc.FromTime(time.Now().Add(-time.Second)), }, signer) @@ -500,16 +528,12 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: patScopes, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, User.GetUserId(), orgUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, User.GetUserId(), orgUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, userResp.GetUserId(), orgUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, userResp.GetUserId(), orgUserID), }, }, { name: "ORG IMPERSONATION: subject: access token, actor: access token, with refresh token, success", - settings: settings{ - tokenExchangeFeature: true, - impersonationPolicy: true, - }, args: args{ SubjectToken: teResp.AccessToken, SubjectTokenType: oidc.AccessTokenType, @@ -523,19 +547,15 @@ func TestServer_TokenExchange(t *testing.T) { tokenType: oidc.BearerToken, expiresIn: 43100, scopes: []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess}, - verifyAccessToken: accessTokenVerifier(CTX, resourceServer, serviceUserID, orgUserID), - verifyIDToken: idTokenVerifier(CTX, relyingParty, serviceUserID, orgUserID), - verifyRefreshToken: refreshTokenVerifier(CTX, relyingParty, serviceUserID, orgUserID), + verifyAccessToken: accessTokenVerifier(ctx, resourceServer, serviceUserID, orgUserID), + verifyIDToken: idTokenVerifier(ctx, relyingParty, serviceUserID, orgUserID), + verifyRefreshToken: refreshTokenVerifier(ctx, relyingParty, serviceUserID, orgUserID), }, }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - setTokenExchangeFeature(t, tt.settings.tokenExchangeFeature) - setImpersonationPolicy(t, tt.settings.impersonationPolicy) - - got, err := tokenexchange.ExchangeToken(CTX, exchanger, 
tt.args.SubjectToken, tt.args.SubjectTokenType, tt.args.ActorToken, tt.args.ActorTokenType, tt.args.Resource, tt.args.Audience, tt.args.Scopes, tt.args.RequestedTokenType) + got, err := tokenexchange.ExchangeToken(ctx, exchanger, tt.args.SubjectToken, tt.args.SubjectTokenType, tt.args.ActorToken, tt.args.ActorTokenType, tt.args.Resource, tt.args.Audience, tt.args.Scopes, tt.args.RequestedTokenType) if tt.wantErr { assert.Error(t, err) return @@ -561,32 +581,33 @@ func TestServer_TokenExchange(t *testing.T) { // This test tries to call the zitadel API with an impersonated token, // which should fail. func TestImpersonation_API_Call(t *testing.T) { - client, keyData, err := Instance.CreateOIDCTokenExchangeClient(CTX) + t.Parallel() + + instance := integration.NewInstance(CTX) + ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) + + client, keyData, err := instance.CreateOIDCTokenExchangeClient(ctx) require.NoError(t, err) signer, err := rp.SignerFromKeyFile(keyData)() require.NoError(t, err) - exchanger, err := tokenexchange.NewTokenExchangerJWTProfile(CTX, Instance.OIDCIssuer(), client.GetClientId(), signer) + exchanger, err := tokenexchange.NewTokenExchangerJWTProfile(ctx, instance.OIDCIssuer(), client.GetClientId(), signer) require.NoError(t, err) - resourceServer, err := Instance.CreateResourceServerJWTProfile(CTX, keyData) + resourceServer, err := instance.CreateResourceServerJWTProfile(ctx, keyData) require.NoError(t, err) - setTokenExchangeFeature(t, true) - setImpersonationPolicy(t, true) - t.Cleanup(func() { - resetFeatures(t) - setImpersonationPolicy(t, false) - }) + setTokenExchangeFeature(t, instance, true) + setImpersonationPolicy(t, instance, true) - iamUserID, iamImpersonatorPAT := createMachineUserPATWithMembership(t, "IAM_ADMIN_IMPERSONATOR") - iamOwner := Instance.Users.Get(integration.UserTypeIAMOwner) + iamUserID, iamImpersonatorPAT := createMachineUserPATWithMembership(ctx, t, instance, "IAM_ADMIN_IMPERSONATOR") + iamOwner := instance.Users.Get(integration.UserTypeIAMOwner) // impersonating the IAM owner! 
- resp, err := tokenexchange.ExchangeToken(CTX, exchanger, iamOwner.Token, oidc.AccessTokenType, iamImpersonatorPAT, oidc.AccessTokenType, nil, nil, nil, oidc.AccessTokenType) + resp, err := tokenexchange.ExchangeToken(ctx, exchanger, iamOwner.Token, oidc.AccessTokenType, iamImpersonatorPAT, oidc.AccessTokenType, nil, nil, nil, oidc.AccessTokenType) require.NoError(t, err) - accessTokenVerifier(CTX, resourceServer, iamOwner.ID, iamUserID) + accessTokenVerifier(ctx, resourceServer, iamOwner.ID, iamUserID) - impersonatedCTX := integration.WithAuthorizationToken(CTX, resp.AccessToken) - _, err = Instance.Client.Admin.GetAllowedLanguages(impersonatedCTX, &admin.GetAllowedLanguagesRequest{}) + impersonatedCTX := integration.WithAuthorizationToken(ctx, resp.AccessToken) + _, err = instance.Client.Admin.GetAllowedLanguages(impersonatedCTX, &admin.GetAllowedLanguagesRequest{}) status := status.Convert(err) assert.Equal(t, codes.PermissionDenied, status.Code()) assert.Equal(t, "Errors.TokenExchange.Token.NotForAPI (APP-Shi0J)", status.Message()) diff --git a/internal/api/oidc/integration_test/userinfo_test.go b/internal/api/oidc/integration_test/userinfo_test.go index c2ad3be3ec..da1dc6b1e3 100644 --- a/internal/api/oidc/integration_test/userinfo_test.go +++ b/internal/api/oidc/integration_test/userinfo_test.go @@ -6,8 +6,8 @@ import ( "fmt" "strings" "testing" - "time" + "github.com/brianvoe/gofakeit/v6" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/zitadel/oidc/v3/pkg/client/rp" @@ -135,14 +135,14 @@ func testServer_UserInfo(t *testing.T) { prepare: func(t *testing.T, clientID string, scope []string) *oidc.Tokens[*oidc.IDTokenClaims] { _, err := Instance.Client.Mgmt.UpdateProject(CTX, &management.UpdateProjectRequest{ Id: projectID, - Name: fmt.Sprintf("project-%d", time.Now().UnixNano()), + Name: fmt.Sprintf("project-%s", gofakeit.AppName()), ProjectRoleAssertion: true, }) require.NoError(t, err) t.Cleanup(func() { _, err := Instance.Client.Mgmt.UpdateProject(CTX, &management.UpdateProjectRequest{ Id: projectID, - Name: fmt.Sprintf("project-%d", time.Now().UnixNano()), + Name: fmt.Sprintf("project-%s", gofakeit.AppName()), ProjectRoleAssertion: false, }) require.NoError(t, err) @@ -245,7 +245,7 @@ func TestServer_UserInfo_OrgIDRoles(t *testing.T) { _, err := Instance.Client.Mgmt.UpdateProject(CTX, &management.UpdateProjectRequest{ Id: projectID, - Name: fmt.Sprintf("project-%d", time.Now().UnixNano()), + Name: fmt.Sprintf("project-%s", gofakeit.AppName()), ProjectRoleAssertion: true, }) require.NoError(t, err) @@ -356,7 +356,7 @@ func addProjectRolesGrants(t *testing.T, userID, projectID string, roles ...stri // addProjectOrgGrant adds a new organization which will be granted on the projectID with the specified roles. // The userID will be granted in the new organization to the project with the same roles. 
func addProjectOrgGrant(t *testing.T, userID, projectID string, roles ...string) (grantedOrgID string) { - grantedOrg := Instance.CreateOrganization(CTXIAM, fmt.Sprintf("ZITADEL_GRANTED_%d", time.Now().UnixNano()), fmt.Sprintf("%d@mouse.com", time.Now().UnixNano())) + grantedOrg := Instance.CreateOrganization(CTXIAM, fmt.Sprintf("ZITADEL_GRANTED_%s", gofakeit.AppName()), gofakeit.Email()) projectGrant, err := Instance.Client.Mgmt.AddProjectGrant(CTX, &management.AddProjectGrantRequest{ ProjectId: projectID, GrantedOrgId: grantedOrg.GetOrganizationId(), diff --git a/internal/integration/context.go b/internal/integration/context.go new file mode 100644 index 0000000000..8ba4cbe204 --- /dev/null +++ b/internal/integration/context.go @@ -0,0 +1,30 @@ +package integration + +import ( + "context" + "time" +) + +// WaitForAndTickWithMaxDuration determine a duration and interval for EventuallyWithT-tests from context timeout and desired max duration +func WaitForAndTickWithMaxDuration(ctx context.Context, max time.Duration) (time.Duration, time.Duration) { + // interval which is used to retry the test + tick := time.Millisecond * 100 + // tolerance which is used to stop the test for the timeout + tolerance := tick * 5 + // default of the WaitFor is always a defined duration, shortened if the context would time out before + waitFor := max + + if ctxDeadline, ok := ctx.Deadline(); ok { + // if the context has a deadline, set the WaitFor to the shorter duration + if until := time.Until(ctxDeadline); until < waitFor { + // ignore durations which are smaller than the tolerance + if until < tolerance { + waitFor = 0 + } else { + // always let the test stop with tolerance before the context is in timeout + waitFor = until - tolerance + } + } + } + return waitFor, tick +} diff --git a/internal/notification/handlers/integration_test/telemetry_pusher_test.go b/internal/notification/handlers/integration_test/telemetry_pusher_test.go index c12ab64f35..fdff1180a8 100644 --- a/internal/notification/handlers/integration_test/telemetry_pusher_test.go +++ b/internal/notification/handlers/integration_test/telemetry_pusher_test.go @@ -25,6 +25,8 @@ import ( ) func TestServer_TelemetryPushMilestones(t *testing.T) { + t.Parallel() + sub := sink.Subscribe(CTX, sink.ChannelMilestone) defer sub.Close() From 0de2f92d2e019427dff5829202a0d8f11bc5c822 Mon Sep 17 00:00:00 2001 From: Fabi Date: Fri, 18 Oct 2024 10:12:23 +0200 Subject: [PATCH 07/30] docs: fix wrong examples and links (#8780) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Which Problems Are Solved - The addorgmember request shows a wrong example in the api documentation - Broken Links on actions feature description # How the Problems Are Solved - Change example of AddOrgMember API Docs - Point towards correct links --------- Co-authored-by: Tim Möhlmann --- docs/docs/concepts/features/actions_v2.md | 4 ++-- proto/zitadel/management.proto | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/docs/concepts/features/actions_v2.md b/docs/docs/concepts/features/actions_v2.md index 3c78229045..a06384639d 100644 --- a/docs/docs/concepts/features/actions_v2.md +++ b/docs/docs/concepts/features/actions_v2.md @@ -36,5 +36,5 @@ Possible conditions for the Execution: ## Further reading -- [Actions v2 example execution locally](/apis/actionsv2/execution-local) -- [Actions v2 reference](/apis/actionsv2/introduction#action) \ No newline at end of file +- [Actions v2 reference](/apis/actions/v3/usage) +- [Actions 
v2 example execution locally](/apis/actions/v3/testing-locally) \ No newline at end of file diff --git a/proto/zitadel/management.proto b/proto/zitadel/management.proto index 0eeaa29b0b..101741b2f5 100644 --- a/proto/zitadel/management.proto +++ b/proto/zitadel/management.proto @@ -9208,7 +9208,7 @@ message AddOrgMemberRequest { string user_id = 1 [(validate.rules).string = {min_len: 1, max_len: 200}]; repeated string roles = 2 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { - example: "[\"IAM_OWNER\"]"; + example: "[\"ORG_OWNER\"]"; description: "If no roles are provided the user won't have any rights" } ];
From 11782cf422acfca0479c88b5c3cb614855ff7508 Mon Sep 17 00:00:00 2001 From: Laust Rud Jacobsen Date: Mon, 21 Oct 2024 12:34:09 +0200 Subject: [PATCH 08/30] docs: typo repair (#8796) # Which Problems Are Solved Saw a typo, fixed it. --- docs/docs/guides/integrate/zitadel-apis/event-api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/guides/integrate/zitadel-apis/event-api.md b/docs/docs/guides/integrate/zitadel-apis/event-api.md index 0aacafed95..9051cb227a 100644 --- a/docs/docs/guides/integrate/zitadel-apis/event-api.md +++ b/docs/docs/guides/integrate/zitadel-apis/event-api.md @@ -12,7 +12,7 @@ You need to give a user the [manager role](/docs/guides/manage/console/managers) If you like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview). ## Request Events -Call the [ListEvents](/apis/resources/admin) enpoint in the Administration API to get all the events you need. +Call the [ListEvents](/apis/resources/admin) endpoint in the Administration API to get all the events you need. To further restrict your result you can add the following filters: - sequence - editor user id
From fca6b28a97d7cd5cc7cd0de9d5d2e6ff34c92a07 Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Mon, 21 Oct 2024 21:15:02 +0200 Subject: [PATCH 09/30] chore: correct require usage to assert for eventual consistency (#8795) # Which Problems Are Solved Eventual consistency is handled wrongly in the newly improved integration tests. # How the Problems Are Solved Correct the usage of the require package, replacing it with the assert package where necessary, to remove the panics and allow the EventuallyWithT functions to rerun. # Additional Changes Modify the timeout values for some EventuallyWithT calls, which can vary when an instance is freshly set up.
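For illustration, a rough, self-contained sketch of the pattern these tests now follow (the `ready` helper and its values are invented for this example and are not part of the change): inside the `EventuallyWithT` callback only `assert` is used, so a failed check is collected and the callback is retried until the timeout instead of ending on the first attempt.

```go
package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestEventuallyWithAssert(t *testing.T) {
	start := time.Now()
	// ready stands in for an eventually consistent query, e.g. a projection
	// that needs a moment to catch up after a write.
	ready := func() ([]int, error) {
		if time.Since(start) < 300*time.Millisecond {
			return nil, nil
		}
		return []int{1, 2, 3}, nil
	}

	retryDuration, tick := time.Minute, 100*time.Millisecond
	require.EventuallyWithT(t, func(ttt *assert.CollectT) {
		got, err := ready()
		// assert records the failure on the CollectT and lets EventuallyWithT
		// retry the callback; require would end it on the first miss.
		if !assert.NoError(ttt, err) {
			return
		}
		// check the length first, otherwise the element checks fail anyway
		if assert.Len(ttt, got, 3) {
			for _, want := range []int{1, 2, 3} {
				assert.Contains(ttt, got, want)
			}
		}
	}, retryDuration, tick, "timeout waiting for expected result")
}
```

Using `require` inside the callback instead calls `FailNow` on the `CollectT`, which panics rather than letting `EventuallyWithT` retry.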
# Additional Context None --- .../admin/integration_test/iam_member_test.go | 11 ++-- .../integration_test/iam_settings_test.go | 2 +- .../org/v2/integration_test/query_test.go | 20 ++++---- .../integration_test/execution_target_test.go | 8 ++- .../v3alpha/integration_test/query_test.go | 37 +++++++------- .../v3alpha/integration_test/server_test.go | 6 +-- .../v3alpha/integration_test/query_test.go | 23 +++++---- .../v2/integration_test/settings_test.go | 8 +-- .../v2beta/integration_test/settings_test.go | 6 ++- .../user/v2/integration_test/idp_link_test.go | 7 +-- .../user/v2/integration_test/passkey_test.go | 7 +-- .../user/v2/integration_test/query_test.go | 42 +++++++-------- .../v2beta/integration_test/query_test.go | 51 ++++++++++--------- 13 files changed, 121 insertions(+), 107 deletions(-) diff --git a/internal/api/grpc/admin/integration_test/iam_member_test.go b/internal/api/grpc/admin/integration_test/iam_member_test.go index 1b6440923e..ff8d2715d7 100644 --- a/internal/api/grpc/admin/integration_test/iam_member_test.go +++ b/internal/api/grpc/admin/integration_test/iam_member_test.go @@ -92,7 +92,7 @@ func TestServer_ListIAMMembers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, 20*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) assert.EventuallyWithT(t, func(ct *assert.CollectT) { got, err := Client.ListIAMMembers(tt.args.ctx, tt.args.req) if tt.wantErr { @@ -103,10 +103,11 @@ func TestServer_ListIAMMembers(t *testing.T) { wantResult := tt.want.GetResult() gotResult := got.GetResult() - require.Len(ct, gotResult, len(wantResult)) - for i, want := range wantResult { - assert.Equal(ct, want.GetUserId(), gotResult[i].GetUserId()) - assert.ElementsMatch(ct, want.GetRoles(), gotResult[i].GetRoles()) + if assert.Len(ct, gotResult, len(wantResult)) { + for i, want := range wantResult { + assert.Equal(ct, want.GetUserId(), gotResult[i].GetUserId()) + assert.ElementsMatch(ct, want.GetRoles(), gotResult[i].GetRoles()) + } } }, retryDuration, tick) }) diff --git a/internal/api/grpc/admin/integration_test/iam_settings_test.go b/internal/api/grpc/admin/integration_test/iam_settings_test.go index 93da4aed8a..9eca09c06c 100644 --- a/internal/api/grpc/admin/integration_test/iam_settings_test.go +++ b/internal/api/grpc/admin/integration_test/iam_settings_test.go @@ -54,7 +54,7 @@ func TestServer_GetSecurityPolicy(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { resp, err := instance.Client.Admin.GetSecurityPolicy(tt.ctx, &admin_pb.GetSecurityPolicyRequest{}) if tt.wantErr { diff --git a/internal/api/grpc/org/v2/integration_test/query_test.go b/internal/api/grpc/org/v2/integration_test/query_test.go index e52ea40018..e476b4e60d 100644 --- a/internal/api/grpc/org/v2/integration_test/query_test.go +++ b/internal/api/grpc/org/v2/integration_test/query_test.go @@ -411,17 +411,17 @@ func TestServer_ListOrganizations(t *testing.T) { // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions tt.want.Details.TotalResult = got.Details.TotalResult // always first check length, otherwise its failed anyway - 
require.Len(ttt, got.Result, len(tt.want.Result)) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + // domain from result, as it is generated though the create + tt.want.Result[i].PrimaryDomain = got.Result[i].PrimaryDomain + // sequence from result, as it can be with different sequence from create + tt.want.Result[i].Details.Sequence = got.Result[i].Details.Sequence + } - for i := range tt.want.Result { - // domain from result, as it is generated though the create - tt.want.Result[i].PrimaryDomain = got.Result[i].PrimaryDomain - // sequence from result, as it can be with different sequence from create - tt.want.Result[i].Details.Sequence = got.Result[i].Details.Sequence - } - - for i := range tt.want.Result { - assert.Contains(ttt, got.Result, tt.want.Result[i]) + for i := range tt.want.Result { + assert.Contains(ttt, got.Result, tt.want.Result[i]) + } } integration.AssertListDetails(t, tt.want, got) }, retryDuration, tick, "timeout waiting for expected user result") diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go index 042b7a416e..286048ab51 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go @@ -255,7 +255,7 @@ func TestServer_ExecutionTarget(t *testing.T) { require.NoError(t, err) defer close() } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.ActionV3Alpha.GetTarget(tt.ctx, tt.req) if tt.wantErr { @@ -278,7 +278,7 @@ func TestServer_ExecutionTarget(t *testing.T) { } func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition) { - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{ Filters: []*action.ExecutionSearchFilter{ @@ -290,9 +290,7 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in if !assert.NoError(ttt, err) { return } - if assert.Len(ttt, got.GetResult(), 1) { - return - } + assert.Len(ttt, got.GetResult(), 1) }, retryDuration, tick, "timeout waiting for expected execution result") return } diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go index 3756a144d8..c29900e966 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go @@ -216,14 +216,14 @@ func TestServer_GetTarget(t *testing.T) { err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want) require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := 
instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req) if tt.wantErr { - require.Error(ttt, err, "Error: "+err.Error()) + assert.Error(ttt, err, "Error: "+err.Error()) return } - require.NoError(ttt, err) + assert.NoError(ttt, err) wantTarget := tt.want.GetTarget() gotTarget := got.GetTarget() @@ -478,7 +478,7 @@ func TestServer_ListTargets(t *testing.T) { require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, listErr := instance.Client.ActionV3Alpha.SearchTargets(tt.args.ctx, tt.args.req) if tt.wantErr { @@ -488,11 +488,11 @@ func TestServer_ListTargets(t *testing.T) { require.NoError(ttt, listErr) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) - - for i := range tt.want.Result { - integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[i].GetDetails()) - assert.EqualExportedValues(ttt, tt.want.Result[i].GetConfig(), got.Result[i].GetConfig()) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[i].GetDetails()) + assert.EqualExportedValues(ttt, tt.want.Result[i].GetConfig(), got.Result[i].GetConfig()) + } } integration.AssertResourceListDetails(ttt, tt.want, got) }, retryDuration, tick, "timeout waiting for expected execution result") @@ -863,7 +863,7 @@ func TestServer_SearchExecutions(t *testing.T) { require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, listErr := instance.Client.ActionV3Alpha.SearchExecutions(tt.args.ctx, tt.args.req) if tt.wantErr { @@ -872,14 +872,15 @@ func TestServer_SearchExecutions(t *testing.T) { } require.NoError(ttt, listErr) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) - for i := range tt.want.Result { - // as not sorted, all elements have to be checked - // workaround as oneof elements can only be checked with assert.EqualExportedValues() - if j, found := containExecution(got.Result, tt.want.Result[i]); found { - integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[j].GetDetails()) - got.Result[j].Details = tt.want.Result[i].GetDetails() - assert.EqualExportedValues(ttt, tt.want.Result[i], got.Result[j]) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + // as not sorted, all elements have to be checked + // workaround as oneof elements can only be checked with assert.EqualExportedValues() + if j, found := containExecution(got.Result, tt.want.Result[i]); found { + integration.AssertResourceDetails(ttt, tt.want.Result[i].GetDetails(), got.Result[j].GetDetails()) + got.Result[j].Details = tt.want.Result[i].GetDetails() + assert.EqualExportedValues(ttt, tt.want.Result[i], got.Result[j]) + } } } integration.AssertResourceListDetails(ttt, tt.want, got) diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go index 3c1c32062d..bc8e43eafc 100644 
--- a/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/server_test.go @@ -44,7 +44,7 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { }) require.NoError(t, err) - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { f, err := instance.Client.FeatureV2.GetInstanceFeatures(ctx, &feature.GetInstanceFeaturesRequest{ @@ -54,10 +54,10 @@ func ensureFeatureEnabled(t *testing.T, instance *integration.Instance) { assert.True(ttt, f.Actions.GetEnabled()) }, retryDuration, - time.Second, + tick, "timed out waiting for ensuring instance feature") - retryDuration, tick = integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + retryDuration, tick = integration.WaitForAndTickWithMaxDuration(ctx, 5*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { _, err := instance.Client.ActionV3Alpha.ListExecutionMethods(ctx, &action.ListExecutionMethodsRequest{}) diff --git a/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go index 436af3bc6f..ef7fe02807 100644 --- a/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/userschema/v3alpha/integration_test/query_test.go @@ -188,7 +188,7 @@ func TestServer_ListUserSchemas(t *testing.T) { require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 20*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.UserSchemaV3.SearchUserSchemas(tt.args.ctx, tt.args.req) if tt.wantErr { @@ -197,14 +197,15 @@ func TestServer_ListUserSchemas(t *testing.T) { } require.NoError(ttt, err) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) - for i := range tt.want.Result { - wantSchema := tt.want.Result[i] - gotSchema := got.Result[i] + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + wantSchema := tt.want.Result[i] + gotSchema := got.Result[i] - integration.AssertResourceDetails(ttt, wantSchema.GetDetails(), gotSchema.GetDetails()) - wantSchema.Details = gotSchema.GetDetails() - grpc.AllFieldsEqual(ttt, wantSchema.ProtoReflect(), gotSchema.ProtoReflect(), grpc.CustomMappers) + integration.AssertResourceDetails(ttt, wantSchema.GetDetails(), gotSchema.GetDetails()) + wantSchema.Details = gotSchema.GetDetails() + grpc.AllFieldsEqual(ttt, wantSchema.ProtoReflect(), gotSchema.ProtoReflect(), grpc.CustomMappers) + } } integration.AssertListDetails(ttt, tt.want, got) }, retryDuration, tick, "timeout waiting for expected user schema result") @@ -295,14 +296,14 @@ func TestServer_GetUserSchema(t *testing.T) { require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 5*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.UserSchemaV3.GetUserSchema(tt.args.ctx, tt.args.req) if tt.wantErr { - require.Error(ttt, err, "Error: 
"+err.Error()) + assert.Error(ttt, err, "Error: "+err.Error()) return } - require.NoError(ttt, err) + assert.NoError(ttt, err) wantSchema := tt.want.GetUserSchema() gotSchema := got.GetUserSchema() diff --git a/internal/api/grpc/settings/v2/integration_test/settings_test.go b/internal/api/grpc/settings/v2/integration_test/settings_test.go index 8ae576d104..16942137c9 100644 --- a/internal/api/grpc/settings/v2/integration_test/settings_test.go +++ b/internal/api/grpc/settings/v2/integration_test/settings_test.go @@ -53,14 +53,16 @@ func TestServer_GetSecuritySettings(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, 20*time.Second) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.ctx, time.Minute) assert.EventuallyWithT(t, func(ct *assert.CollectT) { resp, err := Client.GetSecuritySettings(tt.ctx, &settings.GetSecuritySettingsRequest{}) if tt.wantErr { - require.Error(ct, err) + assert.Error(ct, err) + return + } + if !assert.NoError(ct, err) { return } - require.NoError(ct, err) got, want := resp.GetSettings(), tt.want.GetSettings() assert.Equal(ct, want.GetEmbeddedIframe().GetEnabled(), got.GetEmbeddedIframe().GetEnabled(), "enable iframe embedding") assert.Equal(ct, want.GetEmbeddedIframe().GetAllowedOrigins(), got.GetEmbeddedIframe().GetAllowedOrigins(), "allowed origins") diff --git a/internal/api/grpc/settings/v2beta/integration_test/settings_test.go b/internal/api/grpc/settings/v2beta/integration_test/settings_test.go index 9f6968f5e0..d5c1914ba9 100644 --- a/internal/api/grpc/settings/v2beta/integration_test/settings_test.go +++ b/internal/api/grpc/settings/v2beta/integration_test/settings_test.go @@ -57,10 +57,12 @@ func TestServer_GetSecuritySettings(t *testing.T) { assert.EventuallyWithT(t, func(ct *assert.CollectT) { resp, err := Client.GetSecuritySettings(tt.ctx, &settings.GetSecuritySettingsRequest{}) if tt.wantErr { - require.Error(ct, err) + assert.Error(ct, err) + return + } + if !assert.NoError(ct, err) { return } - require.NoError(ct, err) got, want := resp.GetSettings(), tt.want.GetSettings() assert.Equal(ct, want.GetEmbeddedIframe().GetEnabled(), got.GetEmbeddedIframe().GetEnabled(), "enable iframe embedding") assert.Equal(ct, want.GetEmbeddedIframe().GetAllowedOrigins(), got.GetEmbeddedIframe().GetAllowedOrigins(), "allowed origins") diff --git a/internal/api/grpc/user/v2/integration_test/idp_link_test.go b/internal/api/grpc/user/v2/integration_test/idp_link_test.go index e9022f31f8..ab398c7233 100644 --- a/internal/api/grpc/user/v2/integration_test/idp_link_test.go +++ b/internal/api/grpc/user/v2/integration_test/idp_link_test.go @@ -245,9 +245,10 @@ func TestServer_ListIDPLinks(t *testing.T) { } require.NoError(ttt, err) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) - for i := range tt.want.Result { - assert.Contains(ttt, got.Result, tt.want.Result[i]) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + assert.Contains(ttt, got.Result, tt.want.Result[i]) + } } integration.AssertListDetails(t, tt.want, got) }, retryDuration, tick, "timeout waiting for expected idplinks result") diff --git a/internal/api/grpc/user/v2/integration_test/passkey_test.go b/internal/api/grpc/user/v2/integration_test/passkey_test.go index 585e5ba413..9d9cb8a047 100644 --- a/internal/api/grpc/user/v2/integration_test/passkey_test.go +++ 
b/internal/api/grpc/user/v2/integration_test/passkey_test.go @@ -593,9 +593,10 @@ func TestServer_ListPasskeys(t *testing.T) { } require.NoError(ttt, err) // always first check length, otherwise its failed anyway - assert.Len(ttt, got.Result, len(tt.want.Result)) - for i := range tt.want.Result { - assert.Contains(ttt, got.Result, tt.want.Result[i]) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + for i := range tt.want.Result { + assert.Contains(ttt, got.Result, tt.want.Result[i]) + } } integration.AssertListDetails(ttt, tt.want, got) }, retryDuration, tick, "timeout waiting for expected idplinks result") diff --git a/internal/api/grpc/user/v2/integration_test/query_test.go b/internal/api/grpc/user/v2/integration_test/query_test.go index fc2104d62e..4ee085336c 100644 --- a/internal/api/grpc/user/v2/integration_test/query_test.go +++ b/internal/api/grpc/user/v2/integration_test/query_test.go @@ -162,10 +162,12 @@ func TestServer_GetUserByID(t *testing.T) { require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { - require.Error(ttt, err) + assert.Error(ttt, err) + return + } + if !assert.NoError(ttt, err) { return } - require.NoError(ttt, err) tt.want.User.Details = userAttr.Details tt.want.User.UserId = userAttr.UserID @@ -912,27 +914,27 @@ func TestServer_ListUsers(t *testing.T) { // always only give back dependency infos which are required for the response require.Len(ttt, tt.want.Result, len(infos)) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions + tt.want.Details.TotalResult = got.Details.TotalResult - // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions - tt.want.Details.TotalResult = got.Details.TotalResult - - // fill in userid and username as it is generated - for i := range infos { - tt.want.Result[i].UserId = infos[i].UserID - tt.want.Result[i].Username = infos[i].Username - tt.want.Result[i].PreferredLoginName = infos[i].Username - tt.want.Result[i].LoginNames = []string{infos[i].Username} - if human := tt.want.Result[i].GetHuman(); human != nil { - human.Email.Email = infos[i].Username - if tt.want.Result[i].GetHuman().GetPasswordChanged() != nil { - human.PasswordChanged = infos[i].Changed + // fill in userid and username as it is generated + for i := range infos { + tt.want.Result[i].UserId = infos[i].UserID + tt.want.Result[i].Username = infos[i].Username + tt.want.Result[i].PreferredLoginName = infos[i].Username + tt.want.Result[i].LoginNames = []string{infos[i].Username} + if human := tt.want.Result[i].GetHuman(); human != nil { + human.Email.Email = infos[i].Username + if tt.want.Result[i].GetHuman().GetPasswordChanged() != nil { + human.PasswordChanged = infos[i].Changed + } } + tt.want.Result[i].Details = infos[i].Details + } + for i := range tt.want.Result { + assert.Contains(ttt, got.Result, tt.want.Result[i]) } - tt.want.Result[i].Details = infos[i].Details - } - for i := range tt.want.Result { - assert.Contains(ttt, got.Result, tt.want.Result[i]) } integration.AssertListDetails(ttt, tt.want, got) }, retryDuration, tick, "timeout waiting for expected user result") diff --git a/internal/api/grpc/user/v2beta/integration_test/query_test.go 
b/internal/api/grpc/user/v2beta/integration_test/query_test.go index 4ea911727c..654a84a5d4 100644 --- a/internal/api/grpc/user/v2beta/integration_test/query_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/query_test.go @@ -171,10 +171,12 @@ func TestServer_GetUserByID(t *testing.T) { require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { - require.Error(t, err) + assert.Error(ttt, err) + return + } + if !assert.NoError(ttt, err) { return } - require.NoError(t, err) tt.want.User.Details = detailsV2ToV2beta(userAttr.Details) tt.want.User.UserId = userAttr.UserID @@ -188,7 +190,7 @@ func TestServer_GetUserByID(t *testing.T) { } } assert.Equal(ttt, tt.want.User, got.User) - integration.AssertDetails(t, tt.want, got) + integration.AssertDetails(ttt, tt.want, got) }, retryDuration, tick) }) } @@ -314,10 +316,13 @@ func TestServer_GetUserByID_Permission(t *testing.T) { require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) if tt.wantErr { - require.Error(ttt, err) + assert.Error(ttt, err) return } - require.NoError(ttt, err) + if !assert.NoError(ttt, err) { + return + } + tt.want.User.UserId = tt.args.req.GetUserId() tt.want.User.Username = newOrgOwnerEmail tt.want.User.PreferredLoginName = newOrgOwnerEmail @@ -923,27 +928,27 @@ func TestServer_ListUsers(t *testing.T) { // always only give back dependency infos which are required for the response require.Len(ttt, tt.want.Result, len(infos)) // always first check length, otherwise its failed anyway - require.Len(ttt, got.Result, len(tt.want.Result)) - // fill in userid and username as it is generated + if assert.Len(ttt, got.Result, len(tt.want.Result)) { + // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions + tt.want.Details.TotalResult = got.Details.TotalResult - // totalResult is unrelated to the tests here so gets carried over, can vary from the count of results due to permissions - tt.want.Details.TotalResult = got.Details.TotalResult - - for i := range infos { - tt.want.Result[i].UserId = infos[i].UserID - tt.want.Result[i].Username = infos[i].Username - tt.want.Result[i].PreferredLoginName = infos[i].Username - tt.want.Result[i].LoginNames = []string{infos[i].Username} - if human := tt.want.Result[i].GetHuman(); human != nil { - human.Email.Email = infos[i].Username - if tt.want.Result[i].GetHuman().GetPasswordChanged() != nil { - human.PasswordChanged = infos[i].Changed + // fill in userid and username as it is generated + for i := range infos { + tt.want.Result[i].UserId = infos[i].UserID + tt.want.Result[i].Username = infos[i].Username + tt.want.Result[i].PreferredLoginName = infos[i].Username + tt.want.Result[i].LoginNames = []string{infos[i].Username} + if human := tt.want.Result[i].GetHuman(); human != nil { + human.Email.Email = infos[i].Username + if tt.want.Result[i].GetHuman().GetPasswordChanged() != nil { + human.PasswordChanged = infos[i].Changed + } } + tt.want.Result[i].Details = detailsV2ToV2beta(infos[i].Details) + } + for i := range tt.want.Result { + assert.Contains(ttt, got.Result, tt.want.Result[i]) } - tt.want.Result[i].Details = detailsV2ToV2beta(infos[i].Details) - } - for i := range tt.want.Result { - assert.Contains(ttt, got.Result, tt.want.Result[i]) } integration.AssertListDetails(ttt, tt.want, got) }, retryDuration, tick, "timeout waiting for expected user result") From 
79fb4cc1cc6ebba91f9af917807e0e3651516acd Mon Sep 17 00:00:00 2001 From: Livio Spring Date: Tue, 22 Oct 2024 16:16:44 +0200 Subject: [PATCH 10/30] fix: correctly check denied domains and ips for actions (#8810) # Which Problems Are Solved System administrators can block hosts and IPs for HTTP calls in actions. Using DNS, blocked IPs could be bypassed. # How the Problems Are Solved - Hosts are resolved (DNS lookup) to check whether their corresponding IP is blocked. # Additional Changes - Added complete lookup ip address range and "unspecified" address to the default `DenyList` --- cmd/defaults.yaml | 5 ++- cmd/start/config_test.go | 12 ++--- internal/actions/http_module.go | 28 +++++++++--- internal/actions/http_module_config.go | 62 +++++++++++--------------- internal/actions/http_module_test.go | 48 ++++++++++++++++---- 5 files changed, 95 insertions(+), 60 deletions(-) diff --git a/cmd/defaults.yaml b/cmd/defaults.yaml index 90c2db1f01..a12fe474ba 100644 --- a/cmd/defaults.yaml +++ b/cmd/defaults.yaml @@ -600,7 +600,10 @@ Actions: # Wildcard sub domains are currently unsupported DenyList: # ZITADEL_ACTIONS_HTTP_DENYLIST (comma separated list) - localhost - - "127.0.0.1" + - "127.0.0.0/8" + - "::1" + - "0.0.0.0" + - "::" LogStore: Access: diff --git a/cmd/start/config_test.go b/cmd/start/config_test.go index 90d4b9d2dc..53c95d35ab 100644 --- a/cmd/start/config_test.go +++ b/cmd/start/config_test.go @@ -47,9 +47,9 @@ Log: `}, want: func(t *testing.T, config *Config) { assert.Equal(t, config.Actions.HTTP.DenyList, []actions.AddressChecker{ - &actions.DomainChecker{Domain: "localhost"}, - &actions.IPChecker{IP: net.ParseIP("127.0.0.1")}, - &actions.DomainChecker{Domain: "foobar"}}) + &actions.HostChecker{Domain: "localhost"}, + &actions.HostChecker{IP: net.ParseIP("127.0.0.1")}, + &actions.HostChecker{Domain: "foobar"}}) }, }, { name: "actions deny list string ok", @@ -63,9 +63,9 @@ Log: `}, want: func(t *testing.T, config *Config) { assert.Equal(t, config.Actions.HTTP.DenyList, []actions.AddressChecker{ - &actions.DomainChecker{Domain: "localhost"}, - &actions.IPChecker{IP: net.ParseIP("127.0.0.1")}, - &actions.DomainChecker{Domain: "foobar"}}) + &actions.HostChecker{Domain: "localhost"}, + &actions.HostChecker{IP: net.ParseIP("127.0.0.1")}, + &actions.HostChecker{Domain: "foobar"}}) }, }, { name: "features ok", diff --git a/internal/actions/http_module.go b/internal/actions/http_module.go index 33cfbc91bc..2f9d09932c 100644 --- a/internal/actions/http_module.go +++ b/internal/actions/http_module.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "io" + "net" "net/http" "net/url" "strings" @@ -19,7 +20,7 @@ import ( func WithHTTP(ctx context.Context) Option { return func(c *runConfig) { c.modules["zitadel/http"] = func(runtime *goja.Runtime, module *goja.Object) { - requireHTTP(ctx, &http.Client{Transport: new(transport)}, runtime, module) + requireHTTP(ctx, &http.Client{Transport: &transport{lookup: net.LookupIP}}, runtime, module) } } } @@ -170,21 +171,34 @@ func parseHeaders(headers *goja.Object) http.Header { return h } -type transport struct{} +type transport struct { + lookup func(string) ([]net.IP, error) +} -func (*transport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { if httpConfig == nil { return http.DefaultTransport.RoundTrip(req) } - if isHostBlocked(httpConfig.DenyList, req.URL) { + if t.isHostBlocked(httpConfig.DenyList, req.URL) { return nil, zerrors.ThrowInvalidArgument(nil, 
"ACTIO-N72d0", "host is denied") } return http.DefaultTransport.RoundTrip(req) } -func isHostBlocked(denyList []AddressChecker, address *url.URL) bool { +func (t *transport) isHostBlocked(denyList []AddressChecker, address *url.URL) bool { + host := address.Hostname() + ip := net.ParseIP(host) + ips := []net.IP{ip} + // if the hostname is a domain, we need to check resolve the ip(s), since it might be denied + if ip == nil { + var err error + ips, err = t.lookup(host) + if err != nil { + return true + } + } for _, blocked := range denyList { - if blocked.Matches(address.Hostname()) { + if blocked.Matches(ips, host) { return true } } @@ -192,5 +206,5 @@ func isHostBlocked(denyList []AddressChecker, address *url.URL) bool { } type AddressChecker interface { - Matches(string) bool + Matches([]net.IP, string) bool } diff --git a/internal/actions/http_module_config.go b/internal/actions/http_module_config.go index d10ad39676..d1b965814e 100644 --- a/internal/actions/http_module_config.go +++ b/internal/actions/http_module_config.go @@ -6,8 +6,6 @@ import ( "strings" "github.com/mitchellh/mapstructure" - - "github.com/zitadel/zitadel/internal/zerrors" ) func SetHTTPConfig(config *HTTPConfig) { @@ -48,7 +46,7 @@ func HTTPConfigDecodeHook(from, to reflect.Value) (interface{}, error) { for _, unsplit := range config.DenyList { for _, split := range strings.Split(unsplit, ",") { - parsed, parseErr := parseDenyListEntry(split) + parsed, parseErr := NewHostChecker(split) if parseErr != nil { return nil, parseErr } @@ -61,46 +59,36 @@ func HTTPConfigDecodeHook(from, to reflect.Value) (interface{}, error) { return c, nil } -func parseDenyListEntry(entry string) (AddressChecker, error) { - if checker, err := NewIPChecker(entry); err == nil { - return checker, nil - } - return &DomainChecker{Domain: entry}, nil -} - -func NewIPChecker(i string) (AddressChecker, error) { - _, network, err := net.ParseCIDR(i) +func NewHostChecker(entry string) (AddressChecker, error) { + _, network, err := net.ParseCIDR(entry) if err == nil { - return &IPChecker{Net: network}, nil + return &HostChecker{Net: network}, nil } - if ip := net.ParseIP(i); ip != nil { - return &IPChecker{IP: ip}, nil + if ip := net.ParseIP(entry); ip != nil { + return &HostChecker{IP: ip}, nil } - return nil, zerrors.ThrowInvalidArgument(nil, "ACTIO-ddJ7h", "invalid ip") + return &HostChecker{Domain: entry}, nil } -type IPChecker struct { - Net *net.IPNet - IP net.IP -} - -func (c *IPChecker) Matches(address string) bool { - ip := net.ParseIP(address) - if ip == nil { - return false - } - - if c.IP != nil { - return c.IP.Equal(ip) - } - return c.Net.Contains(ip) -} - -type DomainChecker struct { +type HostChecker struct { + Net *net.IPNet + IP net.IP Domain string } -func (c *DomainChecker) Matches(domain string) bool { - //TODO: allow wild cards - return c.Domain == domain +func (c *HostChecker) Matches(ips []net.IP, address string) bool { + // if the address matches the domain, no additional checks as needed + if c.Domain == address { + return true + } + // otherwise we need to check on ips (incl. 
the resolved ips of the host) + for _, ip := range ips { + if c.Net != nil && c.Net.Contains(ip) { + return true + } + if c.IP != nil && c.IP.Equal(ip) { + return true + } + } + return false } diff --git a/internal/actions/http_module_test.go b/internal/actions/http_module_test.go index 0d3bdef75e..7a1f8d7816 100644 --- a/internal/actions/http_module_test.go +++ b/internal/actions/http_module_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "io" + "net" "net/http" "net/url" "reflect" @@ -19,17 +20,21 @@ import ( func Test_isHostBlocked(t *testing.T) { SetLogstoreService(logstore.New[*record.ExecutionLog](nil, nil)) var denyList = []AddressChecker{ - mustNewIPChecker(t, "192.168.5.0/24"), - mustNewIPChecker(t, "127.0.0.1"), - &DomainChecker{Domain: "test.com"}, + mustNewHostChecker(t, "192.168.5.0/24"), + mustNewHostChecker(t, "127.0.0.1"), + mustNewHostChecker(t, "test.com"), + } + type fields struct { + lookup func(host string) ([]net.IP, error) } type args struct { address *url.URL } tests := []struct { - name string - args args - want bool + name string + fields fields + args args + want bool }{ { name: "in range", @@ -47,6 +52,11 @@ func Test_isHostBlocked(t *testing.T) { }, { name: "address match", + fields: fields{ + lookup: func(host string) ([]net.IP, error) { + return []net.IP{net.ParseIP("194.264.52.4")}, nil + }, + }, args: args{ address: mustNewURL(t, "https://test.com:42/hodor"), }, @@ -54,24 +64,44 @@ func Test_isHostBlocked(t *testing.T) { }, { name: "address not match", + fields: fields{ + lookup: func(host string) ([]net.IP, error) { + return []net.IP{net.ParseIP("194.264.52.4")}, nil + }, + }, args: args{ address: mustNewURL(t, "https://test2.com/hodor"), }, want: false, }, + { + name: "looked up ip matches", + fields: fields{ + lookup: func(host string) ([]net.IP, error) { + return []net.IP{net.ParseIP("127.0.0.1")}, nil + }, + }, + args: args{ + address: mustNewURL(t, "https://test2.com/hodor"), + }, + want: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := isHostBlocked(denyList, tt.args.address); got != tt.want { + trans := &transport{ + lookup: tt.fields.lookup, + } + if got := trans.isHostBlocked(denyList, tt.args.address); got != tt.want { t.Errorf("isHostBlocked() = %v, want %v", got, tt.want) } }) } } -func mustNewIPChecker(t *testing.T, ip string) AddressChecker { +func mustNewHostChecker(t *testing.T, ip string) AddressChecker { t.Helper() - checker, err := NewIPChecker(ip) + checker, err := NewHostChecker(ip) if err != nil { t.Errorf("unable to parse cidr of %q because: %v", ip, err) t.FailNow() From 70449caafbabd8e080104eb125de518d8f382ad0 Mon Sep 17 00:00:00 2001 From: Mark Stosberg Date: Tue, 22 Oct 2024 10:59:16 -0400 Subject: [PATCH 11/30] docs: standardize multi-factor spelling and related string updates (#8752) - **docs: s/Secondfactor/Second factor/** - **docs: s/IDP/IdP/** - **docs: s/Hardwaretokens/Hardware tokens/** - **docs: standardize multi-factor vs multi factor vs multifactor** # Which Problems Are Solved - English strings are improved # How the Problems Are Solved - With better strings --------- Co-authored-by: Fabi --- .../login-policy/login-policy.component.html | 2 +- console/src/assets/i18n/en.json | 20 +++++------ docs/docs/concepts/architecture/secrets.md | 2 +- .../integrate/login-ui/_list-mfa-options.mdx | 2 +- docs/docs/guides/integrate/login-ui/mfa.mdx | 4 +-- .../integrate/zitadel-apis/event-api.md | 6 ++-- .../manage/console/default-settings.mdx | 8 ++--- 
docs/docs/guides/migrate/sources/zitadel.md | 2 +- docs/docs/guides/migrate/users.md | 2 +- internal/api/oidc/server.go | 2 +- .../eventstore/token_verifier.go | 2 +- .../handlers/user_notifier_test.go | 4 +-- internal/notification/static/i18n/en.yaml | 2 +- internal/static/i18n/en.yaml | 4 +-- internal/static/i18n/es.yaml | 4 +-- proto/zitadel/admin.proto | 12 +++---- proto/zitadel/auth.proto | 36 +++++++++---------- proto/zitadel/management.proto | 16 ++++----- .../user/v3alpha/authenticator.proto | 14 ++++---- .../resources/user/v3alpha/user_service.proto | 14 ++++---- .../zitadel/settings/v2/login_settings.proto | 2 +- .../settings/v2beta/login_settings.proto | 2 +- proto/zitadel/user/v2/user_service.proto | 14 ++++---- proto/zitadel/user/v2beta/user_service.proto | 14 ++++---- 24 files changed, 95 insertions(+), 95 deletions(-) diff --git a/console/src/app/modules/policies/login-policy/login-policy.component.html b/console/src/app/modules/policies/login-policy/login-policy.component.html index 151e8cda93..622fd84b81 100644 --- a/console/src/app/modules/policies/login-policy/login-policy.component.html +++ b/console/src/app/modules/policies/login-policy/login-policy.component.html @@ -189,7 +189,7 @@ [title]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.TITLE' | translate" [description]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.DESCRIPTION' | translate" > - + {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} diff --git a/console/src/assets/i18n/en.json b/console/src/assets/i18n/en.json index e446a57c6f..c6fe05499d 100644 --- a/console/src/assets/i18n/en.json +++ b/console/src/assets/i18n/en.json @@ -760,7 +760,7 @@ "3": "Deleted" }, "DIALOG": { - "MFA_DELETE_TITLE": "Remove Secondfactor", + "MFA_DELETE_TITLE": "Remove Second factor", "MFA_DELETE_DESCRIPTION": "You are about to delete a second factor. Are you sure?", "ADD_MFA_TITLE": "Add Second Factor", "ADD_MFA_DESCRIPTION": "Select one of the following options." @@ -773,9 +773,9 @@ "IDPNAME": "IDP Name", "USERDISPLAYNAME": "External Name", "EXTERNALUSERID": "External User ID", - "EMPTY": "No external IDP found", + "EMPTY": "No external IdP found", "DIALOG": { - "DELETE_TITLE": "Remove IDP", + "DELETE_TITLE": "Remove IdP", "DELETE_DESCRIPTION": "You are about to delete an Identity Provider from a user. Do you really want to continue?" } }, @@ -1691,7 +1691,7 @@ "username": "Username", "tempUsername": "Temp username", "otp": "One-time password", - "verifyUrl": "Verify One-time-password URL", + "verifyUrl": "Verify One-time password URL", "expiry": "Expiry", "applicationName": "Application name" }, @@ -2154,7 +2154,7 @@ "PREFERREDLANGUAGEATTRIBUTE": "Preferred language attribute", "PREFERREDUSERNAMEATTRIBUTE": "Preferred username attribute", "PROFILEATTRIBUTE": "Profile attribute", - "IDPDISPLAYNAMMAPPING": "IDP Display Name Mapping", + "IDPDISPLAYNAMMAPPING": "IdP Display Name Mapping", "USERNAMEMAPPING": "Username Mapping", "DATES": "Dates", "CREATIONDATE": "Created At", @@ -2162,13 +2162,13 @@ "DEACTIVATE": "Deactivate", "ACTIVATE": "Activate", "DELETE": "Delete", - "DELETE_TITLE": "Delete IDP", + "DELETE_TITLE": "Delete IdP", "DELETE_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?", - "REMOVE_WARN_TITLE": "Remove IDP", - "REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. 
This will remove the selection of the available IDP for your users and already registered users won't be able to login again. Are you sure to continue?", - "DELETE_SELECTION_TITLE": "Delete IDP", + "REMOVE_WARN_TITLE": "Remove IdP", + "REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. This will remove the selection of the available IdP for your users and already registered users won't be able to login again. Are you sure to continue?", + "DELETE_SELECTION_TITLE": "Delete IdP", "DELETE_SELECTION_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?", - "EMPTY": "No IDP available", + "EMPTY": "No IdP available", "OIDC": { "GENERAL": "General Information", "TITLE": "OIDC Configuration", diff --git a/docs/docs/concepts/architecture/secrets.md b/docs/docs/concepts/architecture/secrets.md index 2f8c196797..4156f77a26 100644 --- a/docs/docs/concepts/architecture/secrets.md +++ b/docs/docs/concepts/architecture/secrets.md @@ -92,7 +92,7 @@ Some secrets cannot be hashed because they need to be used in their raw form. Th - Federation - Client Secrets of Identity Providers (IdPs) -- Multi Factor Authentication +- Multi-factor Authentication - TOTP Seed Values - Validation Secrets - Verifying contact information like eMail, Phonenumbers diff --git a/docs/docs/guides/integrate/login-ui/_list-mfa-options.mdx b/docs/docs/guides/integrate/login-ui/_list-mfa-options.mdx index 8b62002c3e..0694816238 100644 --- a/docs/docs/guides/integrate/login-ui/_list-mfa-options.mdx +++ b/docs/docs/guides/integrate/login-ui/_list-mfa-options.mdx @@ -14,7 +14,7 @@ curl --request GET \ ``` Response Example: -The relevant part for the list is the second factor and multi factor list. +The relevant part for the list is the second factor and multi-factor list. ```bash { diff --git a/docs/docs/guides/integrate/login-ui/mfa.mdx b/docs/docs/guides/integrate/login-ui/mfa.mdx index cea51870a5..09046a2506 100644 --- a/docs/docs/guides/integrate/login-ui/mfa.mdx +++ b/docs/docs/guides/integrate/login-ui/mfa.mdx @@ -30,7 +30,7 @@ ZITADEL supports different Methods: ### Start TOTP Registration -The user has selected to setup Time-based One-Time-Password (TOTP). +The user has selected to setup Time-based One-Time Password (TOTP). To show the user the QR to register TOTP with his Authenticator App like Google/Microsoft Authenticator or Authy you have to start the registration on the ZITADEL API. Generate the QR Code with the URI from the response. For users that do not have a QR Code reader make sure to also show the secret, to enable manual configuration. @@ -485,7 +485,7 @@ You have successfully registered a new U2F to the user. ### Check User -To be able to check the Universal-Second-Factor (U2F) you need a user check and a webAuthN challenge. +To be able to check the Universal Second Factor (U2F) you need a user check and a webAuthN challenge. In the creat session request you can check for the user and directly initiate the webAuthN challenge. For U2F you can choose between "USER_VERIFICATION_REQUIREMENT_PREFERRED" and "USER_VERIFICATION_REQUIREMENT_DISCOURAGED" for the challenge. 
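The deny-list change in patch 10 above hinges on one idea: resolve the hostname before matching, so a DNS name that points at a blocked IP is caught as well. The following is a condensed, self-contained sketch of that check, with names following the diff (`HostChecker`, the injectable `lookup` function); it simplifies the original code and, like the patch, fails closed when resolution fails.

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

// HostChecker matches either a CIDR range, a single IP, or a literal domain,
// mirroring the checker introduced in patch 10.
type HostChecker struct {
	Net    *net.IPNet
	IP     net.IP
	Domain string
}

func (c *HostChecker) Matches(ips []net.IP, host string) bool {
	if c.Domain != "" && c.Domain == host {
		return true
	}
	for _, ip := range ips {
		if c.Net != nil && c.Net.Contains(ip) {
			return true
		}
		if c.IP != nil && c.IP.Equal(ip) {
			return true
		}
	}
	return false
}

// isHostBlocked resolves the hostname first (unless it already is an IP literal),
// so a denied IP cannot be reached by hiding it behind a DNS name.
func isHostBlocked(denyList []*HostChecker, address *url.URL, lookup func(string) ([]net.IP, error)) bool {
	host := address.Hostname()
	ips := []net.IP{net.ParseIP(host)}
	if ips[0] == nil { // not an IP literal, resolve it
		resolved, err := lookup(host)
		if err != nil {
			return true // fail closed on resolution errors
		}
		ips = resolved
	}
	for _, checker := range denyList {
		if checker.Matches(ips, host) {
			return true
		}
	}
	return false
}

func main() {
	_, loopback, _ := net.ParseCIDR("127.0.0.0/8")
	denyList := []*HostChecker{{Net: loopback}, {Domain: "localhost"}}

	u, _ := url.Parse("https://evil.example:8080/hook")
	blocked := isHostBlocked(denyList, u, func(string) ([]net.IP, error) {
		return []net.IP{net.ParseIP("127.0.0.1")}, nil // DNS points back at loopback
	})
	fmt.Println(blocked) // true: the resolved IP falls into 127.0.0.0/8
}
```

Injecting `lookup` instead of calling `net.LookupIP` directly is also what lets the table tests in `http_module_test.go` simulate a hostname resolving to a loopback address without touching real DNS.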
diff --git a/docs/docs/guides/integrate/zitadel-apis/event-api.md b/docs/docs/guides/integrate/zitadel-apis/event-api.md index 9051cb227a..ed35aa1c8e 100644 --- a/docs/docs/guides/integrate/zitadel-apis/event-api.md +++ b/docs/docs/guides/integrate/zitadel-apis/event-api.md @@ -139,10 +139,10 @@ curl --request POST \ The following example shows you how you could use the events search to find out the failed login attempts of your users. You have to include all the event types that tell you that a login attempt has failed. -In this case this are the following events: +In this case these are the following events: - Password verification failed -- One-time-password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc) -- Universal-Second-Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc) +- One-time password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc) +- Universal Second Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc) - Passwordless/Passkey check failed (FaceID, WindowsHello, FingerPrint, etc) ```bash diff --git a/docs/docs/guides/manage/console/default-settings.mdx b/docs/docs/guides/manage/console/default-settings.mdx index dce9f4648b..26639b32f3 100644 --- a/docs/docs/guides/manage/console/default-settings.mdx +++ b/docs/docs/guides/manage/console/default-settings.mdx @@ -178,10 +178,10 @@ Multifactors: - U2F (Universal Second Factor) with PIN, e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey -Secondfactors (2FA): +Second factors (2FA): - Time-based One Time Password (TOTP), Authenticator Apps like Google/Microsoft Authenticator, Authy, etc. -- Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey +- Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardware tokens like Yubikey - One Time Password with Email (Email OTP) - One Time Password with SMS (SMS OTP) @@ -195,9 +195,9 @@ Configure the different lifetimes checks for the login process: - **Password Check Lifetime** specifies after which period a user has to reenter his password during the login process - **External Login Check Lifetime** specifies after which period a user will be redirected to the IDP during the login process -- **Multifactor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi Factor during the login process (value 0 will deactivate the prompt) +- **Multi-factor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi-factor during the login process (value 0 will deactivate the prompt) - **Second Factor Check Lifetime** specifies after which period a user has to revalidate the 2-Factor during the login process -- **Multifactor Login Check Lifetime** specifies after which period a user has to revalidate the Multi Factor during the login process +- **Multi-factor Login Check Lifetime** specifies after which period a user has to revalidate the Multi-factor during the login process ## Identity Providers diff --git a/docs/docs/guides/migrate/sources/zitadel.md b/docs/docs/guides/migrate/sources/zitadel.md index cb1cd51cb7..8a8dd60a3d 100644 --- a/docs/docs/guides/migrate/sources/zitadel.md +++ b/docs/docs/guides/migrate/sources/zitadel.md @@ -16,7 +16,7 @@ The following scripts don't include: - Global policies - IAM members - Global IDPs -- Global second/multi factors +- Global second factor / multi-factors - Machine keys - Personal Access Tokens - Application keys 
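The integration-test changes in patch 09 above all follow the same hardening pattern: inside `require.EventuallyWithT`, assertions run against the supplied `*assert.CollectT` with `assert.*` rather than `require.*` on the outer `t`, so a failed attempt is recorded and the next tick retries instead of aborting the whole test, and length checks guard the indexing loops. A minimal sketch of that shape, where the `fetchUsers` helper is hypothetical and stands in for calls like `Client.ListUsers`:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// fetchUsers is a hypothetical stand-in for the gRPC list call under test.
func fetchUsers() ([]string, error) {
	return []string{"alice", "bob"}, nil
}

func TestEventuallyConsistentList(t *testing.T) {
	want := []string{"alice", "bob"}

	// The outer require fails the test only once the whole retry budget is spent.
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		got, err := fetchUsers()
		// Inside the callback, assert on the CollectT: a failure here marks this
		// attempt as failed and the next tick retries.
		if !assert.NoError(c, err) {
			return
		}
		// Guard the indexing loop with the length check, so a short result
		// triggers a retry instead of an out-of-range access.
		if assert.Len(c, got, len(want)) {
			for i := range want {
				assert.Equal(c, want[i], got[i])
			}
		}
	}, time.Minute, time.Second, "timed out waiting for expected user result")
}
```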
diff --git a/docs/docs/guides/migrate/users.md b/docs/docs/guides/migrate/users.md index 3ce38a9ac4..52ef779347 100644 --- a/docs/docs/guides/migrate/users.md +++ b/docs/docs/guides/migrate/users.md @@ -173,7 +173,7 @@ In case the hashes can't be transferred directly, you always have the option to If your legacy system receives the passwords in clear text (eg, login form) you could also directly create users via ZITADEL API. We will explain this pattern in more detail in this guide. -### One-time-passwords (OTP) +### One-time passwords (OTP) You can pass the OTP secret when creating users: diff --git a/internal/api/oidc/server.go b/internal/api/oidc/server.go index 15628cad8a..07bc4706be 100644 --- a/internal/api/oidc/server.go +++ b/internal/api/oidc/server.go @@ -46,7 +46,7 @@ type Server struct { } func endpoints(endpointConfig *EndpointConfig) op.Endpoints { - // some defaults. The new Server will disable enpoints that are nil. + // some defaults. The new Server will disable endpoints that are nil. endpoints := op.Endpoints{ Authorization: op.NewEndpoint("/oauth/v2/authorize"), Token: op.NewEndpoint("/oauth/v2/token"), diff --git a/internal/authz/repository/eventsourcing/eventstore/token_verifier.go b/internal/authz/repository/eventsourcing/eventstore/token_verifier.go index 1a50c141d6..9dec3fcf00 100644 --- a/internal/authz/repository/eventsourcing/eventstore/token_verifier.go +++ b/internal/authz/repository/eventsourcing/eventstore/token_verifier.go @@ -173,7 +173,7 @@ func (repo *TokenVerifierRepo) verifySessionToken(ctx context.Context, sessionID } // checkAuthentication ensures the session or token was authenticated (at least a single [domain.UserAuthMethodType]). -// It will also check if there was a multi factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor +// It will also check if there was a multi-factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor func (repo *TokenVerifierRepo) checkAuthentication(ctx context.Context, authMethods []domain.UserAuthMethodType, userID string) error { if len(authMethods) == 0 { return zerrors.ThrowPermissionDenied(nil, "AUTHZ-Kl3p0", "authentication required") diff --git a/internal/notification/handlers/user_notifier_test.go b/internal/notification/handlers/user_notifier_test.go index d0151b6d7e..991eb0531d 100644 --- a/internal/notification/handlers/user_notifier_test.go +++ b/internal/notification/handlers/user_notifier_test.go @@ -1469,7 +1469,7 @@ func Test_userNotifier_reduceOTPSMSChallenged(t *testing.T) { givenTemplate := "{{.LogoURL}}" testCode := "" expiry := 0 * time.Hour - expectContent := fmt.Sprintf(`%[1]s is your one-time-password for %[2]s. Use it within the next %[3]s. + expectContent := fmt.Sprintf(`%[1]s is your one-time password for %[2]s. Use it within the next %[3]s. @%[2]s #%[1]s`, testCode, eventOriginDomain, expiry) w.messageSMS = &messages.SMS{ SenderPhoneNumber: "senderNumber", @@ -1506,7 +1506,7 @@ func Test_userNotifier_reduceOTPSMSChallenged(t *testing.T) { givenTemplate := "{{.LogoURL}}" testCode := "" expiry := 0 * time.Hour - expectContent := fmt.Sprintf(`%[1]s is your one-time-password for %[2]s. Use it within the next %[3]s. + expectContent := fmt.Sprintf(`%[1]s is your one-time password for %[2]s. Use it within the next %[3]s. 
@%[2]s #%[1]s`, testCode, instancePrimaryDomain, expiry) w.messageSMS = &messages.SMS{ SenderPhoneNumber: "senderNumber", diff --git a/internal/notification/static/i18n/en.yaml b/internal/notification/static/i18n/en.yaml index a431fc999d..b4367c4932 100644 --- a/internal/notification/static/i18n/en.yaml +++ b/internal/notification/static/i18n/en.yaml @@ -35,7 +35,7 @@ VerifyEmailOTP: ButtonText: Authenticate VerifySMSOTP: Text: >- - {{.OTP}} is your one-time-password for {{ .Domain }}. Use it within the next {{.Expiry}}. + {{.OTP}} is your one-time password for {{ .Domain }}. Use it within the next {{.Expiry}}. @{{.Domain}} #{{.OTP}} DomainClaimed: diff --git a/internal/static/i18n/en.yaml b/internal/static/i18n/en.yaml index 110a8d71e0..6ba27b8280 100644 --- a/internal/static/i18n/en.yaml +++ b/internal/static/i18n/en.yaml @@ -938,8 +938,8 @@ EventTypes: added: Second factor added to Login Policy removed: Second factor removed from Login Policy multifactor: - added: Multi factor added to Login Policy - removed: Multi factor removed from Login Policy + added: Multi-factor added to Login Policy + removed: Multi-factor removed from Login Policy password: complexity: added: Password complexity policy added diff --git a/internal/static/i18n/es.yaml b/internal/static/i18n/es.yaml index 48f7f3d33e..8afb690833 100644 --- a/internal/static/i18n/es.yaml +++ b/internal/static/i18n/es.yaml @@ -937,8 +937,8 @@ EventTypes: added: Doble factor añadido a la política de inicio de sesión removed: Doble factor eliminado de la política de inicio de sesión multifactor: - added: Multi factor añadido a la política de inicio de sesión - removed: Multi factor eliminado de la política de inicio de sesión + added: Multi-factor añadido a la política de inicio de sesión + removed: Multi-factor eliminado de la política de inicio de sesión password: complexity: added: Política de complejidad de la contraseña añadida diff --git a/proto/zitadel/admin.proto b/proto/zitadel/admin.proto index d3cf774f41..e1818833e0 100644 --- a/proto/zitadel/admin.proto +++ b/proto/zitadel/admin.proto @@ -2827,7 +2827,7 @@ service AdminService { responses: { key: "400"; value: { - description: "invalid second-factor type"; + description: "invalid second factor type"; schema: { json_schema: { ref: "#/definitions/rpcStatus"; @@ -2862,7 +2862,7 @@ service AdminService { responses: { key: "400"; value: { - description: "Invalid second-factor type"; + description: "Invalid second factor type"; schema: { json_schema: { ref: "#/definitions/rpcStatus"; @@ -2886,12 +2886,12 @@ service AdminService { tags: "Settings"; tags: "Login Settings"; tags: "Authentication Methods" - summary: "List Multi Factors (MFA)"; - description: "Returns a list of multi factors (MFA) configured on the login settings of the instance. It affects all organizations, without custom login settings. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). Per definition, it is called multifactor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." + summary: "List Multi-factors (MFA)"; + description: "Returns a list of multi-factors (MFA) configured on the login settings of the instance. It affects all organizations, without custom login settings. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). 
Per definition, it is called multifactor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." responses: { key: "200"; value: { - description: "multi factors of default login policy"; + description: "multi-factors of default login policy"; }; }; }; @@ -2946,7 +2946,7 @@ service AdminService { tags: "Settings"; tags: "Login Settings"; tags: "Authentication Methods" - summary: "Remove Multi Factor (MFA)"; + summary: "Remove Multi-factor (MFA)"; description: "Remove a multi-factor (MFA) from the login settings of the instance. It affects all organizations, without custom login settings. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). Per definition, it is called multi-factor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." responses: { key: "200"; diff --git a/proto/zitadel/auth.proto b/proto/zitadel/auth.proto index ed58d70569..0ee6ad86d8 100644 --- a/proto/zitadel/auth.proto +++ b/proto/zitadel/auth.proto @@ -597,7 +597,7 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" summary: "List Authentication Factors"; - description: "Returns a list of possible authentication factors, multi-factor (MFA), second-factor (2FA)" + description: "Returns a list of possible authentication factors, multi-factor (MFA), second factor (2FA)" }; } @@ -612,8 +612,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Add One-Time-Password (OTP)"; - description: "Add a new One-Time-Password (OTP) factor to the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. Only one OTP per user is allowed. After adding a new OTP it has to be verified." + summary: "Add One-Time Password (OTP)"; + description: "Add a new One-Time Password (OTP) factor to the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. Only one OTP per user is allowed. After adding a new OTP it has to be verified." }; } @@ -629,8 +629,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Verify One-Time-Password (OTP)"; - description: "Verify the last added One-Time-Password (OTP) factor of the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. Only one OTP per user is allowed." + summary: "Verify One-Time Password (OTP)"; + description: "Verify the last added One-Time Password (OTP) factor of the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. Only one OTP per user is allowed." }; } @@ -645,8 +645,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Remove One-Time-Password (OTP)"; - description: "Remove the configured One-Time-Password (OTP) factor of the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. As only one OTP per user is allowed, the user will not have OTP as a second-factor afterward." 
+ summary: "Remove One-Time Password (OTP)"; + description: "Remove the configured One-Time Password (OTP) factor of the authenticated user. OTP is an authenticator app like Google/Microsoft Authenticator, Authy, etc. As only one OTP per user is allowed, the user will not have OTP as a second factor afterward." }; } @@ -661,8 +661,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Add One-Time-Password (OTP) SMS"; - description: "Add a new One-Time-Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor." + summary: "Add One-Time Password (OTP) SMS"; + description: "Add a new One-Time Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor." }; } @@ -677,8 +677,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Remove One-Time-Password (OTP) SMS"; - description: "Remove the configured One-Time-Password (OTP) SMS factor of the authenticated user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second-factor afterward." + summary: "Remove One-Time Password (OTP) SMS"; + description: "Remove the configured One-Time Password (OTP) SMS factor of the authenticated user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second factor afterward." }; } @@ -693,8 +693,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Add One-Time-Password (OTP) Email"; - description: "Add a new One-Time-Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor." + summary: "Add One-Time Password (OTP) Email"; + description: "Add a new One-Time Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor." }; } @@ -709,8 +709,8 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" - summary: "Remove One-Time-Password (OTP) Email"; - description: "Remove the configured One-Time-Password (OTP) Email factor of the authenticated user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second-factor afterward." + summary: "Remove One-Time Password (OTP) Email"; + description: "Remove the configured One-Time Password (OTP) Email factor of the authenticated user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second factor afterward." }; } @@ -726,7 +726,7 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" summary: "Add Universal Second Factor (U2F)"; - description: "Add a new Universal-Second-Factor (U2F) to the authenticated user. U2F is a device-dependent authentication like FingerScan, FaceID, WindowHello, etc. The factor has to be verified after adding. Multiple factors can be added." 
+ description: "Add a new Universal Second Factor (U2F) to the authenticated user. U2F is a device-dependent authentication like FingerScan, FaceID, WindowHello, etc. The factor has to be verified after adding. Multiple factors can be added." }; } @@ -743,7 +743,7 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" summary: "Add Universal Second Factor (U2F)"; - description: "Verify the last added new Universal-Second-Factor (U2F) to the authenticated user." + description: "Verify the last added new Universal Second Factor (U2F) to the authenticated user." }; } @@ -759,7 +759,7 @@ service AuthService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { tags: "User Authentication Factor" summary: "Remove Universal Second Factor (U2F)"; - description: "Remove a specific Universal-Second-Factor (U2F) from the authenticated user by sending the id." + description: "Remove a specific Universal Second Factor (U2F) from the authenticated user by sending the id." }; } diff --git a/proto/zitadel/management.proto b/proto/zitadel/management.proto index 101741b2f5..cb5bfb1389 100644 --- a/proto/zitadel/management.proto +++ b/proto/zitadel/management.proto @@ -1345,7 +1345,7 @@ service ManagementService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { summary: "Get User Authentication Factors (2FA/MFA)"; - description: "Get a list of authentication factors the user has set. Including Second-Factors (2FA) and Multi-Factors (MFA).\n\nDeprecated: please use user service v2 ListAuthenticationMethodTypes" + description: "Get a list of authentication factors the user has set. Including Second Factors (2FA) and Multi-Factors (MFA).\n\nDeprecated: please use user service v2 ListAuthenticationMethodTypes" tags: "Users"; tags: "User Human"; deprecated: true; @@ -1378,7 +1378,7 @@ service ManagementService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { summary: "Remove Multi-Factor OTP"; - description: "Remove the configured One-Time-Password (OTP) as a factor from the user. OTP is an authentication app, like Authy or Google/Microsoft Authenticator.\n\nDeprecated: please use user service v2 RemoveTOTP" + description: "Remove the configured One-Time Password (OTP) as a factor from the user. OTP is an authentication app, like Authy or Google/Microsoft Authenticator.\n\nDeprecated: please use user service v2 RemoveTOTP" tags: "Users"; tags: "User Human"; deprecated: true; @@ -1412,7 +1412,7 @@ service ManagementService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { summary: "Remove Multi-Factor U2F"; deprecated: true; - description: "Remove the configured Universal-Second-Factor (U2F) as a factor from the user. U2F is a device-dependent factor like FingerPrint, Windows-Hello, etc.\n\nDeprecated: please use user service v2 RemoveU2F" + description: "Remove the configured Universal Second Factor (U2F) as a factor from the user. U2F is a device-dependent factor like FingerPrint, Windows-Hello, etc.\n\nDeprecated: please use user service v2 RemoveU2F" tags: "Users"; tags: "User Human"; responses: { @@ -1444,7 +1444,7 @@ service ManagementService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { summary: "Remove Multi-Factor OTP SMS"; - description: "Remove the configured One-Time-Password (OTP) SMS as a factor from the user. 
As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second-factor afterward.\n\nDeprecated: please use user service v2 RemoveOTPSMS" + description: "Remove the configured One-Time Password (OTP) SMS as a factor from the user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second factor afterward.\n\nDeprecated: please use user service v2 RemoveOTPSMS" tags: "Users"; tags: "User Human"; deprecated: true; @@ -1477,7 +1477,7 @@ service ManagementService { option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { summary: "Remove Multi-Factor OTP SMS"; - description: "Remove the configured One-Time-Password (OTP) Email as a factor from the user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second-factor afterward.\n\nDeprecated: please use user service v2 RemoveOTPEmail" + description: "Remove the configured One-Time Password (OTP) Email as a factor from the user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second factor afterward.\n\nDeprecated: please use user service v2 RemoveOTPEmail" tags: "Users"; tags: "User Human"; deprecated: true; @@ -4618,8 +4618,8 @@ service ManagementService { tags: "Settings"; tags: "Login Settings"; tags: "Authentication Methods" - summary: "List Multi Factors (MFA)"; - description: "Returns a list of multi factors (MFA) configured on the login settings of the organization. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). Per definition, it is called multifactor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." + summary: "List Multi-factors (MFA)"; + description: "Returns a list of multi-factors (MFA) configured on the login settings of the organization. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). Per definition, it is called multifactor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." parameters: { headers: { name: "x-zitadel-orgid"; @@ -4671,7 +4671,7 @@ service ManagementService { tags: "Settings"; tags: "Login Settings"; tags: "Authentication Methods" - summary: "Remove Multi Factor (MFA)"; + summary: "Remove Multi-factor (MFA)"; description: "Remove a multi-factor (MFA) from the login settings of the organization. It affects all organizations, without custom login settings. Authentication factors are used as an additional layer of security for your users (e.g. Authentication App, FingerPrint, Windows Hello, etc). Per definition, it is called multi-factor factor or passwordless as it is used as first and second authentication and a password is not necessary. In the UI we generalize it as passwordless or passkey." parameters: { headers: { diff --git a/proto/zitadel/resources/user/v3alpha/authenticator.proto b/proto/zitadel/resources/user/v3alpha/authenticator.proto index eed57a414f..dc8120789c 100644 --- a/proto/zitadel/resources/user/v3alpha/authenticator.proto +++ b/proto/zitadel/resources/user/v3alpha/authenticator.proto @@ -18,12 +18,12 @@ message Authenticators { Password password = 2; // Meta information about the user's WebAuthN authenticators. 
repeated WebAuthN web_auth_n = 3; - // A list of the user's time-based one-time-password (TOTP) authenticators, + // A list of the user's time-based one-time password (TOTP) authenticators, // incl. the name for identification. repeated TOTP totps = 4; - // A list of the user's one-time-password (OTP) SMS authenticators. + // A list of the user's one-time password (OTP) SMS authenticators. repeated OTPSMS otp_sms = 5; - // A list of the user's one-time-password (OTP) Email authenticators. + // A list of the user's one-time password (OTP) Email authenticators. repeated OTPEmail otp_email = 6; // A list of the user's authentication keys. They can be used to authenticate e.g. by JWT Profile. repeated AuthenticationKey authentication_keys = 7; @@ -100,7 +100,7 @@ message WebAuthN { // State whether the WebAuthN registration has been completed. bool is_verified = 3; // States if the user has been verified during the registration. Authentication with this device - // will be considered as multi factor authentication (MFA) without the need to check a password + // will be considered as multi-factor authentication (MFA) without the need to check a password // (typically known as Passkeys). // Without user verification it will be a second factor authentication (2FA), typically done // after a password check. @@ -154,7 +154,7 @@ message VerifyWebAuthNRegistration { } message OTPSMS { - // unique identifier of the one-time-password (OTP) SMS authenticator. + // unique identifier of the one-time password (OTP) SMS authenticator. string otp_sms_id = 1 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { example: "\"69629023906488334\"" @@ -171,7 +171,7 @@ message OTPSMS { } message OTPEmail { - // unique identifier of the one-time-password (OTP) Email authenticator. + // unique identifier of the one-time password (OTP) Email authenticator. string otp_email_id = 1 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { example: "\"69629023906488334\"" @@ -188,7 +188,7 @@ message OTPEmail { } message TOTP { - // unique identifier of the time-based one-time-password (TOTP) authenticator. + // unique identifier of the time-based one-time password (TOTP) authenticator. string totp_id = 1 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { example: "\"69629023906488334\"" diff --git a/proto/zitadel/resources/user/v3alpha/user_service.proto b/proto/zitadel/resources/user/v3alpha/user_service.proto index 96e595d81d..4e297d5ed1 100644 --- a/proto/zitadel/resources/user/v3alpha/user_service.proto +++ b/proto/zitadel/resources/user/v3alpha/user_service.proto @@ -719,7 +719,7 @@ service ZITADELUsers { // Start a TOTP registration // - // Start the registration of a new time-based one-time-password (TOTP) generator for a user. + // Start the registration of a new time-based one-time password (TOTP) generator for a user. // As a response a secret is returned, which is used to initialize a TOTP app or device. rpc StartTOTPRegistration (StartTOTPRegistrationRequest) returns (StartTOTPRegistrationResponse) { option (google.api.http) = { @@ -743,7 +743,7 @@ service ZITADELUsers { // Verify a TOTP registration // - // Verify the time-based one-time-password (TOTP) registration with the generated code. + // Verify the time-based one-time password (TOTP) registration with the generated code. 
rpc VerifyTOTPRegistration (VerifyTOTPRegistrationRequest) returns (VerifyTOTPRegistrationResponse) { option (google.api.http) = { post: "/resources/v3alpha/users/{id}/totp/{totp_id}/_verify" @@ -767,7 +767,7 @@ service ZITADELUsers { // Remove a TOTP authenticator // - // Remove an existing time-based one-time-password (TOTP) authenticator from a user, so it cannot be used for authentication anymore. + // Remove an existing time-based one-time password (TOTP) authenticator from a user, so it cannot be used for authentication anymore. rpc RemoveTOTPAuthenticator (RemoveTOTPAuthenticatorRequest) returns (RemoveTOTPAuthenticatorResponse) { option (google.api.http) = { delete: "/resources/v3alpha/users/{id}/totp/{totp_id}" @@ -790,7 +790,7 @@ service ZITADELUsers { // Add a OTP SMS authenticator // - // Add a new one-time-password (OTP) SMS authenticator to a user. + // Add a new one-time password (OTP) SMS authenticator to a user. // If the phone is not passed as verified, a verification code will be generated, // which can be either returned or will be sent to the user by SMS. rpc AddOTPSMSAuthenticator (AddOTPSMSAuthenticatorRequest) returns (AddOTPSMSAuthenticatorResponse) { @@ -841,7 +841,7 @@ service ZITADELUsers { // Remove a OTP SMS authenticator // - // Remove an existing one-time-password (OTP) SMS authenticator from a user, so it cannot be used for authentication anymore. + // Remove an existing one-time password (OTP) SMS authenticator from a user, so it cannot be used for authentication anymore. rpc RemoveOTPSMSAuthenticator (RemoveOTPSMSAuthenticatorRequest) returns (RemoveOTPSMSAuthenticatorResponse) { option (google.api.http) = { delete: "/resources/v3alpha/users/{id}/otp_sms/{otp_sms_id}" @@ -864,7 +864,7 @@ service ZITADELUsers { // Add a OTP Email authenticator // - // Add a new one-time-password (OTP) Email authenticator to a user. + // Add a new one-time password (OTP) Email authenticator to a user. // If the email is not passed as verified, a verification code will be generated, // which can be either returned or will be sent to the user by email. rpc AddOTPEmailAuthenticator (AddOTPEmailAuthenticatorRequest) returns (AddOTPEmailAuthenticatorResponse) { @@ -915,7 +915,7 @@ service ZITADELUsers { // Remove a OTP Email authenticator // - // Remove an existing one-time-password (OTP) Email authenticator from a user, so it cannot be used for authentication anymore. + // Remove an existing one-time password (OTP) Email authenticator from a user, so it cannot be used for authentication anymore. 
rpc RemoveOTPEmailAuthenticator (RemoveOTPEmailAuthenticatorRequest) returns (RemoveOTPEmailAuthenticatorResponse) { option (google.api.http) = { delete: "/resources/v3alpha/users/{id}/otp_email/{otp_email_id}" diff --git a/proto/zitadel/settings/v2/login_settings.proto b/proto/zitadel/settings/v2/login_settings.proto index ca004288fe..9fdbb45993 100644 --- a/proto/zitadel/settings/v2/login_settings.proto +++ b/proto/zitadel/settings/v2/login_settings.proto @@ -70,7 +70,7 @@ message LoginSettings { ]; google.protobuf.Duration second_factor_check_lifetime = 12 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { - description: "Defines after how long the second-factor check is valid."; + description: "Defines after how long the second factor check is valid."; example: "\"64800s\""; } ]; diff --git a/proto/zitadel/settings/v2beta/login_settings.proto b/proto/zitadel/settings/v2beta/login_settings.proto index a31c058931..dd809becca 100644 --- a/proto/zitadel/settings/v2beta/login_settings.proto +++ b/proto/zitadel/settings/v2beta/login_settings.proto @@ -70,7 +70,7 @@ message LoginSettings { ]; google.protobuf.Duration second_factor_check_lifetime = 12 [ (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { - description: "Defines after how long the second-factor check is valid."; + description: "Defines after how long the second factor check is valid."; example: "\"64800s\""; } ]; diff --git a/proto/zitadel/user/v2/user_service.proto b/proto/zitadel/user/v2/user_service.proto index ac018f9099..47707fef4f 100644 --- a/proto/zitadel/user/v2/user_service.proto +++ b/proto/zitadel/user/v2/user_service.proto @@ -771,7 +771,7 @@ service UserService { // Remove TOTP generator from a user // - // Remove the configured TOTP generator of a user. As only one TOTP generator per user is allowed, the user will not have TOTP as a second-factor afterward.. + // Remove the configured TOTP generator of a user. As only one TOTP generator per user is allowed, the user will not have TOTP as a second factor afterward. rpc RemoveTOTP (RemoveTOTPRequest) returns (RemoveTOTPResponse) { option (google.api.http) = { delete: "/v2/users/{user_id}/totp" @@ -794,7 +794,7 @@ service UserService { // Add OTP SMS for a user // - // Add a new One-Time-Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor.. + // Add a new One-Time Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor.. rpc AddOTPSMS (AddOTPSMSRequest) returns (AddOTPSMSResponse) { option (google.api.http) = { post: "/v2/users/{user_id}/otp_sms" @@ -816,9 +816,9 @@ service UserService { }; } - // Remove One-Time-Password (OTP) SMS from a user + // Remove One-Time Password (OTP) SMS from a user // - // Remove the configured One-Time-Password (OTP) SMS factor of a user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second-factor afterward.. + // Remove the configured One-Time Password (OTP) SMS factor of a user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second factor afterward. 
rpc RemoveOTPSMS (RemoveOTPSMSRequest) returns (RemoveOTPSMSResponse) { option (google.api.http) = { delete: "/v2/users/{user_id}/otp_sms" @@ -841,7 +841,7 @@ service UserService { // Add OTP Email for a user // - // Add a new One-Time-Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor.. + // Add a new One-Time Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor.. rpc AddOTPEmail (AddOTPEmailRequest) returns (AddOTPEmailResponse) { option (google.api.http) = { post: "/v2/users/{user_id}/otp_email" @@ -863,9 +863,9 @@ service UserService { }; } - // Remove One-Time-Password (OTP) Email from a user + // Remove One-Time Password (OTP) Email from a user // - // Remove the configured One-Time-Password (OTP) Email factor of a user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second-factor afterward.. + // Remove the configured One-Time Password (OTP) Email factor of a user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second factor afterward. rpc RemoveOTPEmail (RemoveOTPEmailRequest) returns (RemoveOTPEmailResponse) { option (google.api.http) = { delete: "/v2/users/{user_id}/otp_email" diff --git a/proto/zitadel/user/v2beta/user_service.proto b/proto/zitadel/user/v2beta/user_service.proto index f4ee9e5f3c..156f961c59 100644 --- a/proto/zitadel/user/v2beta/user_service.proto +++ b/proto/zitadel/user/v2beta/user_service.proto @@ -769,7 +769,7 @@ service UserService { // Remove TOTP generator from a user // - // Remove the configured TOTP generator of a user. As only one TOTP generator per user is allowed, the user will not have TOTP as a second-factor afterward. + // Remove the configured TOTP generator of a user. As only one TOTP generator per user is allowed, the user will not have TOTP as a second factor afterward. // // Deprecated: please move to the corresponding endpoint under user service v2 (GA). rpc RemoveTOTP (RemoveTOTPRequest) returns (RemoveTOTPResponse) { @@ -795,7 +795,7 @@ service UserService { // Add OTP SMS for a user // - // Add a new One-Time-Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor. + // Add a new One-Time Password (OTP) SMS factor to the authenticated user. OTP SMS will enable the user to verify a OTP with the latest verified phone number. The phone number has to be verified to add the second factor. // // Deprecated: please move to the corresponding endpoint under user service v2 (GA). rpc AddOTPSMS (AddOTPSMSRequest) returns (AddOTPSMSResponse) { @@ -820,9 +820,9 @@ service UserService { }; } - // Remove One-Time-Password (OTP) SMS from a user + // Remove One-Time Password (OTP) SMS from a user // - // Remove the configured One-Time-Password (OTP) SMS factor of a user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second-factor afterward. + // Remove the configured One-Time Password (OTP) SMS factor of a user. As only one OTP SMS per user is allowed, the user will not have OTP SMS as a second factor afterward. // // Deprecated: please move to the corresponding endpoint under user service v2 (GA). 
rpc RemoveOTPSMS (RemoveOTPSMSRequest) returns (RemoveOTPSMSResponse) { @@ -848,7 +848,7 @@ service UserService { // Add OTP Email for a user // - // Add a new One-Time-Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor. + // Add a new One-Time Password (OTP) Email factor to the authenticated user. OTP Email will enable the user to verify a OTP with the latest verified email. The email has to be verified to add the second factor. // // Deprecated: please move to the corresponding endpoint under user service v2 (GA). rpc AddOTPEmail (AddOTPEmailRequest) returns (AddOTPEmailResponse) { @@ -873,9 +873,9 @@ service UserService { }; } - // Remove One-Time-Password (OTP) Email from a user + // Remove One-Time Password (OTP) Email from a user // - // Remove the configured One-Time-Password (OTP) Email factor of a user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second-factor afterward. + // Remove the configured One-Time Password (OTP) Email factor of a user. As only one OTP Email per user is allowed, the user will not have OTP Email as a second factor afterward. // // Deprecated: please move to the corresponding endpoint under user service v2 (GA). rpc RemoveOTPEmail (RemoveOTPEmailRequest) returns (RemoveOTPEmailResponse) { From d696d15a1c26b84449d3c984bc2594a0dbb19002 Mon Sep 17 00:00:00 2001 From: karatekaneen <41196840+karatekaneen@users.noreply.github.com> Date: Wed, 23 Oct 2024 07:40:43 +0200 Subject: [PATCH 12/30] docs: update logger example for action modules (#8813) # Which Problems Are Solved Updated the example to fit with the actual logger signature. Solves #8811. --- docs/docs/apis/actions/modules.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/apis/actions/modules.md b/docs/docs/apis/actions/modules.md index 7c99ab05bb..0fadcdff15 100644 --- a/docs/docs/apis/actions/modules.md +++ b/docs/docs/apis/actions/modules.md @@ -80,7 +80,7 @@ Use the function that reflects your log level. ### Example ```js -logger.info("This is an info log.") +logger.log("This is an info log.") logger.warn("This is a warn log.") From 32d958ea43ab407eb1c03e5060c7a71ecfaf9fda Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:36:50 +0200 Subject: [PATCH 13/30] chore: add await for project to oidc integration tests (#8809) # Which Problems Are Solved In integration tests there is waiting for the application, but the project is also included if the token can be created. # How the Problems Are Solved Wait for project not only for the application in the integration tests. # Additional Changes Some more corrections in integration tests. 
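The change is easier to follow with the polling pattern spelled out on its own. The following is a minimal, standard-library-only Go sketch of waiting for the project before the application; the helper `awaitAll` and the placeholder checks are assumptions for illustration only and are not the actual integration helpers touched by this patch (see the `internal/integration/oidc.go` hunk further below).

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// awaitAll polls the given checks in order until all of them succeed in a
// single pass or the context expires.
func awaitAll(ctx context.Context, interval time.Duration, checks ...func(context.Context) error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		var lastErr error
		for _, check := range checks {
			if err := check(ctx); err != nil {
				lastErr = err
				break
			}
		}
		if lastErr == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.Join(ctx.Err(), lastErr)
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Placeholders for the real lookups, e.g. GetProjectByID and GetAppByID.
	checkProject := func(context.Context) error { return nil }
	checkApp := func(context.Context) error { return nil }

	// The project check runs first, so the application is only queried once
	// the project itself is known to exist.
	if err := awaitAll(ctx, 100*time.Millisecond, checkProject, checkApp); err != nil {
		fmt.Println("resources not ready:", err)
		return
	}
	fmt.Println("project and application are ready")
}
```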
# Additional Context None --------- Co-authored-by: Livio Spring --- .../oidc/v2/integration_test/oidc_test.go | 3 - .../oidc/v2beta/integration_test/oidc_test.go | 3 - .../integration_test/execution_target_test.go | 70 +++++++++++++++---- .../v3alpha/integration_test/query_test.go | 4 +- .../webkey_integration_test.go | 29 ++++---- .../user/v2/integration_test/query_test.go | 42 ++++++----- .../user/v2/integration_test/user_test.go | 29 +++----- .../user/v2beta/integration_test/user_test.go | 29 +++----- internal/integration/assert.go | 12 +++- internal/integration/oidc.go | 16 ++++- 10 files changed, 141 insertions(+), 96 deletions(-) diff --git a/internal/api/grpc/oidc/v2/integration_test/oidc_test.go b/internal/api/grpc/oidc/v2/integration_test/oidc_test.go index d9e9a76754..4c079476b1 100644 --- a/internal/api/grpc/oidc/v2/integration_test/oidc_test.go +++ b/internal/api/grpc/oidc/v2/integration_test/oidc_test.go @@ -19,14 +19,12 @@ import ( "github.com/zitadel/zitadel/pkg/grpc/object/v2" oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2" "github.com/zitadel/zitadel/pkg/grpc/session/v2" - "github.com/zitadel/zitadel/pkg/grpc/user/v2" ) var ( CTX context.Context Instance *integration.Instance Client oidc_pb.OIDCServiceClient - User *user.AddHumanUserResponse ) const ( @@ -44,7 +42,6 @@ func TestMain(m *testing.M) { Client = Instance.Client.OIDCv2 CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner) - User = Instance.CreateHumanUser(CTX) return m.Run() }()) } diff --git a/internal/api/grpc/oidc/v2beta/integration_test/oidc_test.go b/internal/api/grpc/oidc/v2beta/integration_test/oidc_test.go index dc4f6cc0af..650a0dac30 100644 --- a/internal/api/grpc/oidc/v2beta/integration_test/oidc_test.go +++ b/internal/api/grpc/oidc/v2beta/integration_test/oidc_test.go @@ -19,14 +19,12 @@ import ( object "github.com/zitadel/zitadel/pkg/grpc/object/v2beta" oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2beta" session "github.com/zitadel/zitadel/pkg/grpc/session/v2beta" - "github.com/zitadel/zitadel/pkg/grpc/user/v2" ) var ( CTX context.Context Instance *integration.Instance Client oidc_pb.OIDCServiceClient - User *user.AddHumanUserResponse ) const ( @@ -44,7 +42,6 @@ func TestMain(m *testing.M) { Client = Instance.Client.OIDCv2beta CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner) - User = Instance.CreateHumanUser(CTX) return m.Run() }()) } diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go index 286048ab51..b62d0ee37f 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go @@ -62,10 +62,10 @@ func TestServer_ExecutionTarget(t *testing.T) { changedRequest := &action.GetTargetRequest{Id: targetCreated.GetDetails().GetId()} // replace original request with different targetID urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusOK, changedRequest) - targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, false) - instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) - waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod)) + targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, false) + + 
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) // expected response from the GetTarget expectedResponse := &action.GetTargetResponse{ @@ -119,10 +119,9 @@ func TestServer_ExecutionTarget(t *testing.T) { } // after request with different targetID, return changed response targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusOK, changedResponse) - targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, false) - instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) - waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) + targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, false) + waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) return func() { closeRequest() closeResponse() @@ -161,12 +160,10 @@ func TestServer_ExecutionTarget(t *testing.T) { wantRequest := &middleware.ContextInfoRequest{FullMethod: fullMethod, InstanceID: instance.ID(), OrgID: orgID, ProjectID: projectID, UserID: userID, Request: request} urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusInternalServerError, &action.GetTargetRequest{Id: "notchanged"}) - targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, true) - instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) + targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, true) + waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) // GetTarget with used target request.Id = targetRequest.GetDetails().GetId() - - waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod)) return func() { closeRequest() }, nil @@ -233,10 +230,9 @@ func TestServer_ExecutionTarget(t *testing.T) { } // after request with different targetID, return changed response targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusInternalServerError, changedResponse) - targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, true) - instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) - waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) + targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, true) + waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId())) return func() { closeResponse() }, nil @@ -277,7 +273,9 @@ func TestServer_ExecutionTarget(t *testing.T) { } } -func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition) { +func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition, targets []*action.ExecutionTargetType) { + instance.SetExecution(ctx, t, condition, targets) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, 
time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{ @@ -291,10 +289,54 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in return } assert.Len(ttt, got.GetResult(), 1) + gotTargets := got.GetResult()[0].GetExecution().GetTargets() + // always first check length, otherwise its failed anyway + if assert.Len(ttt, gotTargets, len(targets)) { + for i := range targets { + assert.EqualExportedValues(ttt, targets[i].GetType(), gotTargets[i].GetType()) + } + } + }, retryDuration, tick, "timeout waiting for expected execution result") return } +func waitForTarget(ctx context.Context, t *testing.T, instance *integration.Instance, endpoint string, ty domain.TargetType, interrupt bool) *action.CreateTargetResponse { + resp := instance.CreateTarget(ctx, t, "", endpoint, ty, interrupt) + + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := instance.Client.ActionV3Alpha.SearchTargets(ctx, &action.SearchTargetsRequest{ + Filters: []*action.TargetSearchFilter{ + {Filter: &action.TargetSearchFilter_InTargetIdsFilter{ + InTargetIdsFilter: &action.InTargetIDsFilter{TargetIds: []string{resp.GetDetails().GetId()}}, + }}, + }, + }) + if !assert.NoError(ttt, err) { + return + } + assert.Len(ttt, got.GetResult(), 1) + config := got.GetResult()[0].GetConfig() + assert.Equal(ttt, config.GetEndpoint(), endpoint) + switch ty { + case domain.TargetTypeWebhook: + if !assert.NotNil(ttt, config.GetRestWebhook()) { + return + } + assert.Equal(ttt, interrupt, config.GetRestWebhook().GetInterruptOnError()) + case domain.TargetTypeAsync: + assert.NotNil(ttt, config.GetRestAsync()) + case domain.TargetTypeCall: + if !assert.NotNil(ttt, config.GetRestCall()) { + return + } + assert.Equal(ttt, interrupt, config.GetRestCall().GetInterruptOnError()) + } + }, retryDuration, tick, "timeout waiting for expected execution result") + return resp +} + func conditionRequestFullMethod(fullMethod string) *action.Condition { return &action.Condition{ ConditionType: &action.Condition_Request{ diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go index c29900e966..e3d3233604 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go @@ -223,7 +223,9 @@ func TestServer_GetTarget(t *testing.T) { assert.Error(ttt, err, "Error: "+err.Error()) return } - assert.NoError(ttt, err) + if !assert.NoError(ttt, err) { + return + } wantTarget := tt.want.GetTarget() gotTarget := got.GetTarget() diff --git a/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go b/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go index 2a178ea285..19d02dcea3 100644 --- a/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go +++ b/internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go @@ -36,7 +36,7 @@ func TestMain(m *testing.M) { } func TestServer_Feature_Disabled(t *testing.T) { - instance, iamCtx := createInstance(t, false) + instance, iamCtx, _ := createInstance(t, false) client := instance.Client.WebKeyV3Alpha t.Run("CreateWebKey", func(t *testing.T) { @@ -62,18 +62,18 @@ func 
TestServer_Feature_Disabled(t *testing.T) { } func TestServer_ListWebKeys(t *testing.T) { - instance, iamCtx := createInstance(t, true) + instance, iamCtx, creationDate := createInstance(t, true) // After the feature is first enabled, we can expect 2 generated keys with the default config. checkWebKeyListState(iamCtx, t, instance, 2, "", &webkey.WebKey_Rsa{ Rsa: &webkey.WebKeyRSAConfig{ Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, }, - }) + }, creationDate) } func TestServer_CreateWebKey(t *testing.T) { - instance, iamCtx := createInstance(t, true) + instance, iamCtx, creationDate := createInstance(t, true) client := instance.Client.WebKeyV3Alpha _, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{ @@ -93,11 +93,11 @@ func TestServer_CreateWebKey(t *testing.T) { Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, }, - }) + }, creationDate) } func TestServer_ActivateWebKey(t *testing.T) { - instance, iamCtx := createInstance(t, true) + instance, iamCtx, creationDate := createInstance(t, true) client := instance.Client.WebKeyV3Alpha resp, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{ @@ -122,11 +122,11 @@ func TestServer_ActivateWebKey(t *testing.T) { Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, }, - }) + }, creationDate) } func TestServer_DeleteWebKey(t *testing.T) { - instance, iamCtx := createInstance(t, true) + instance, iamCtx, creationDate := createInstance(t, true) client := instance.Client.WebKeyV3Alpha keyIDs := make([]string, 2) @@ -178,11 +178,12 @@ func TestServer_DeleteWebKey(t *testing.T) { Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, }, - }) + }, creationDate) } -func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context) { +func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context, *timestamppb.Timestamp) { instance := integration.NewInstance(CTX) + creationDate := timestamppb.Now() iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) if enableFeature { @@ -203,7 +204,7 @@ func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, co } }, retryDuration, tick) - return instance, iamCTX + return instance, iamCTX, creationDate } func assertFeatureDisabledError(t *testing.T, err error) { @@ -214,7 +215,7 @@ func assertFeatureDisabledError(t *testing.T, err error) { assert.Contains(t, s.Message(), "WEBKEY-Ohx6E") } -func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any) { +func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any, creationDate *timestamppb.Timestamp) { retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) assert.EventuallyWithT(t, func(collect *assert.CollectT) { @@ -227,8 +228,8 @@ func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integrati var gotActiveKeyID string for _, key := range list { integration.AssertResourceDetails(t, &resource_object.Details{ - Created: timestamppb.Now(), - Changed: timestamppb.Now(), + Created: creationDate, + Changed: creationDate, Owner: &object.Owner{ Type: object.OwnerType_OWNER_TYPE_INSTANCE, Id: instance.ID(), diff --git a/internal/api/grpc/user/v2/integration_test/query_test.go 
b/internal/api/grpc/user/v2/integration_test/query_test.go index 4ee085336c..3d5b2d9416 100644 --- a/internal/api/grpc/user/v2/integration_test/query_test.go +++ b/internal/api/grpc/user/v2/integration_test/query_test.go @@ -190,7 +190,6 @@ func TestServer_GetUserByID(t *testing.T) { func TestServer_GetUserByID_Permission(t *testing.T) { t.Parallel() - timeNow := time.Now().UTC() newOrgOwnerEmail := gofakeit.Email() newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) newUserID := newOrg.CreatedAdmins[0].GetUserId() @@ -237,7 +236,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) { }, }, Details: &object.Details{ - ChangeDate: timestamppb.New(timeNow), + ChangeDate: timestamppb.Now(), ResourceOwner: newOrg.GetOrganizationId(), }, }, @@ -275,7 +274,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) { }, }, Details: &object.Details{ - ChangeDate: timestamppb.New(timeNow), + ChangeDate: timestamppb.Now(), ResourceOwner: newOrg.GetOrganizationId(), }, }, @@ -303,24 +302,29 @@ func TestServer_GetUserByID_Permission(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) - if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) + if tt.wantErr { + assert.Error(ttt, err) + return + } + if !assert.NoError(ttt, err) { + return + } - tt.want.User.UserId = tt.args.req.GetUserId() - tt.want.User.Username = newOrgOwnerEmail - tt.want.User.PreferredLoginName = newOrgOwnerEmail - tt.want.User.LoginNames = []string{newOrgOwnerEmail} - if human := tt.want.User.GetHuman(); human != nil { - human.Email.Email = newOrgOwnerEmail - } - // details tested in GetUserByID - tt.want.User.Details = got.User.GetDetails() + tt.want.User.UserId = tt.args.req.GetUserId() + tt.want.User.Username = newOrgOwnerEmail + tt.want.User.PreferredLoginName = newOrgOwnerEmail + tt.want.User.LoginNames = []string{newOrgOwnerEmail} + if human := tt.want.User.GetHuman(); human != nil { + human.Email.Email = newOrgOwnerEmail + } + // details tested in GetUserByID + tt.want.User.Details = got.User.GetDetails() - assert.Equal(t, tt.want.User, got.User) + assert.Equal(ttt, tt.want.User, got.User) + }, retryDuration, tick, "timeout waiting for expected user result") }) } } diff --git a/internal/api/grpc/user/v2/integration_test/user_test.go b/internal/api/grpc/user/v2/integration_test/user_test.go index 3ebe760d91..e4196c1a30 100644 --- a/internal/api/grpc/user/v2/integration_test/user_test.go +++ b/internal/api/grpc/user/v2/integration_test/user_test.go @@ -2447,7 +2447,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) { OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG, }) require.NoError(t, err) - idpLink, err := Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ + _, err = Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ IdpId: provider.GetId(), UserId: "external-id", UserName: "displayName", @@ -2639,25 +2639,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var got *user.ListAuthenticationMethodTypesResponse - var err error - - for 
{ - got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) - if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) { - break + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) + require.NoError(ttt, err) + if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) { + return } - select { - case <-CTX.Done(): - t.Fatal(CTX.Err(), err) - case <-time.After(time.Second): - t.Log("retrying ListAuthenticationMethodTypes") - continue - } - } - require.NoError(t, err) - assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) - require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes()) + assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes()) + integration.AssertListDetails(ttt, tt.want, got) + }, retryDuration, tick, "timeout waiting for expected auth methods result") }) } } diff --git a/internal/api/grpc/user/v2beta/integration_test/user_test.go b/internal/api/grpc/user/v2beta/integration_test/user_test.go index 7b158f0d68..b5f0b16d20 100644 --- a/internal/api/grpc/user/v2beta/integration_test/user_test.go +++ b/internal/api/grpc/user/v2beta/integration_test/user_test.go @@ -2454,7 +2454,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) { OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG, }) require.NoError(t, err) - idpLink, err := Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ + _, err = Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ IdpId: provider.GetId(), UserId: "external-id", UserName: "displayName", @@ -2527,25 +2527,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var got *user.ListAuthenticationMethodTypesResponse - var err error - - for { - got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) - if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) { - break + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute) + require.EventuallyWithT(t, func(ttt *assert.CollectT) { + got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) + require.NoError(ttt, err) + if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) { + return } - select { - case <-CTX.Done(): - t.Fatal(CTX.Err(), err) - case <-time.After(time.Second): - t.Log("retrying ListAuthenticationMethodTypes") - continue - } - } - require.NoError(t, err) - assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) - require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes()) + assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes()) + integration.AssertListDetails(ttt, tt.want, got) + }, retryDuration, tick, "timeout waiting for expected auth methods result") }) } } diff --git a/internal/integration/assert.go b/internal/integration/assert.go index 3c5fadb373..8e875ee48e 100644 --- a/internal/integration/assert.go +++ b/internal/integration/assert.go @@ -80,7 +80,15 @@ func AssertResourceDetails(t assert.TestingT, expected *resources_object.Details 
gotCreatedDate := actual.GetCreated().AsTime() assert.WithinRange(t, gotCreatedDate, wantCreatedDate.Add(-time.Minute), wantCreatedDate.Add(time.Minute)) } - assert.Equal(t, expected.GetOwner(), actual.GetOwner()) + if expected.GetOwner() != nil { + expectedOwner := expected.GetOwner() + actualOwner := actual.GetOwner() + if !assert.NotNil(t, actualOwner) { + return + } + assert.Equal(t, expectedOwner.GetId(), actualOwner.GetId()) + assert.Equal(t, expectedOwner.GetType(), actualOwner.GetType()) + } assert.NotEmpty(t, actual.GetId()) if expected.GetId() != "" { assert.Equal(t, expected.GetId(), actual.GetId()) @@ -99,7 +107,7 @@ func AssertListDetails[L ListDetails, D ListDetailsMsg[L]](t assert.TestingT, ex if wantDetails.GetTimestamp() != nil { gotCD := gotDetails.GetTimestamp().AsTime() wantCD := time.Now() - assert.WithinRange(t, gotCD, wantCD.Add(-10*time.Minute), wantCD.Add(time.Minute)) + assert.WithinRange(t, gotCD, wantCD.Add(-1*time.Minute), wantCD.Add(time.Minute)) } } diff --git a/internal/integration/oidc.go b/internal/integration/oidc.go index f6d779de95..3186ccdaa6 100644 --- a/internal/integration/oidc.go +++ b/internal/integration/oidc.go @@ -52,7 +52,13 @@ func (i *Instance) CreateOIDCClient(ctx context.Context, redirectURI, logoutRedi return nil, err } return resp, await(func() error { - _, err := i.Client.Mgmt.GetAppByID(ctx, &management.GetAppByIDRequest{ + _, err := i.Client.Mgmt.GetProjectByID(ctx, &management.GetProjectByIDRequest{ + Id: projectID, + }) + if err != nil { + return err + } + _, err = i.Client.Mgmt.GetAppByID(ctx, &management.GetAppByIDRequest{ ProjectId: projectID, AppId: resp.GetAppId(), }) @@ -152,7 +158,13 @@ func (i *Instance) CreateOIDCImplicitFlowClient(ctx context.Context, redirectURI return nil, err } return resp, await(func() error { - _, err := i.Client.Mgmt.GetAppByID(ctx, &management.GetAppByIDRequest{ + _, err := i.Client.Mgmt.GetProjectByID(ctx, &management.GetProjectByIDRequest{ + Id: project.GetId(), + }) + if err != nil { + return err + } + _, err = i.Client.Mgmt.GetAppByID(ctx, &management.GetAppByIDRequest{ ProjectId: project.GetId(), AppId: resp.GetAppId(), }) From 4eeb2be36adc1d1e24bca59a339fa0afd6449514 Mon Sep 17 00:00:00 2001 From: Mostafa Galal <77402549+MostafaGalal1@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:44:15 +0300 Subject: [PATCH 14/30] fix: Negative values allowed by spinners of Login Lifetimes inputs (#8694) # Which Problems Are Solved Previously, the login lifetime input fields allowed negative values and, in some cases, zero values, which were not valid according to the business rules. # How the Problems Are Solved The issue was resolved by adding min and step properties to the relevant HTML input fields. This ensures that only valid values are entered, adhering to the specific requirements for each field. 
Co-authored-by: Max Peintner --- .../login-policy/login-policy.component.html | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/console/src/app/modules/policies/login-policy/login-policy.component.html b/console/src/app/modules/policies/login-policy/login-policy.component.html index 622fd84b81..4a059c53fc 100644 --- a/console/src/app/modules/policies/login-policy/login-policy.component.html +++ b/console/src/app/modules/policies/login-policy/login-policy.component.html @@ -155,7 +155,7 @@ > {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} - + {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} - + {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} - + {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} - + {{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }} - + From 0b5079c11fef69c1fe9e37ac602127bec8ac13dd Mon Sep 17 00:00:00 2001 From: Livio Spring Date: Fri, 25 Oct 2024 17:51:44 +0200 Subject: [PATCH 15/30] fix: correctly search for verified domain (#8820) # Which Problems Are Solved Searching orgs by domain currently only looked for the primary domain, but should be possible with all verified domains (as documented) # How the Problems Are Solved - fixed the search query # Additional Changes None # Additional Context - closes https://github.com/zitadel/zitadel/issues/8749 --- internal/api/grpc/org/converter.go | 2 +- .../org/v2/integration_test/query_test.go | 41 +++++++++++++++++++ internal/api/grpc/org/v2/query.go | 2 +- internal/query/org.go | 20 ++++++++- 4 files changed, 61 insertions(+), 4 deletions(-) diff --git a/internal/api/grpc/org/converter.go b/internal/api/grpc/org/converter.go index 43fc1c18dc..fa91599c03 100644 --- a/internal/api/grpc/org/converter.go +++ b/internal/api/grpc/org/converter.go @@ -22,7 +22,7 @@ func OrgQueriesToModel(queries []*org_pb.OrgQuery) (_ []query.SearchQuery, err e func OrgQueryToModel(apiQuery *org_pb.OrgQuery) (query.SearchQuery, error) { switch q := apiQuery.Query.(type) { case *org_pb.OrgQuery_DomainQuery: - return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) + return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) case *org_pb.OrgQuery_NameQuery: return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name) case *org_pb.OrgQuery_StateQuery: diff --git a/internal/api/grpc/org/v2/integration_test/query_test.go b/internal/api/grpc/org/v2/integration_test/query_test.go index e476b4e60d..bd0352ed75 100644 --- a/internal/api/grpc/org/v2/integration_test/query_test.go +++ b/internal/api/grpc/org/v2/integration_test/query_test.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/zitadel/zitadel/internal/integration" + "github.com/zitadel/zitadel/pkg/grpc/management" "github.com/zitadel/zitadel/pkg/grpc/object/v2" "github.com/zitadel/zitadel/pkg/grpc/org/v2" ) @@ -214,6 +215,46 @@ func TestServer_ListOrganizations(t *testing.T) { }, }, }, + { + name: "list org by domain (non primary), ok", + args: args{ + CTX, + &org.ListOrganizationsRequest{}, + func(ctx context.Context, request *org.ListOrganizationsRequest) ([]orgAttr, error) { + orgs := make([]orgAttr, 1) + name := fmt.Sprintf("ListOrgs-%s", gofakeit.AppName()) + orgResp := Instance.CreateOrganization(ctx, name, gofakeit.Email()) + orgs[0] = orgAttr{ + ID: orgResp.GetOrganizationId(), + Name: name, + Details: 
orgResp.GetDetails(), + } + domain := gofakeit.DomainName() + _, err := Instance.Client.Mgmt.AddOrgDomain(integration.SetOrgID(ctx, orgResp.GetOrganizationId()), &management.AddOrgDomainRequest{ + Domain: domain, + }) + if err != nil { + return nil, err + } + request.Queries = []*org.SearchQuery{ + OrganizationDomainQuery(domain), + } + return orgs, nil + }, + }, + want: &org.ListOrganizationsResponse{ + Details: &object.ListDetails{ + TotalResult: 1, + Timestamp: timestamppb.Now(), + }, + SortingColumn: 0, + Result: []*org.Organization{ + { + State: org.OrganizationState_ORGANIZATION_STATE_ACTIVE, + }, + }, + }, + }, { name: "list org by inactive state, ok", args: args{ diff --git a/internal/api/grpc/org/v2/query.go b/internal/api/grpc/org/v2/query.go index 6c2919c5b8..f07fb71d20 100644 --- a/internal/api/grpc/org/v2/query.go +++ b/internal/api/grpc/org/v2/query.go @@ -57,7 +57,7 @@ func orgQueriesToQuery(ctx context.Context, queries []*org.SearchQuery) (_ []que func orgQueryToQuery(ctx context.Context, orgQuery *org.SearchQuery) (query.SearchQuery, error) { switch q := orgQuery.Query.(type) { case *org.SearchQuery_DomainQuery: - return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) + return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) case *org.SearchQuery_NameQuery: return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name) case *org.SearchQuery_StateQuery: diff --git a/internal/query/org.go b/internal/query/org.go index 50e2d4dbde..1c20255171 100644 --- a/internal/query/org.go +++ b/internal/query/org.go @@ -308,8 +308,24 @@ func NewOrgIDSearchQuery(value string) (SearchQuery, error) { return NewTextQuery(OrgColumnID, value, TextEquals) } -func NewOrgDomainSearchQuery(method TextComparison, value string) (SearchQuery, error) { - return NewTextQuery(OrgColumnDomain, value, method) +func NewOrgVerifiedDomainSearchQuery(method TextComparison, value string) (SearchQuery, error) { + domainQuery, err := NewTextQuery(OrgDomainDomainCol, value, method) + if err != nil { + return nil, err + } + verifiedQuery, err := NewBoolQuery(OrgDomainIsVerifiedCol, true) + if err != nil { + return nil, err + } + subSelect, err := NewSubSelect(OrgDomainOrgIDCol, []SearchQuery{domainQuery, verifiedQuery}) + if err != nil { + return nil, err + } + return NewListQuery( + OrgColumnID, + subSelect, + ListIn, + ) } func NewOrgNameSearchQuery(method TextComparison, value string) (SearchQuery, error) { From 54f1c0bc50d9b547670871f9b44fb3d5ab1af186 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:12:27 +0000 Subject: [PATCH 16/30] chore(deps): bump http-proxy-middleware from 2.0.6 to 2.0.7 in /docs (#8821) Bumps [http-proxy-middleware](https://github.com/chimurai/http-proxy-middleware) from 2.0.6 to 2.0.7.
Release notes (sourced from http-proxy-middleware's releases):
- v2.0.7: full changelog at https://github.com/chimurai/http-proxy-middleware/compare/v2.0.6...v2.0.7
- v2.0.7-beta.1: full changelog at https://github.com/chimurai/http-proxy-middleware/compare/v2.0.7-beta.0...v2.0.7-beta.1
- v2.0.7-beta.0: full changelog at https://github.com/chimurai/http-proxy-middleware/compare/v2.0.6...v2.0.7-beta.0

Changelog (sourced from http-proxy-middleware's changelog), v2.0.7:
- ci(github actions): add publish.yml
- fix(filter): handle errors

Commits
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=http-proxy-middleware&package-manager=npm_and_yarn&previous-version=2.0.6&new-version=2.0.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Florian Forster --- docs/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/yarn.lock b/docs/yarn.lock index d799eddad4..538212d391 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -6068,9 +6068,9 @@ http-parser-js@>=0.5.1: integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q== http-proxy-middleware@^2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" - integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== + version "2.0.7" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6" + integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA== dependencies: "@types/http-proxy" "^1.17.8" http-proxy "^1.18.1" From 32bad3feb3e260444d385c009b3898eb98060448 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20M=C3=B6hlmann?= Date: Mon, 28 Oct 2024 09:29:34 +0100 Subject: [PATCH 17/30] perf(milestones): refactor (#8788) # Which Problems Are Solved Milestones used existing events from a number of aggregates. OIDC session is one of them. We noticed in load-tests that the reduction of the oidc_session.added event into the milestone projection is a costly business with payload based conditionals. A milestone is reached once, but even then we remain subscribed to the OIDC events. This requires the projections.current_states to be updated continuously. # How the Problems Are Solved The milestone creation is refactored to use dedicated events instead. The command side decides when a milestone is reached and creates the reached event once for each milestone when required. # Additional Changes In order to prevent reached milestones being created twice, a migration script is provided. When the old `projections.milestones` table exist, the state is read from there and `v2` milestone aggregate events are created, with the original reached and pushed dates. 
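The refactor reads most clearly as the rule "emit a reached event at most once per milestone and instance". The sketch below illustrates that rule in plain Go with the standard library only; the type and function names are illustrative assumptions and do not mirror the actual write model, eventstore, or cache code in this patch.

```go
package main

import "fmt"

type milestoneType string

const (
	instanceCreated    milestoneType = "InstanceCreated"
	projectCreated     milestoneType = "ProjectCreated"
	applicationCreated milestoneType = "ApplicationCreated"
)

type reachedEvent struct {
	Instance  string
	Milestone milestoneType
}

// milestonesReached is the state reduced from past reached events,
// comparable to a write model filtered from the eventstore.
type milestonesReached struct {
	reached map[milestoneType]bool
}

func newMilestonesReached(history []reachedEvent) *milestonesReached {
	m := &milestonesReached{reached: map[milestoneType]bool{}}
	for _, e := range history {
		m.reached[e.Milestone] = true
	}
	return m
}

// reach returns the event to push only if the milestone was not reached before.
func (m *milestonesReached) reach(instanceID string, t milestoneType) (reachedEvent, bool) {
	if m.reached[t] {
		return reachedEvent{}, false // already reached: nothing to push, nothing to project
	}
	m.reached[t] = true
	return reachedEvent{Instance: instanceID, Milestone: t}, true
}

func main() {
	history := []reachedEvent{{Instance: "instance-1", Milestone: instanceCreated}}
	model := newMilestonesReached(history)

	if event, ok := model.reach("instance-1", projectCreated); ok {
		fmt.Printf("push %s for %s\n", event.Milestone, event.Instance) // emitted exactly once
	}
	if _, ok := model.reach("instance-1", projectCreated); !ok {
		fmt.Println("ProjectCreated already reached, no second event")
	}
}
```

Once every milestone except InstanceDeleted has been reached, nothing new can be emitted for that instance, which is why the command side in this patch can cache such instances and skip the query entirely.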
# Additional Context - Closes https://github.com/zitadel/zitadel/issues/8800 --- cmd/mirror/projections.go | 1 + cmd/setup/03.go | 1 + cmd/setup/36.go | 118 ++++ cmd/setup/36.sql | 4 + cmd/setup/config.go | 1 + cmd/setup/config_change.go | 1 + cmd/setup/setup.go | 3 + cmd/start/start.go | 1 + internal/api/authz/instance.go | 10 +- internal/cache/cache.go | 4 +- internal/command/cache.go | 82 +++ internal/command/command.go | 13 + internal/command/instance.go | 21 +- internal/command/instance_test.go | 29 +- internal/command/milestone.go | 170 ++++- internal/command/milestone_model.go | 58 ++ internal/command/milestone_test.go | 629 ++++++++++++++++++ internal/command/oidc_session.go | 34 +- internal/command/oidc_session_test.go | 22 +- internal/command/project.go | 24 +- internal/command/project_application_oidc.go | 5 + .../command/project_application_oidc_test.go | 16 +- internal/command/project_application_saml.go | 5 + .../command/project_application_saml_test.go | 21 +- internal/command/project_test.go | 14 +- internal/eventstore/eventstore.go | 7 + internal/eventstore/eventstore_test.go | 5 + .../repository/mock/repository.mock.go | 15 + internal/eventstore/repository/sql/crdb.go | 2 +- internal/eventstore/repository/sql/query.go | 4 +- internal/notification/handlers/commands.go | 2 +- .../integration_test/telemetry_pusher_test.go | 27 +- .../handlers/mock/commands.mock.go | 2 +- .../notification/handlers/telemetry_pusher.go | 69 +- internal/query/milestone.go | 12 +- internal/query/milestone_test.go | 13 +- internal/query/projection/event_test.go | 18 +- internal/query/projection/milestones.go | 252 +------ internal/query/projection/milestones_test.go | 327 ++------- internal/query/projection/projection.go | 2 +- internal/repository/milestone/aggregate.go | 11 +- internal/repository/milestone/events.go | 106 ++- internal/repository/milestone/eventstore.go | 6 + internal/repository/milestone/type.go | 59 -- internal/repository/milestone/type_enumer.go | 112 ++++ internal/repository/milestone/type_string.go | 30 - 46 files changed, 1612 insertions(+), 756 deletions(-) create mode 100644 cmd/setup/36.go create mode 100644 cmd/setup/36.sql create mode 100644 internal/command/cache.go create mode 100644 internal/command/milestone_model.go create mode 100644 internal/command/milestone_test.go delete mode 100644 internal/repository/milestone/type.go create mode 100644 internal/repository/milestone/type_enumer.go delete mode 100644 internal/repository/milestone/type_string.go diff --git a/cmd/mirror/projections.go b/cmd/mirror/projections.go index c7b83c9d3a..442609d12a 100644 --- a/cmd/mirror/projections.go +++ b/cmd/mirror/projections.go @@ -163,6 +163,7 @@ func projections( } commands, err := command.StartCommands( es, + config.Caches, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, staticStorage, diff --git a/cmd/setup/03.go b/cmd/setup/03.go index 4860ae3eec..4311418388 100644 --- a/cmd/setup/03.go +++ b/cmd/setup/03.go @@ -65,6 +65,7 @@ func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error } cmd, err := command.StartCommands(mig.es, + nil, mig.defaults, mig.zitadelRoles, nil, diff --git a/cmd/setup/36.go b/cmd/setup/36.go new file mode 100644 index 0000000000..3ccab4992a --- /dev/null +++ b/cmd/setup/36.go @@ -0,0 +1,118 @@ +package setup + +import ( + "context" + _ "embed" + "errors" + "fmt" + "slices" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/zitadel/logging" + + 
"github.com/zitadel/zitadel/internal/command" + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/repository/milestone" +) + +var ( + //go:embed 36.sql + getProjectedMilestones string +) + +type FillV2Milestones struct { + dbClient *database.DB + eventstore *eventstore.Eventstore +} + +type instanceMilestone struct { + Type milestone.Type + Reached time.Time + Pushed *time.Time +} + +func (mig *FillV2Milestones) Execute(ctx context.Context, _ eventstore.Event) error { + im, err := mig.getProjectedMilestones(ctx) + if err != nil { + return err + } + return mig.pushEventsByInstance(ctx, im) +} + +func (mig *FillV2Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) { + type row struct { + InstanceID string + Type milestone.Type + Reached time.Time + Pushed *time.Time + } + + rows, _ := mig.dbClient.Pool.Query(ctx, getProjectedMilestones) + scanned, err := pgx.CollectRows(rows, pgx.RowToStructByPos[row]) + var pgError *pgconn.PgError + // catch ERROR: relation "projections.milestones" does not exist + if errors.As(err, &pgError) && pgError.SQLState() == "42P01" { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("milestones get: %w", err) + } + milestoneMap := make(map[string][]instanceMilestone) + for _, s := range scanned { + milestoneMap[s.InstanceID] = append(milestoneMap[s.InstanceID], instanceMilestone{ + Type: s.Type, + Reached: s.Reached, + Pushed: s.Pushed, + }) + } + return milestoneMap, nil +} + +// pushEventsByInstance creates the v2 milestone events by instance. +// This prevents we will try to push 6*N(instance) events in one push. +func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error { + // keep a deterministic order by instance ID. + order := make([]string, 0, len(milestoneMap)) + for k := range milestoneMap { + order = append(order, k) + } + slices.Sort(order) + + for _, instanceID := range order { + logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("filter existing milestone events") + + // because each Push runs in a separate TX, we need to make sure that events + // from a partially executed migration are pushed again. 
+ model := command.NewMilestonesReachedWriteModel(instanceID) + if err := mig.eventstore.FilterToQueryReducer(ctx, model); err != nil { + return fmt.Errorf("milestones filter: %w", err) + } + if model.InstanceCreated { + logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("milestone events already migrated") + continue // This instance was migrated, skip + } + logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("push milestone events") + + aggregate := milestone.NewInstanceAggregate(instanceID) + + cmds := make([]eventstore.Command, 0, len(milestoneMap[instanceID])*2) + for _, m := range milestoneMap[instanceID] { + cmds = append(cmds, milestone.NewReachedEventWithDate(ctx, aggregate, m.Type, &m.Reached)) + if m.Pushed != nil { + cmds = append(cmds, milestone.NewPushedEventWithDate(ctx, aggregate, m.Type, nil, "", m.Pushed)) + } + } + + if _, err := mig.eventstore.Push(ctx, cmds...); err != nil { + return fmt.Errorf("milestones push: %w", err) + } + } + return nil +} + +func (mig *FillV2Milestones) String() string { + return "36_fill_v2_milestones" +} diff --git a/cmd/setup/36.sql b/cmd/setup/36.sql new file mode 100644 index 0000000000..03215388d2 --- /dev/null +++ b/cmd/setup/36.sql @@ -0,0 +1,4 @@ +SELECT instance_id, type, reached_date, last_pushed_date +FROM projections.milestones +WHERE reached_date IS NOT NULL +ORDER BY instance_id, reached_date; diff --git a/cmd/setup/config.go b/cmd/setup/config.go index d75c9bbd3c..7a5beebcfe 100644 --- a/cmd/setup/config.go +++ b/cmd/setup/config.go @@ -122,6 +122,7 @@ type Steps struct { s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid s34AddCacheSchema *AddCacheSchema s35AddPositionToIndexEsWm *AddPositionToIndexEsWm + s36FillV2Milestones *FillV2Milestones } func MustNewSteps(v *viper.Viper) *Steps { diff --git a/cmd/setup/config_change.go b/cmd/setup/config_change.go index a7d7c5a463..08f0c3c3d6 100644 --- a/cmd/setup/config_change.go +++ b/cmd/setup/config_change.go @@ -33,6 +33,7 @@ func (mig *externalConfigChange) Check(lastRun map[string]interface{}) bool { func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error { cmd, err := command.StartCommands( mig.es, + nil, mig.defaults, nil, nil, diff --git a/cmd/setup/setup.go b/cmd/setup/setup.go index d1cfac6397..e24b69d5b6 100644 --- a/cmd/setup/setup.go +++ b/cmd/setup/setup.go @@ -165,6 +165,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient} steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient} steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient} + steps.s36FillV2Milestones = &FillV2Milestones{dbClient: queryDBClient, eventstore: eventstoreClient} err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil) logging.OnError(err).Fatal("unable to start projections") @@ -209,6 +210,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s30FillFieldsForOrgDomainVerified, steps.s34AddCacheSchema, steps.s35AddPositionToIndexEsWm, + steps.s36FillV2Milestones, } { mustExecuteMigration(ctx, eventstoreClient, step, "migration failed") } @@ -390,6 +392,7 @@ func initProjections( } commands, err := command.StartCommands( eventstoreClient, + config.Caches, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, 
staticStorage, diff --git a/cmd/start/start.go b/cmd/start/start.go index 991e4df592..97a38ba50d 100644 --- a/cmd/start/start.go +++ b/cmd/start/start.go @@ -224,6 +224,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server } commands, err := command.StartCommands( eventstoreClient, + config.Caches, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, storage, diff --git a/internal/api/authz/instance.go b/internal/api/authz/instance.go index 8721a75f3a..7ee8d605ca 100644 --- a/internal/api/authz/instance.go +++ b/internal/api/authz/instance.go @@ -114,7 +114,15 @@ func WithConsole(ctx context.Context, projectID, appID string) context.Context { i.projectID = projectID i.appID = appID - //i.clientID = clientID + return context.WithValue(ctx, instanceKey, i) +} + +func WithConsoleClientID(ctx context.Context, clientID string) context.Context { + i, ok := ctx.Value(instanceKey).(*instance) + if !ok { + i = new(instance) + } + i.clientID = clientID return context.WithValue(ctx, instanceKey, i) } diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 4957e63179..c6d01b928e 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -6,6 +6,7 @@ import ( "time" "github.com/zitadel/logging" + "github.com/zitadel/zitadel/internal/database/postgres" ) @@ -77,7 +78,8 @@ type CachesConfig struct { Postgres PostgresConnectorConfig // Redis redis.Config? } - Instance *CacheConfig + Instance *CacheConfig + Milestones *CacheConfig } type CacheConfig struct { diff --git a/internal/command/cache.go b/internal/command/cache.go new file mode 100644 index 0000000000..bf976bd2d7 --- /dev/null +++ b/internal/command/cache.go @@ -0,0 +1,82 @@ +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/gomap" + "github.com/zitadel/zitadel/internal/cache/noop" + "github.com/zitadel/zitadel/internal/cache/pg" + "github.com/zitadel/zitadel/internal/database" +) + +type Caches struct { + connectors *cacheConnectors + milestones cache.Cache[milestoneIndex, string, *MilestonesReached] +} + +func startCaches(background context.Context, conf *cache.CachesConfig, client *database.DB) (_ *Caches, err error) { + caches := &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + } + if conf == nil { + return caches, nil + } + caches.connectors, err = startCacheConnectors(background, conf, client) + if err != nil { + return nil, err + } + caches.milestones, err = startCache[milestoneIndex, string, *MilestonesReached](background, []milestoneIndex{milestoneIndexInstanceID}, "milestones", conf.Instance, caches.connectors) + if err != nil { + return nil, err + } + return caches, nil +} + +type cacheConnectors struct { + memory *cache.AutoPruneConfig + postgres *pgxPoolCacheConnector +} + +type pgxPoolCacheConnector struct { + *cache.AutoPruneConfig + client *database.DB +} + +func startCacheConnectors(_ context.Context, conf *cache.CachesConfig, client *database.DB) (_ *cacheConnectors, err error) { + connectors := new(cacheConnectors) + if conf.Connectors.Memory.Enabled { + connectors.memory = &conf.Connectors.Memory.AutoPrune + } + if conf.Connectors.Postgres.Enabled { + connectors.postgres = &pgxPoolCacheConnector{ + AutoPruneConfig: &conf.Connectors.Postgres.AutoPrune, + client: client, + } + } + return connectors, nil +} + +func startCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, name string, conf 
*cache.CacheConfig, connectors *cacheConnectors) (cache.Cache[I, K, V], error) { + if conf == nil || conf.Connector == "" { + return noop.NewCache[I, K, V](), nil + } + if strings.EqualFold(conf.Connector, "memory") && connectors.memory != nil { + c := gomap.NewCache[I, K, V](background, indices, *conf) + connectors.memory.StartAutoPrune(background, c, name) + return c, nil + } + if strings.EqualFold(conf.Connector, "postgres") && connectors.postgres != nil { + client := connectors.postgres.client + c, err := pg.NewCache[I, K, V](background, name, *conf, indices, client.Pool, client.Type()) + if err != nil { + return nil, fmt.Errorf("query start cache: %w", err) + } + connectors.postgres.StartAutoPrune(background, c, name) + return c, nil + } + + return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector) +} diff --git a/internal/command/command.go b/internal/command/command.go index 4ee8310525..7c56f05b86 100644 --- a/internal/command/command.go +++ b/internal/command/command.go @@ -18,6 +18,7 @@ import ( "github.com/zitadel/zitadel/internal/api/authz" api_http "github.com/zitadel/zitadel/internal/api/http" + "github.com/zitadel/zitadel/internal/cache" "github.com/zitadel/zitadel/internal/command/preparation" sd "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/crypto" @@ -88,10 +89,17 @@ type Commands struct { EventGroupExisting func(group string) bool GenerateDomain func(instanceName, domain string) (string, error) + + caches *Caches + // Store instance IDs where all milestones are reached (except InstanceDeleted). + // These instance's milestones never need to be invalidated, + // so the query and cache overhead can completely eliminated. + milestonesCompleted sync.Map } func StartCommands( es *eventstore.Eventstore, + cachesConfig *cache.CachesConfig, defaults sd.SystemDefaults, zitadelRoles []authz.RoleMapping, staticStore static.Storage, @@ -123,6 +131,10 @@ func StartCommands( if err != nil { return nil, fmt.Errorf("password hasher: %w", err) } + caches, err := startCaches(context.TODO(), cachesConfig, es.Client()) + if err != nil { + return nil, fmt.Errorf("caches: %w", err) + } repo = &Commands{ eventstore: es, static: staticStore, @@ -176,6 +188,7 @@ func StartCommands( }, }, GenerateDomain: domain.NewGeneratedInstanceDomain, + caches: caches, } if defaultSecretGenerators != nil && defaultSecretGenerators.ClientSecret != nil { diff --git a/internal/command/instance.go b/internal/command/instance.go index f220c0c961..3491aaf4a2 100644 --- a/internal/command/instance.go +++ b/internal/command/instance.go @@ -6,6 +6,7 @@ import ( "golang.org/x/text/language" + "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/http" "github.com/zitadel/zitadel/internal/command/preparation" @@ -17,6 +18,7 @@ import ( "github.com/zitadel/zitadel/internal/notification/channels/smtp" "github.com/zitadel/zitadel/internal/repository/instance" "github.com/zitadel/zitadel/internal/repository/limits" + "github.com/zitadel/zitadel/internal/repository/milestone" "github.com/zitadel/zitadel/internal/repository/org" "github.com/zitadel/zitadel/internal/repository/project" "github.com/zitadel/zitadel/internal/repository/quota" @@ -292,7 +294,7 @@ func setUpInstance(ctx context.Context, c *Commands, setup *InstanceSetup) (vali setupFeatures(&validations, setup.Features, setup.zitadel.instanceID) setupLimits(c, &validations, limits.NewAggregate(setup.zitadel.limitsID, 
setup.zitadel.instanceID), setup.Limits) setupRestrictions(c, &validations, restrictions.NewAggregate(setup.zitadel.restrictionsID, setup.zitadel.instanceID, setup.zitadel.instanceID), setup.Restrictions) - + setupInstanceCreatedMilestone(&validations, setup.zitadel.instanceID) return validations, pat, machineKey, nil } @@ -890,7 +892,8 @@ func (c *Commands) RemoveInstance(ctx context.Context, id string) (*domain.Objec if err != nil { return nil, err } - + err = c.caches.milestones.Invalidate(ctx, milestoneIndexInstanceID, id) + logging.OnError(err).Error("milestone invalidate") return &domain.ObjectDetails{ Sequence: events[len(events)-1].Sequence(), EventDate: events[len(events)-1].CreatedAt(), @@ -908,10 +911,16 @@ func (c *Commands) prepareRemoveInstance(a *instance.Aggregate) preparation.Vali if !writeModel.State.Exists() { return nil, zerrors.ThrowNotFound(err, "COMMA-AE3GS", "Errors.Instance.NotFound") } - return []eventstore.Command{instance.NewInstanceRemovedEvent(ctx, - &a.Aggregate, - writeModel.Name, - writeModel.Domains)}, + milestoneAggregate := milestone.NewInstanceAggregate(a.ID) + return []eventstore.Command{ + instance.NewInstanceRemovedEvent(ctx, + &a.Aggregate, + writeModel.Name, + writeModel.Domains), + milestone.NewReachedEvent(ctx, + milestoneAggregate, + milestone.InstanceDeleted), + }, nil }, nil } diff --git a/internal/command/instance_test.go b/internal/command/instance_test.go index b35e226383..af32ea538d 100644 --- a/internal/command/instance_test.go +++ b/internal/command/instance_test.go @@ -13,6 +13,7 @@ import ( "golang.org/x/text/language" "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/cache/noop" "github.com/zitadel/zitadel/internal/command/preparation" "github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/domain" @@ -20,6 +21,7 @@ import ( "github.com/zitadel/zitadel/internal/id" id_mock "github.com/zitadel/zitadel/internal/id/mock" "github.com/zitadel/zitadel/internal/repository/instance" + "github.com/zitadel/zitadel/internal/repository/milestone" "github.com/zitadel/zitadel/internal/repository/org" "github.com/zitadel/zitadel/internal/repository/project" "github.com/zitadel/zitadel/internal/repository/user" @@ -372,6 +374,7 @@ func setupInstanceEvents(ctx context.Context, instanceID, orgID, projectID, appI setupInstanceElementsEvents(ctx, instanceID, instanceName, defaultLanguage), orgEvents(ctx, instanceID, orgID, orgName, projectID, domain, externalSecure, true, true), generatedDomainEvents(ctx, instanceID, orgID, projectID, appID, domain), + instanceCreatedMilestoneEvent(ctx, instanceID), ) } @@ -401,6 +404,12 @@ func generatedDomainEvents(ctx context.Context, instanceID, orgID, projectID, ap } } +func instanceCreatedMilestoneEvent(ctx context.Context, instanceID string) []eventstore.Command { + return []eventstore.Command{ + milestone.NewReachedEvent(ctx, milestone.NewInstanceAggregate(instanceID), milestone.InstanceCreated), + } +} + func generatedDomainFilters(instanceID, orgID, projectID, appID, generatedDomain string) []expect { return []expect{ expectFilter(), @@ -1378,7 +1387,7 @@ func TestCommandSide_UpdateInstance(t *testing.T) { func TestCommandSide_RemoveInstance(t *testing.T) { type fields struct { - eventstore *eventstore.Eventstore + eventstore func(t *testing.T) *eventstore.Eventstore } type args struct { ctx context.Context @@ -1397,8 +1406,7 @@ func TestCommandSide_RemoveInstance(t *testing.T) { { name: "instance not existing, not found error", fields: fields{ - 
eventstore: eventstoreExpect( - t, + eventstore: expectEventstore( expectFilter(), ), }, @@ -1413,8 +1421,7 @@ func TestCommandSide_RemoveInstance(t *testing.T) { { name: "instance removed, not found error", fields: fields{ - eventstore: eventstoreExpect( - t, + eventstore: expectEventstore( expectFilter( eventFromEventPusher( instance.NewInstanceAddedEvent( @@ -1444,8 +1451,7 @@ func TestCommandSide_RemoveInstance(t *testing.T) { { name: "instance remove, ok", fields: fields{ - eventstore: eventstoreExpect( - t, + eventstore: expectEventstore( expectFilter( eventFromEventPusherWithInstanceID( "INSTANCE", @@ -1480,6 +1486,10 @@ func TestCommandSide_RemoveInstance(t *testing.T) { "custom.domain", }, ), + milestone.NewReachedEvent(context.Background(), + milestone.NewInstanceAggregate("INSTANCE"), + milestone.InstanceDeleted, + ), ), ), }, @@ -1497,7 +1507,10 @@ func TestCommandSide_RemoveInstance(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &Commands{ - eventstore: tt.fields.eventstore, + eventstore: tt.fields.eventstore(t), + caches: &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + }, } got, err := r.RemoveInstance(tt.args.ctx, tt.args.instanceID) if tt.res.err == nil { diff --git a/internal/command/milestone.go b/internal/command/milestone.go index f01ec6d158..11e6e5ab7f 100644 --- a/internal/command/milestone.go +++ b/internal/command/milestone.go @@ -3,20 +3,176 @@ package command import ( "context" + "github.com/zitadel/logging" + "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/command/preparation" + "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/repository/milestone" ) -// MilestonePushed writes a new milestone.PushedEvent with a new milestone.Aggregate to the eventstore +type milestoneIndex int + +const ( + milestoneIndexInstanceID milestoneIndex = iota +) + +type MilestonesReached struct { + InstanceID string + InstanceCreated bool + AuthenticationSucceededOnInstance bool + ProjectCreated bool + ApplicationCreated bool + AuthenticationSucceededOnApplication bool + InstanceDeleted bool +} + +// complete returns true if all milestones except InstanceDeleted are reached. +func (m *MilestonesReached) complete() bool { + return m.InstanceCreated && + m.AuthenticationSucceededOnInstance && + m.ProjectCreated && + m.ApplicationCreated && + m.AuthenticationSucceededOnApplication +} + +// GetMilestonesReached finds the milestone state for the current instance. +func (c *Commands) GetMilestonesReached(ctx context.Context) (*MilestonesReached, error) { + milestones, ok := c.getCachedMilestonesReached(ctx) + if ok { + return milestones, nil + } + model := NewMilestonesReachedWriteModel(authz.GetInstance(ctx).InstanceID()) + if err := c.eventstore.FilterToQueryReducer(ctx, model); err != nil { + return nil, err + } + milestones = &model.MilestonesReached + c.setCachedMilestonesReached(ctx, milestones) + return milestones, nil +} + +// getCachedMilestonesReached checks for milestone completeness on an instance and returns a filled +// [MilestonesReached] object. +// Otherwise it looks for the object in the milestone cache. 
+func (c *Commands) getCachedMilestonesReached(ctx context.Context) (*MilestonesReached, bool) { + instanceID := authz.GetInstance(ctx).InstanceID() + if _, ok := c.milestonesCompleted.Load(instanceID); ok { + return &MilestonesReached{ + InstanceID: instanceID, + InstanceCreated: true, + AuthenticationSucceededOnInstance: true, + ProjectCreated: true, + ApplicationCreated: true, + AuthenticationSucceededOnApplication: true, + InstanceDeleted: false, + }, ok + } + return c.caches.milestones.Get(ctx, milestoneIndexInstanceID, instanceID) +} + +// setCachedMilestonesReached stores the current milestones state in the milestones cache. +// If the milestones are complete, the instance ID is stored in milestonesCompleted instead. +func (c *Commands) setCachedMilestonesReached(ctx context.Context, milestones *MilestonesReached) { + if milestones.complete() { + c.milestonesCompleted.Store(milestones.InstanceID, struct{}{}) + return + } + c.caches.milestones.Set(ctx, milestones) +} + +// Keys implements cache.Entry +func (c *MilestonesReached) Keys(i milestoneIndex) []string { + if i == milestoneIndexInstanceID { + return []string{c.InstanceID} + } + return nil +} + +// MilestonePushed writes a new milestone.PushedEvent with the milestone.Aggregate to the eventstore func (c *Commands) MilestonePushed( ctx context.Context, + instanceID string, msType milestone.Type, endpoints []string, - primaryDomain string, ) error { - id, err := c.idGenerator.Next() - if err != nil { - return err - } - _, err = c.eventstore.Push(ctx, milestone.NewPushedEvent(ctx, milestone.NewAggregate(ctx, id), msType, endpoints, primaryDomain, c.externalDomain)) + _, err := c.eventstore.Push(ctx, milestone.NewPushedEvent(ctx, milestone.NewInstanceAggregate(instanceID), msType, endpoints, c.externalDomain)) return err } + +func setupInstanceCreatedMilestone(validations *[]preparation.Validation, instanceID string) { + *validations = append(*validations, func() (preparation.CreateCommands, error) { + return func(ctx context.Context, _ preparation.FilterToQueryReducer) ([]eventstore.Command, error) { + return []eventstore.Command{ + milestone.NewReachedEvent(ctx, milestone.NewInstanceAggregate(instanceID), milestone.InstanceCreated), + }, nil + }, nil + }) +} + +func (s *OIDCSessionEvents) SetMilestones(ctx context.Context, clientID string, isHuman bool) (postCommit func(ctx context.Context), err error) { + postCommit = func(ctx context.Context) {} + milestones, err := s.commands.GetMilestonesReached(ctx) + if err != nil { + return postCommit, err + } + + instance := authz.GetInstance(ctx) + aggregate := milestone.NewAggregate(ctx) + var invalidate bool + if !milestones.AuthenticationSucceededOnInstance { + s.events = append(s.events, milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)) + invalidate = true + } + if !milestones.AuthenticationSucceededOnApplication && isHuman && clientID != instance.ConsoleClientID() { + s.events = append(s.events, milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnApplication)) + invalidate = true + } + if invalidate { + postCommit = s.commands.invalidateMilestoneCachePostCommit(instance.InstanceID()) + } + return postCommit, nil +} + +func (c *Commands) projectCreatedMilestone(ctx context.Context, cmds *[]eventstore.Command) (postCommit func(ctx context.Context), err error) { + postCommit = func(ctx context.Context) {} + if isSystemUser(ctx) { + return postCommit, nil + } + milestones, err := c.GetMilestonesReached(ctx) + if err != nil { 
+ return postCommit, err + } + if milestones.ProjectCreated { + return postCommit, nil + } + aggregate := milestone.NewAggregate(ctx) + *cmds = append(*cmds, milestone.NewReachedEvent(ctx, aggregate, milestone.ProjectCreated)) + return c.invalidateMilestoneCachePostCommit(aggregate.InstanceID), nil +} + +func (c *Commands) applicationCreatedMilestone(ctx context.Context, cmds *[]eventstore.Command) (postCommit func(ctx context.Context), err error) { + postCommit = func(ctx context.Context) {} + if isSystemUser(ctx) { + return postCommit, nil + } + milestones, err := c.GetMilestonesReached(ctx) + if err != nil { + return postCommit, err + } + if milestones.ApplicationCreated { + return postCommit, nil + } + aggregate := milestone.NewAggregate(ctx) + *cmds = append(*cmds, milestone.NewReachedEvent(ctx, aggregate, milestone.ApplicationCreated)) + return c.invalidateMilestoneCachePostCommit(aggregate.InstanceID), nil +} + +func (c *Commands) invalidateMilestoneCachePostCommit(instanceID string) func(ctx context.Context) { + return func(ctx context.Context) { + err := c.caches.milestones.Invalidate(ctx, milestoneIndexInstanceID, instanceID) + logging.WithFields("instance_id", instanceID).OnError(err).Error("failed to invalidate milestone cache") + } +} + +func isSystemUser(ctx context.Context) bool { + return authz.GetCtxData(ctx).SystemMemberships != nil +} diff --git a/internal/command/milestone_model.go b/internal/command/milestone_model.go new file mode 100644 index 0000000000..7d18b3a800 --- /dev/null +++ b/internal/command/milestone_model.go @@ -0,0 +1,58 @@ +package command + +import ( + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/repository/milestone" +) + +type MilestonesReachedWriteModel struct { + eventstore.WriteModel + MilestonesReached +} + +func NewMilestonesReachedWriteModel(instanceID string) *MilestonesReachedWriteModel { + return &MilestonesReachedWriteModel{ + WriteModel: eventstore.WriteModel{ + AggregateID: instanceID, + InstanceID: instanceID, + }, + MilestonesReached: MilestonesReached{ + InstanceID: instanceID, + }, + } +} + +func (m *MilestonesReachedWriteModel) Query() *eventstore.SearchQueryBuilder { + return eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent). + AddQuery(). + AggregateTypes(milestone.AggregateType). + AggregateIDs(m.AggregateID). + EventTypes(milestone.ReachedEventType, milestone.PushedEventType). 
+ Builder() +} + +func (m *MilestonesReachedWriteModel) Reduce() error { + for _, event := range m.Events { + if e, ok := event.(*milestone.ReachedEvent); ok { + m.reduceReachedEvent(e) + } + } + return m.WriteModel.Reduce() +} + +func (m *MilestonesReachedWriteModel) reduceReachedEvent(e *milestone.ReachedEvent) { + switch e.MilestoneType { + case milestone.InstanceCreated: + m.InstanceCreated = true + case milestone.AuthenticationSucceededOnInstance: + m.AuthenticationSucceededOnInstance = true + case milestone.ProjectCreated: + m.ProjectCreated = true + case milestone.ApplicationCreated: + m.ApplicationCreated = true + case milestone.AuthenticationSucceededOnApplication: + m.AuthenticationSucceededOnApplication = true + case milestone.InstanceDeleted: + m.InstanceDeleted = true + } +} diff --git a/internal/command/milestone_test.go b/internal/command/milestone_test.go new file mode 100644 index 0000000000..819db9d098 --- /dev/null +++ b/internal/command/milestone_test.go @@ -0,0 +1,629 @@ +package command + +import ( + "context" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/gomap" + "github.com/zitadel/zitadel/internal/cache/noop" + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/repository/milestone" +) + +func TestCommands_GetMilestonesReached(t *testing.T) { + cached := &MilestonesReached{ + InstanceID: "cached-id", + InstanceCreated: true, + AuthenticationSucceededOnInstance: true, + } + + ctx := authz.WithInstanceID(context.Background(), "instanceID") + aggregate := milestone.NewAggregate(ctx) + + type fields struct { + eventstore func(*testing.T) *eventstore.Eventstore + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + want *MilestonesReached + wantErr error + }{ + { + name: "cached", + fields: fields{ + eventstore: expectEventstore(), + }, + args: args{ + ctx: authz.WithInstanceID(context.Background(), "cached-id"), + }, + want: cached, + }, + { + name: "filter error", + fields: fields{ + eventstore: expectEventstore( + expectFilterError(io.ErrClosedPipe), + ), + }, + args: args{ + ctx: ctx, + }, + wantErr: io.ErrClosedPipe, + }, + { + name: "no events, all false", + fields: fields{ + eventstore: expectEventstore( + expectFilter(), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + }, + }, + { + name: "instance created", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.InstanceCreated)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + InstanceCreated: true, + }, + }, + { + name: "instance auth", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + AuthenticationSucceededOnInstance: true, + }, + }, + { + name: "project created", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.ProjectCreated)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + 
InstanceID: "instanceID", + ProjectCreated: true, + }, + }, + { + name: "app created", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.ApplicationCreated)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + ApplicationCreated: true, + }, + }, + { + name: "app auth", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnApplication)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + AuthenticationSucceededOnApplication: true, + }, + }, + { + name: "instance deleted", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.InstanceDeleted)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + want: &MilestonesReached{ + InstanceID: "instanceID", + InstanceDeleted: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := gomap.NewCache[milestoneIndex, string, *MilestonesReached]( + context.Background(), + []milestoneIndex{milestoneIndexInstanceID}, + cache.CacheConfig{Connector: "memory"}, + ) + cache.Set(context.Background(), cached) + + c := &Commands{ + eventstore: tt.fields.eventstore(t), + caches: &Caches{ + milestones: cache, + }, + } + got, err := c.GetMilestonesReached(tt.args.ctx) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestCommands_milestonesCompleted(t *testing.T) { + c := &Commands{ + caches: &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + }, + } + ctx := authz.WithInstanceID(context.Background(), "instanceID") + arg := &MilestonesReached{ + InstanceID: "instanceID", + InstanceCreated: true, + AuthenticationSucceededOnInstance: true, + ProjectCreated: true, + ApplicationCreated: true, + AuthenticationSucceededOnApplication: true, + InstanceDeleted: false, + } + c.setCachedMilestonesReached(ctx, arg) + got, ok := c.getCachedMilestonesReached(ctx) + assert.True(t, ok) + assert.Equal(t, arg, got) +} + +func TestCommands_MilestonePushed(t *testing.T) { + aggregate := milestone.NewInstanceAggregate("instanceID") + type fields struct { + eventstore func(*testing.T) *eventstore.Eventstore + } + type args struct { + ctx context.Context + instanceID string + msType milestone.Type + endpoints []string + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "milestone pushed", + fields: fields{ + eventstore: expectEventstore( + expectPush( + milestone.NewPushedEvent( + context.Background(), + aggregate, + milestone.ApplicationCreated, + []string{"foo.com", "bar.com"}, + "example.com", + ), + ), + ), + }, + args: args{ + ctx: context.Background(), + instanceID: "instanceID", + msType: milestone.ApplicationCreated, + endpoints: []string{"foo.com", "bar.com"}, + }, + wantErr: nil, + }, + { + name: "pusher error", + fields: fields{ + eventstore: expectEventstore( + expectPushFailed( + io.ErrClosedPipe, + milestone.NewPushedEvent( + context.Background(), + aggregate, + milestone.ApplicationCreated, + []string{"foo.com", "bar.com"}, + "example.com", + ), + ), + ), + }, + args: args{ + ctx: context.Background(), + instanceID: "instanceID", + msType: milestone.ApplicationCreated, + endpoints: []string{"foo.com", "bar.com"}, + }, + wantErr: 
io.ErrClosedPipe, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Commands{ + eventstore: tt.fields.eventstore(t), + externalDomain: "example.com", + } + err := c.MilestonePushed(tt.args.ctx, tt.args.instanceID, tt.args.msType, tt.args.endpoints) + assert.ErrorIs(t, err, tt.wantErr) + }) + } +} + +func TestOIDCSessionEvents_SetMilestones(t *testing.T) { + ctx := authz.WithInstanceID(context.Background(), "instanceID") + ctx = authz.WithConsoleClientID(ctx, "console") + aggregate := milestone.NewAggregate(ctx) + + type fields struct { + eventstore func(*testing.T) *eventstore.Eventstore + } + type args struct { + ctx context.Context + clientID string + isHuman bool + } + tests := []struct { + name string + fields fields + args args + wantEvents []eventstore.Command + wantErr error + }{ + { + name: "get error", + fields: fields{ + eventstore: expectEventstore( + expectFilterError(io.ErrClosedPipe), + ), + }, + args: args{ + ctx: ctx, + clientID: "client", + isHuman: true, + }, + wantErr: io.ErrClosedPipe, + }, + { + name: "milestones already reached", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)), + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnApplication)), + ), + ), + }, + args: args{ + ctx: ctx, + clientID: "client", + isHuman: true, + }, + wantErr: nil, + }, + { + name: "auth on instance", + fields: fields{ + eventstore: expectEventstore( + expectFilter(), + ), + }, + args: args{ + ctx: ctx, + clientID: "console", + isHuman: true, + }, + wantEvents: []eventstore.Command{ + milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance), + }, + wantErr: nil, + }, + { + name: "subsequent console login, no milestone", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)), + ), + ), + }, + args: args{ + ctx: ctx, + clientID: "console", + isHuman: true, + }, + wantErr: nil, + }, + { + name: "subsequent machine login, no milestone", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)), + ), + ), + }, + args: args{ + ctx: ctx, + clientID: "client", + isHuman: false, + }, + wantErr: nil, + }, + { + name: "auth on app", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnInstance)), + ), + ), + }, + args: args{ + ctx: ctx, + clientID: "client", + isHuman: true, + }, + wantEvents: []eventstore.Command{ + milestone.NewReachedEvent(ctx, aggregate, milestone.AuthenticationSucceededOnApplication), + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Commands{ + eventstore: tt.fields.eventstore(t), + caches: &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + }, + } + s := &OIDCSessionEvents{ + commands: c, + } + postCommit, err := s.SetMilestones(tt.args.ctx, tt.args.clientID, tt.args.isHuman) + postCommit(tt.args.ctx) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantEvents, s.events) + }) + } +} + +func TestCommands_projectCreatedMilestone(t *testing.T) { + ctx := authz.WithInstanceID(context.Background(), 
"instanceID") + systemCtx := authz.SetCtxData(ctx, authz.CtxData{ + SystemMemberships: authz.Memberships{ + &authz.Membership{ + MemberType: authz.MemberTypeSystem, + }, + }, + }) + aggregate := milestone.NewAggregate(ctx) + + type fields struct { + eventstore func(*testing.T) *eventstore.Eventstore + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantEvents []eventstore.Command + wantErr error + }{ + { + name: "system user", + fields: fields{ + eventstore: expectEventstore(), + }, + args: args{ + ctx: systemCtx, + }, + wantErr: nil, + }, + { + name: "get error", + fields: fields{ + eventstore: expectEventstore( + expectFilterError(io.ErrClosedPipe), + ), + }, + args: args{ + ctx: ctx, + }, + wantErr: io.ErrClosedPipe, + }, + { + name: "milestone already reached", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.ProjectCreated)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + wantErr: nil, + }, + { + name: "milestone reached event", + fields: fields{ + eventstore: expectEventstore( + expectFilter(), + ), + }, + args: args{ + ctx: ctx, + }, + wantEvents: []eventstore.Command{ + milestone.NewReachedEvent(ctx, aggregate, milestone.ProjectCreated), + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Commands{ + eventstore: tt.fields.eventstore(t), + caches: &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + }, + } + var cmds []eventstore.Command + postCommit, err := c.projectCreatedMilestone(tt.args.ctx, &cmds) + postCommit(tt.args.ctx) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantEvents, cmds) + }) + } +} + +func TestCommands_applicationCreatedMilestone(t *testing.T) { + ctx := authz.WithInstanceID(context.Background(), "instanceID") + systemCtx := authz.SetCtxData(ctx, authz.CtxData{ + SystemMemberships: authz.Memberships{ + &authz.Membership{ + MemberType: authz.MemberTypeSystem, + }, + }, + }) + aggregate := milestone.NewAggregate(ctx) + + type fields struct { + eventstore func(*testing.T) *eventstore.Eventstore + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantEvents []eventstore.Command + wantErr error + }{ + { + name: "system user", + fields: fields{ + eventstore: expectEventstore(), + }, + args: args{ + ctx: systemCtx, + }, + wantErr: nil, + }, + { + name: "get error", + fields: fields{ + eventstore: expectEventstore( + expectFilterError(io.ErrClosedPipe), + ), + }, + args: args{ + ctx: ctx, + }, + wantErr: io.ErrClosedPipe, + }, + { + name: "milestone already reached", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + eventFromEventPusher(milestone.NewReachedEvent(ctx, aggregate, milestone.ApplicationCreated)), + ), + ), + }, + args: args{ + ctx: ctx, + }, + wantErr: nil, + }, + { + name: "milestone reached event", + fields: fields{ + eventstore: expectEventstore( + expectFilter(), + ), + }, + args: args{ + ctx: ctx, + }, + wantEvents: []eventstore.Command{ + milestone.NewReachedEvent(ctx, aggregate, milestone.ApplicationCreated), + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Commands{ + eventstore: tt.fields.eventstore(t), + caches: &Caches{ + milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), + }, + } + var cmds []eventstore.Command + postCommit, err := 
c.applicationCreatedMilestone(tt.args.ctx, &cmds) + postCommit(tt.args.ctx) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantEvents, cmds) + }) + } +} + +func (c *Commands) setMilestonesCompletedForTest(instanceID string) { + c.milestonesCompleted.Store(instanceID, struct{}{}) +} diff --git a/internal/command/oidc_session.go b/internal/command/oidc_session.go index 95a5934b91..f7bb9b4cb6 100644 --- a/internal/command/oidc_session.go +++ b/internal/command/oidc_session.go @@ -71,7 +71,8 @@ func (c *Commands) CreateOIDCSessionFromAuthRequest(ctx context.Context, authReq return nil, "", zerrors.ThrowPreconditionFailed(nil, "COMMAND-Iung5", "Errors.AuthRequest.NoCode") } - sessionModel := NewSessionWriteModel(authReqModel.SessionID, authz.GetInstance(ctx).InstanceID()) + instanceID := authz.GetInstance(ctx).InstanceID() + sessionModel := NewSessionWriteModel(authReqModel.SessionID, instanceID) err = c.eventstore.FilterToQueryReducer(ctx, sessionModel) if err != nil { return nil, "", err @@ -118,8 +119,15 @@ func (c *Commands) CreateOIDCSessionFromAuthRequest(ctx context.Context, authReq } } cmd.SetAuthRequestSuccessful(ctx, authReqModel.aggregate) - session, err = cmd.PushEvents(ctx) - return session, authReqModel.State, err + postCommit, err := cmd.SetMilestones(ctx, authReqModel.ClientID, true) + if err != nil { + return nil, "", err + } + if session, err = cmd.PushEvents(ctx); err != nil { + return nil, "", err + } + postCommit(ctx) + return session, authReqModel.State, nil } func (c *Commands) CreateOIDCSession(ctx context.Context, @@ -161,7 +169,15 @@ func (c *Commands) CreateOIDCSession(ctx context.Context, return nil, err } } - return cmd.PushEvents(ctx) + postCommit, err := cmd.SetMilestones(ctx, clientID, sessionID != "") + if err != nil { + return nil, err + } + if session, err = cmd.PushEvents(ctx); err != nil { + return nil, err + } + postCommit(ctx) + return session, nil } type RefreshTokenComplianceChecker func(ctx context.Context, wm *OIDCSessionWriteModel, requestedScope []string) (scope []string, err error) @@ -283,7 +299,7 @@ func (c *Commands) newOIDCSessionAddEvents(ctx context.Context, userID, resource } sessionID = IDPrefixV2 + sessionID return &OIDCSessionEvents{ - eventstore: c.eventstore, + commands: c, idGenerator: c.idGenerator, encryptionAlg: c.keyAlgorithm, events: pending, @@ -341,7 +357,7 @@ func (c *Commands) newOIDCSessionUpdateEvents(ctx context.Context, refreshToken return nil, err } return &OIDCSessionEvents{ - eventstore: c.eventstore, + commands: c, idGenerator: c.idGenerator, encryptionAlg: c.keyAlgorithm, oidcSessionWriteModel: sessionWriteModel, @@ -352,7 +368,7 @@ func (c *Commands) newOIDCSessionUpdateEvents(ctx context.Context, refreshToken } type OIDCSessionEvents struct { - eventstore *eventstore.Eventstore + commands *Commands idGenerator id.Generator encryptionAlg crypto.EncryptionAlgorithm events []eventstore.Command @@ -467,7 +483,7 @@ func (c *OIDCSessionEvents) generateRefreshToken(userID string) (refreshTokenID, } func (c *OIDCSessionEvents) PushEvents(ctx context.Context) (*OIDCSession, error) { - pushedEvents, err := c.eventstore.Push(ctx, c.events...) + pushedEvents, err := c.commands.eventstore.Push(ctx, c.events...) 
if err != nil { return nil, err } @@ -496,7 +512,7 @@ func (c *OIDCSessionEvents) PushEvents(ctx context.Context) (*OIDCSession, error // we need to use `-` as a delimiter because the OIDC library uses `:` and will check for a length of 2 parts session.TokenID = c.oidcSessionWriteModel.AggregateID + TokenDelimiter + c.accessTokenID } - activity.Trigger(ctx, c.oidcSessionWriteModel.UserResourceOwner, c.oidcSessionWriteModel.UserID, tokenReasonToActivityMethodType(c.oidcSessionWriteModel.AccessTokenReason), c.eventstore.FilterToQueryReducer) + activity.Trigger(ctx, c.oidcSessionWriteModel.UserResourceOwner, c.oidcSessionWriteModel.UserID, tokenReasonToActivityMethodType(c.oidcSessionWriteModel.AccessTokenReason), c.commands.eventstore.FilterToQueryReducer) return session, nil } diff --git a/internal/command/oidc_session_test.go b/internal/command/oidc_session_test.go index 4df173c7d5..86d6bd9033 100644 --- a/internal/command/oidc_session_test.go +++ b/internal/command/oidc_session_test.go @@ -70,7 +70,7 @@ func TestCommands_CreateOIDCSessionFromAuthRequest(t *testing.T) { eventstore: expectEventstore(), }, args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), authRequestID: "", complianceCheck: mockAuthRequestComplianceChecker(nil), }, @@ -86,7 +86,7 @@ func TestCommands_CreateOIDCSessionFromAuthRequest(t *testing.T) { ), }, args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), authRequestID: "V2_authRequestID", complianceCheck: mockAuthRequestComplianceChecker(nil), }, @@ -102,7 +102,7 @@ func TestCommands_CreateOIDCSessionFromAuthRequest(t *testing.T) { ), }, args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), authRequestID: "V2_authRequestID", complianceCheck: mockAuthRequestComplianceChecker(nil), }, @@ -706,6 +706,7 @@ func TestCommands_CreateOIDCSessionFromAuthRequest(t *testing.T) { defaultRefreshTokenIdleLifetime: tt.fields.defaultRefreshTokenIdleLifetime, keyAlgorithm: tt.fields.keyAlgorithm, } + c.setMilestonesCompletedForTest("instanceID") gotSession, gotState, err := c.CreateOIDCSessionFromAuthRequest(tt.args.ctx, tt.args.authRequestID, tt.args.complianceCheck, tt.args.needRefreshToken) require.ErrorIs(t, err, tt.res.err) @@ -762,7 +763,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "orgID", clientID: "clientID", @@ -818,7 +819,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -892,7 +893,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -1089,7 +1090,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -1186,7 +1187,7 @@ func 
TestCommands_CreateOIDCSession(t *testing.T) { keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -1266,7 +1267,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { }), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -1347,7 +1348,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { }), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), userID: "userID", resourceOwner: "org1", clientID: "clientID", @@ -1406,6 +1407,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { keyAlgorithm: tt.fields.keyAlgorithm, checkPermission: tt.fields.checkPermission, } + c.setMilestonesCompletedForTest("instanceID") got, err := c.CreateOIDCSession(tt.args.ctx, tt.args.userID, tt.args.resourceOwner, diff --git a/internal/command/project.go b/internal/command/project.go index 51fec42d03..6923f1169e 100644 --- a/internal/command/project.go +++ b/internal/command/project.go @@ -34,7 +34,11 @@ func (c *Commands) AddProjectWithID(ctx context.Context, project *domain.Project if existingProject.State != domain.ProjectStateUnspecified { return nil, zerrors.ThrowInvalidArgument(nil, "COMMAND-opamwu", "Errors.Project.AlreadyExisting") } - return c.addProjectWithID(ctx, project, resourceOwner, projectID) + project, err = c.addProjectWithID(ctx, project, resourceOwner, projectID) + if err != nil { + return nil, err + } + return project, nil } func (c *Commands) AddProject(ctx context.Context, project *domain.Project, resourceOwner, ownerUserID string) (_ *domain.Project, err error) { @@ -53,7 +57,11 @@ func (c *Commands) AddProject(ctx context.Context, project *domain.Project, reso return nil, err } - return c.addProjectWithIDWithOwner(ctx, project, resourceOwner, ownerUserID, projectID) + project, err = c.addProjectWithIDWithOwner(ctx, project, resourceOwner, ownerUserID, projectID) + if err != nil { + return nil, err + } + return project, nil } func (c *Commands) addProjectWithID(ctx context.Context, projectAdd *domain.Project, resourceOwner, projectID string) (_ *domain.Project, err error) { @@ -71,11 +79,15 @@ func (c *Commands) addProjectWithID(ctx context.Context, projectAdd *domain.Proj projectAdd.HasProjectCheck, projectAdd.PrivateLabelingSetting), } - + postCommit, err := c.projectCreatedMilestone(ctx, &events) + if err != nil { + return nil, err + } pushedEvents, err := c.eventstore.Push(ctx, events...) if err != nil { return nil, err } + postCommit(ctx) err = AppendAndReduce(addedProject, pushedEvents...) if err != nil { return nil, err @@ -103,11 +115,15 @@ func (c *Commands) addProjectWithIDWithOwner(ctx context.Context, projectAdd *do projectAdd.PrivateLabelingSetting), project.NewProjectMemberAddedEvent(ctx, projectAgg, ownerUserID, projectRole), } - + postCommit, err := c.projectCreatedMilestone(ctx, &events) + if err != nil { + return nil, err + } pushedEvents, err := c.eventstore.Push(ctx, events...) if err != nil { return nil, err } + postCommit(ctx) err = AppendAndReduce(addedProject, pushedEvents...) 
if err != nil { return nil, err diff --git a/internal/command/project_application_oidc.go b/internal/command/project_application_oidc.go index 1f1ec184f3..9852bea23b 100644 --- a/internal/command/project_application_oidc.go +++ b/internal/command/project_application_oidc.go @@ -202,10 +202,15 @@ func (c *Commands) addOIDCApplicationWithID(ctx context.Context, oidcApp *domain )) addedApplication.AppID = oidcApp.AppID + postCommit, err := c.applicationCreatedMilestone(ctx, &events) + if err != nil { + return nil, err + } pushedEvents, err := c.eventstore.Push(ctx, events...) if err != nil { return nil, err } + postCommit(ctx) err = AppendAndReduce(addedApplication, pushedEvents...) if err != nil { return nil, err diff --git a/internal/command/project_application_oidc_test.go b/internal/command/project_application_oidc_test.go index 13bf359597..01c848cd2e 100644 --- a/internal/command/project_application_oidc_test.go +++ b/internal/command/project_application_oidc_test.go @@ -11,6 +11,7 @@ import ( "github.com/zitadel/passwap" "github.com/zitadel/passwap/bcrypt" + "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/command/preparation" "github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/domain" @@ -418,7 +419,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { eventstore: expectEventstore(), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), oidcApp: &domain.OIDCApp{}, resourceOwner: "org1", }, @@ -434,7 +435,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), oidcApp: &domain.OIDCApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -463,7 +464,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), oidcApp: &domain.OIDCApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -521,7 +522,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t, "app1", "client1"), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), oidcApp: &domain.OIDCApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -619,7 +620,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t, "app1", "client1"), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), oidcApp: &domain.OIDCApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -676,7 +677,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &Commands{ + c := &Commands{ eventstore: tt.fields.eventstore(t), idGenerator: tt.fields.idGenerator, newHashedSecret: mockHashedSecret("secret"), @@ -684,7 +685,8 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ClientSecret: emptyConfig, }, } - got, err := r.AddOIDCApplication(tt.args.ctx, tt.args.oidcApp, tt.args.resourceOwner) + c.setMilestonesCompletedForTest("instanceID") + got, err := c.AddOIDCApplication(tt.args.ctx, tt.args.oidcApp, tt.args.resourceOwner) if tt.res.err == nil { assert.NoError(t, err) } diff --git a/internal/command/project_application_saml.go b/internal/command/project_application_saml.go index 
d6b3679a32..612c2dbd5c 100644 --- a/internal/command/project_application_saml.go +++ b/internal/command/project_application_saml.go @@ -28,10 +28,15 @@ func (c *Commands) AddSAMLApplication(ctx context.Context, application *domain.S return nil, err } addedApplication.AppID = application.AppID + postCommit, err := c.applicationCreatedMilestone(ctx, &events) + if err != nil { + return nil, err + } pushedEvents, err := c.eventstore.Push(ctx, events...) if err != nil { return nil, err } + postCommit(ctx) err = AppendAndReduce(addedApplication, pushedEvents...) if err != nil { return nil, err diff --git a/internal/command/project_application_saml_test.go b/internal/command/project_application_saml_test.go index 978e1ed013..ff774e9f49 100644 --- a/internal/command/project_application_saml_test.go +++ b/internal/command/project_application_saml_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore/v1/models" @@ -76,7 +77,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{}, resourceOwner: "org1", }, @@ -93,7 +94,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -123,7 +124,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -154,7 +155,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -201,7 +202,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t, "app1"), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -260,7 +261,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { httpClient: newTestClient(200, testMetadata), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -305,7 +306,7 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { httpClient: newTestClient(http.StatusNotFound, nil), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), samlApp: &domain.SAMLApp{ ObjectRoot: models.ObjectRoot{ AggregateID: "project1", @@ -325,13 +326,13 @@ func TestCommandSide_AddSAMLApplication(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &Commands{ + c := &Commands{ eventstore: tt.fields.eventstore, idGenerator: tt.fields.idGenerator, httpClient: tt.fields.httpClient, } - - got, err := r.AddSAMLApplication(tt.args.ctx, tt.args.samlApp, tt.args.resourceOwner) + 
c.setMilestonesCompletedForTest("instanceID") + got, err := c.AddSAMLApplication(tt.args.ctx, tt.args.samlApp, tt.args.resourceOwner) if tt.res.err == nil { assert.NoError(t, err) } diff --git a/internal/command/project_test.go b/internal/command/project_test.go index 1c18a721fe..4f8ad149e3 100644 --- a/internal/command/project_test.go +++ b/internal/command/project_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore/v1/models" @@ -44,7 +45,7 @@ func TestCommandSide_AddProject(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), project: &domain.Project{}, resourceOwner: "org1", }, @@ -60,7 +61,7 @@ func TestCommandSide_AddProject(t *testing.T) { ), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), project: &domain.Project{ Name: "project", ProjectRoleAssertion: true, @@ -121,7 +122,7 @@ func TestCommandSide_AddProject(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t, "project1"), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), project: &domain.Project{ Name: "project", ProjectRoleAssertion: true, @@ -159,7 +160,7 @@ func TestCommandSide_AddProject(t *testing.T) { idGenerator: id_mock.NewIDGeneratorExpectIDs(t, "project1"), }, args: args{ - ctx: context.Background(), + ctx: authz.WithInstanceID(context.Background(), "instanceID"), project: &domain.Project{ Name: "project", ProjectRoleAssertion: true, @@ -187,11 +188,12 @@ func TestCommandSide_AddProject(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &Commands{ + c := &Commands{ eventstore: tt.fields.eventstore, idGenerator: tt.fields.idGenerator, } - got, err := r.AddProject(tt.args.ctx, tt.args.project, tt.args.resourceOwner, tt.args.ownerID) + c.setMilestonesCompletedForTest("instanceID") + got, err := c.AddProject(tt.args.ctx, tt.args.project, tt.args.resourceOwner, tt.args.ownerID) if tt.res.err == nil { assert.NoError(t, err) } diff --git a/internal/eventstore/eventstore.go b/internal/eventstore/eventstore.go index e456135828..a8c8e923b5 100644 --- a/internal/eventstore/eventstore.go +++ b/internal/eventstore/eventstore.go @@ -11,6 +11,7 @@ import ( "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/zerrors" ) @@ -247,6 +248,10 @@ func (es *Eventstore) InstanceIDs(ctx context.Context, maxAge time.Duration, for return instances, nil } +func (es *Eventstore) Client() *database.DB { + return es.querier.Client() +} + type QueryReducer interface { reducer //Query returns the SearchQueryFactory for the events needed in reducer @@ -270,6 +275,8 @@ type Querier interface { LatestSequence(ctx context.Context, queryFactory *SearchQueryBuilder) (float64, error) // InstanceIDs returns the instance ids found by the search query InstanceIDs(ctx context.Context, queryFactory *SearchQueryBuilder) ([]string, error) + // Client returns the underlying database connection + Client() *database.DB } type Pusher interface { diff --git a/internal/eventstore/eventstore_test.go b/internal/eventstore/eventstore_test.go index 53ef4e54cf..33e80892c5 100644 --- a/internal/eventstore/eventstore_test.go 
+++ b/internal/eventstore/eventstore_test.go @@ -12,6 +12,7 @@ import ( "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/service" + "github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/zerrors" ) @@ -437,6 +438,10 @@ func (repo *testQuerier) InstanceIDs(ctx context.Context, queryFactory *SearchQu return repo.instances, nil } +func (*testQuerier) Client() *database.DB { + return nil +} + func TestEventstore_Push(t *testing.T) { type args struct { events []Command diff --git a/internal/eventstore/repository/mock/repository.mock.go b/internal/eventstore/repository/mock/repository.mock.go index a854de2995..58a6c8f86f 100644 --- a/internal/eventstore/repository/mock/repository.mock.go +++ b/internal/eventstore/repository/mock/repository.mock.go @@ -13,6 +13,7 @@ import ( context "context" reflect "reflect" + database "github.com/zitadel/zitadel/internal/database" eventstore "github.com/zitadel/zitadel/internal/eventstore" gomock "go.uber.org/mock/gomock" ) @@ -40,6 +41,20 @@ func (m *MockQuerier) EXPECT() *MockQuerierMockRecorder { return m.recorder } +// Client mocks base method. +func (m *MockQuerier) Client() *database.DB { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Client") + ret0, _ := ret[0].(*database.DB) + return ret0 +} + +// Client indicates an expected call of Client. +func (mr *MockQuerierMockRecorder) Client() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Client", reflect.TypeOf((*MockQuerier)(nil).Client)) +} + // FilterToReducer mocks base method. func (m *MockQuerier) FilterToReducer(arg0 context.Context, arg1 *eventstore.SearchQueryBuilder, arg2 eventstore.Reducer) error { m.ctrl.T.Helper() diff --git a/internal/eventstore/repository/sql/crdb.go b/internal/eventstore/repository/sql/crdb.go index a60a2ef7b8..1b3e17377c 100644 --- a/internal/eventstore/repository/sql/crdb.go +++ b/internal/eventstore/repository/sql/crdb.go @@ -282,7 +282,7 @@ func (db *CRDB) InstanceIDs(ctx context.Context, searchQuery *eventstore.SearchQ return ids, nil } -func (db *CRDB) db() *database.DB { +func (db *CRDB) Client() *database.DB { return db.DB } diff --git a/internal/eventstore/repository/sql/query.go b/internal/eventstore/repository/sql/query.go index 3cddcb7924..bbc9513864 100644 --- a/internal/eventstore/repository/sql/query.go +++ b/internal/eventstore/repository/sql/query.go @@ -27,7 +27,7 @@ type querier interface { eventQuery(useV1 bool) string maxSequenceQuery(useV1 bool) string instanceIDsQuery(useV1 bool) string - db() *database.DB + Client() *database.DB orderByEventSequence(desc, shouldOrderBySequence, useV1 bool) string dialect.Database } @@ -110,7 +110,7 @@ func query(ctx context.Context, criteria querier, searchQuery *eventstore.Search var contextQuerier interface { QueryContext(context.Context, func(rows *sql.Rows) error, string, ...interface{}) error } - contextQuerier = criteria.db() + contextQuerier = criteria.Client() if q.Tx != nil { contextQuerier = &tx{Tx: q.Tx} } diff --git a/internal/notification/handlers/commands.go b/internal/notification/handlers/commands.go index a50abe18a7..07969a6bba 100644 --- a/internal/notification/handlers/commands.go +++ b/internal/notification/handlers/commands.go @@ -22,5 +22,5 @@ type Commands interface { HumanPhoneVerificationCodeSent(ctx context.Context, orgID, userID string, generatorInfo *senders.CodeGeneratorInfo) error InviteCodeSent(ctx context.Context, orgID, userID string) error UsageNotificationSent(ctx 
context.Context, dueEvent *quota.NotificationDueEvent) error - MilestonePushed(ctx context.Context, msType milestone.Type, endpoints []string, primaryDomain string) error + MilestonePushed(ctx context.Context, instanceID string, msType milestone.Type, endpoints []string) error } diff --git a/internal/notification/handlers/integration_test/telemetry_pusher_test.go b/internal/notification/handlers/integration_test/telemetry_pusher_test.go index fdff1180a8..6b4ac10258 100644 --- a/internal/notification/handlers/integration_test/telemetry_pusher_test.go +++ b/internal/notification/handlers/integration_test/telemetry_pusher_test.go @@ -16,6 +16,7 @@ import ( "github.com/zitadel/zitadel/internal/integration" "github.com/zitadel/zitadel/internal/integration/sink" + "github.com/zitadel/zitadel/internal/repository/milestone" "github.com/zitadel/zitadel/pkg/grpc/app" "github.com/zitadel/zitadel/pkg/grpc/management" "github.com/zitadel/zitadel/pkg/grpc/object" @@ -32,12 +33,12 @@ func TestServer_TelemetryPushMilestones(t *testing.T) { instance := integration.NewInstance(CTX) iamOwnerCtx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) - t.Log("testing against instance with primary domain", instance.Domain) - awaitMilestone(t, sub, instance.Domain, "InstanceCreated") + t.Log("testing against instance", instance.ID()) + awaitMilestone(t, sub, instance.ID(), milestone.InstanceCreated) projectAdded, err := instance.Client.Mgmt.AddProject(iamOwnerCtx, &management.AddProjectRequest{Name: "integration"}) require.NoError(t, err) - awaitMilestone(t, sub, instance.Domain, "ProjectCreated") + awaitMilestone(t, sub, instance.ID(), milestone.ProjectCreated) redirectURI := "http://localhost:8888" application, err := instance.Client.Mgmt.AddOIDCApp(iamOwnerCtx, &management.AddOIDCAppRequest{ @@ -52,14 +53,14 @@ func TestServer_TelemetryPushMilestones(t *testing.T) { AccessTokenType: app.OIDCTokenType_OIDC_TOKEN_TYPE_JWT, }) require.NoError(t, err) - awaitMilestone(t, sub, instance.Domain, "ApplicationCreated") + awaitMilestone(t, sub, instance.ID(), milestone.ApplicationCreated) // create the session to be used for the authN of the clients sessionID, sessionToken, _, _ := instance.CreatePasswordSession(t, iamOwnerCtx, instance.AdminUserID, "Password1!") console := consoleOIDCConfig(t, instance) loginToClient(t, instance, console.GetClientId(), console.GetRedirectUris()[0], sessionID, sessionToken) - awaitMilestone(t, sub, instance.Domain, "AuthenticationSucceededOnInstance") + awaitMilestone(t, sub, instance.ID(), milestone.AuthenticationSucceededOnInstance) // make sure the client has been projected require.EventuallyWithT(t, func(collectT *assert.CollectT) { @@ -70,11 +71,11 @@ func TestServer_TelemetryPushMilestones(t *testing.T) { assert.NoError(collectT, err) }, time.Minute, time.Second, "app not found") loginToClient(t, instance, application.GetClientId(), redirectURI, sessionID, sessionToken) - awaitMilestone(t, sub, instance.Domain, "AuthenticationSucceededOnApplication") + awaitMilestone(t, sub, instance.ID(), milestone.AuthenticationSucceededOnApplication) _, err = integration.SystemClient().RemoveInstance(CTX, &system.RemoveInstanceRequest{InstanceId: instance.ID()}) require.NoError(t, err) - awaitMilestone(t, sub, instance.Domain, "InstanceDeleted") + awaitMilestone(t, sub, instance.ID(), milestone.InstanceDeleted) } func loginToClient(t *testing.T, instance *integration.Instance, clientID, redirectURI, sessionID, sessionToken string) { @@ -134,7 +135,7 @@ func consoleOIDCConfig(t 
*testing.T, instance *integration.Instance) *app.OIDCCo return apps.GetResult()[0].GetOidcConfig() } -func awaitMilestone(t *testing.T, sub *sink.Subscription, primaryDomain, expectMilestoneType string) { +func awaitMilestone(t *testing.T, sub *sink.Subscription, instanceID string, expectMilestoneType milestone.Type) { for { select { case req := <-sub.Recv(): @@ -144,17 +145,17 @@ func awaitMilestone(t *testing.T, sub *sink.Subscription, primaryDomain, expectM } t.Log("received milestone", plain.String()) milestone := struct { - Type string `json:"type"` - PrimaryDomain string `json:"primaryDomain"` + InstanceID string `json:"instanceId"` + Type milestone.Type `json:"type"` }{} if err := json.Unmarshal(req.Body, &milestone); err != nil { t.Error(err) } - if milestone.Type == expectMilestoneType && milestone.PrimaryDomain == primaryDomain { + if milestone.Type == expectMilestoneType && milestone.InstanceID == instanceID { return } - case <-time.After(2 * time.Minute): // why does it take so long to get a milestone !? - t.Fatalf("timed out waiting for milestone %s in domain %s", expectMilestoneType, primaryDomain) + case <-time.After(20 * time.Second): + t.Fatalf("timed out waiting for milestone %s for instance %s", expectMilestoneType, instanceID) } } } diff --git a/internal/notification/handlers/mock/commands.mock.go b/internal/notification/handlers/mock/commands.mock.go index 51942be42a..7d41c30f30 100644 --- a/internal/notification/handlers/mock/commands.mock.go +++ b/internal/notification/handlers/mock/commands.mock.go @@ -141,7 +141,7 @@ func (mr *MockCommandsMockRecorder) InviteCodeSent(arg0, arg1, arg2 any) *gomock } // MilestonePushed mocks base method. -func (m *MockCommands) MilestonePushed(arg0 context.Context, arg1 milestone.Type, arg2 []string, arg3 string) error { +func (m *MockCommands) MilestonePushed(arg0 context.Context, arg1 string, arg2 milestone.Type, arg3 []string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MilestonePushed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) diff --git a/internal/notification/handlers/telemetry_pusher.go b/internal/notification/handlers/telemetry_pusher.go index 39a1f59454..be41074bc6 100644 --- a/internal/notification/handlers/telemetry_pusher.go +++ b/internal/notification/handlers/telemetry_pusher.go @@ -2,13 +2,9 @@ package handlers import ( "context" - "fmt" "net/http" "time" - "github.com/zitadel/logging" - - "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/call" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/eventstore" @@ -16,9 +12,7 @@ import ( "github.com/zitadel/zitadel/internal/notification/channels/webhook" _ "github.com/zitadel/zitadel/internal/notification/statik" "github.com/zitadel/zitadel/internal/notification/types" - "github.com/zitadel/zitadel/internal/query" "github.com/zitadel/zitadel/internal/repository/milestone" - "github.com/zitadel/zitadel/internal/repository/pseudo" "github.com/zitadel/zitadel/internal/zerrors" ) @@ -30,7 +24,6 @@ type TelemetryPusherConfig struct { Enabled bool Endpoints []string Headers http.Header - Limit uint64 } type telemetryPusher struct { @@ -54,7 +47,6 @@ func NewTelemetryPusher( queries: queries, channels: channels, } - handlerCfg.TriggerWithoutEvents = pusher.pushMilestones return handler.NewHandler( ctx, &handlerCfg, @@ -68,9 +60,9 @@ func (u *telemetryPusher) Name() string { func (t *telemetryPusher) Reducers() []handler.AggregateReducer { return []handler.AggregateReducer{{ - Aggregate: 
pseudo.AggregateType, + Aggregate: milestone.AggregateType, EventReducers: []handler.EventReducer{{ - Event: pseudo.ScheduledEventType, + Event: milestone.ReachedEventType, Reduce: t.pushMilestones, }}, }} @@ -78,51 +70,20 @@ func (t *telemetryPusher) Reducers() []handler.AggregateReducer { func (t *telemetryPusher) pushMilestones(event eventstore.Event) (*handler.Statement, error) { ctx := call.WithTimestamp(context.Background()) - scheduledEvent, ok := event.(*pseudo.ScheduledEvent) + e, ok := event.(*milestone.ReachedEvent) if !ok { return nil, zerrors.ThrowInvalidArgumentf(nil, "HANDL-lDTs5", "reduce.wrong.event.type %s", event.Type()) } - - return handler.NewStatement(event, func(ex handler.Executer, projectionName string) error { - isReached, err := query.NewNotNullQuery(query.MilestoneReachedDateColID) - if err != nil { - return err + return handler.NewStatement(event, func(handler.Executer, string) error { + // Do not push the milestone again if this was a migration event. + if e.ReachedDate != nil { + return nil } - isNotPushed, err := query.NewIsNullQuery(query.MilestonePushedDateColID) - if err != nil { - return err - } - hasPrimaryDomain, err := query.NewNotNullQuery(query.MilestonePrimaryDomainColID) - if err != nil { - return err - } - unpushedMilestones, err := t.queries.Queries.SearchMilestones(ctx, scheduledEvent.InstanceIDs, &query.MilestonesSearchQueries{ - SearchRequest: query.SearchRequest{ - Limit: t.cfg.Limit, - SortingColumn: query.MilestoneReachedDateColID, - Asc: true, - }, - Queries: []query.SearchQuery{isReached, isNotPushed, hasPrimaryDomain}, - }) - if err != nil { - return err - } - var errs int - for _, ms := range unpushedMilestones.Milestones { - if err = t.pushMilestone(ctx, scheduledEvent, ms); err != nil { - errs++ - logging.Warnf("pushing milestone %+v failed: %s", *ms, err.Error()) - } - } - if errs > 0 { - return fmt.Errorf("pushing %d of %d milestones failed", errs, unpushedMilestones.Count) - } - return nil + return t.pushMilestone(ctx, e) }), nil } -func (t *telemetryPusher) pushMilestone(ctx context.Context, event *pseudo.ScheduledEvent, ms *query.Milestone) error { - ctx = authz.WithInstanceID(ctx, ms.InstanceID) +func (t *telemetryPusher) pushMilestone(ctx context.Context, e *milestone.ReachedEvent) error { for _, endpoint := range t.cfg.Endpoints { if err := types.SendJSON( ctx, @@ -135,20 +96,18 @@ func (t *telemetryPusher) pushMilestone(ctx context.Context, event *pseudo.Sched &struct { InstanceID string `json:"instanceId"` ExternalDomain string `json:"externalDomain"` - PrimaryDomain string `json:"primaryDomain"` Type milestone.Type `json:"type"` ReachedDate time.Time `json:"reached"` }{ - InstanceID: ms.InstanceID, + InstanceID: e.Agg.InstanceID, ExternalDomain: t.queries.externalDomain, - PrimaryDomain: ms.PrimaryDomain, - Type: ms.Type, - ReachedDate: ms.ReachedDate, + Type: e.MilestoneType, + ReachedDate: e.GetReachedDate(), }, - event, + e, ).WithoutTemplate(); err != nil { return err } } - return t.commands.MilestonePushed(ctx, ms.Type, t.cfg.Endpoints, ms.PrimaryDomain) + return t.commands.MilestonePushed(ctx, e.Agg.InstanceID, e.MilestoneType, t.cfg.Endpoints) } diff --git a/internal/query/milestone.go b/internal/query/milestone.go index 0b9c34fc76..4277b8e68a 100644 --- a/internal/query/milestone.go +++ b/internal/query/milestone.go @@ -54,10 +54,6 @@ var ( name: projection.MilestoneColumnType, table: milestonesTable, } - MilestonePrimaryDomainColID = Column{ - name: projection.MilestoneColumnPrimaryDomain, - table: 
milestonesTable, - } MilestoneReachedDateColID = Column{ name: projection.MilestoneColumnReachedDate, table: milestonesTable, @@ -76,7 +72,10 @@ func (q *Queries) SearchMilestones(ctx context.Context, instanceIDs []string, qu if len(instanceIDs) == 0 { instanceIDs = []string{authz.GetInstance(ctx).InstanceID()} } - stmt, args, err := queries.toQuery(query).Where(sq.Eq{MilestoneInstanceIDColID.identifier(): instanceIDs}).ToSql() + stmt, args, err := queries.toQuery(query).Where( + sq.Eq{MilestoneInstanceIDColID.identifier(): instanceIDs}, + sq.Eq{InstanceDomainIsPrimaryCol.identifier(): true}, + ).ToSql() if err != nil { return nil, zerrors.ThrowInternal(err, "QUERY-A9i5k", "Errors.Query.SQLStatement") } @@ -96,13 +95,14 @@ func (q *Queries) SearchMilestones(ctx context.Context, instanceIDs []string, qu func prepareMilestonesQuery(ctx context.Context, db prepareDatabase) (sq.SelectBuilder, func(*sql.Rows) (*Milestones, error)) { return sq.Select( MilestoneInstanceIDColID.identifier(), - MilestonePrimaryDomainColID.identifier(), + InstanceDomainDomainCol.identifier(), MilestoneReachedDateColID.identifier(), MilestonePushedDateColID.identifier(), MilestoneTypeColID.identifier(), countColumn.identifier(), ). From(milestonesTable.identifier() + db.Timetravel(call.Took(ctx))). + LeftJoin(join(InstanceDomainInstanceIDCol, MilestoneInstanceIDColID)). PlaceholderFormat(sq.Dollar), func(rows *sql.Rows) (*Milestones, error) { milestones := make([]*Milestone, 0) diff --git a/internal/query/milestone_test.go b/internal/query/milestone_test.go index b0b7ec8b5a..c960724299 100644 --- a/internal/query/milestone_test.go +++ b/internal/query/milestone_test.go @@ -11,13 +11,14 @@ import ( var ( expectedMilestoneQuery = regexp.QuoteMeta(` - SELECT projections.milestones.instance_id, - projections.milestones.primary_domain, - projections.milestones.reached_date, - projections.milestones.last_pushed_date, - projections.milestones.type, + SELECT projections.milestones2.instance_id, + projections.instance_domains.domain, + projections.milestones2.reached_date, + projections.milestones2.last_pushed_date, + projections.milestones2.type, COUNT(*) OVER () - FROM projections.milestones AS OF SYSTEM TIME '-1 ms' + FROM projections.milestones2 AS OF SYSTEM TIME '-1 ms' + LEFT JOIN projections.instance_domains ON projections.milestones2.instance_id = projections.instance_domains.instance_id `) milestoneCols = []string{ diff --git a/internal/query/projection/event_test.go b/internal/query/projection/event_test.go index 50975265be..073f34c688 100644 --- a/internal/query/projection/event_test.go +++ b/internal/query/projection/event_test.go @@ -14,8 +14,9 @@ func testEvent( eventType eventstore.EventType, aggregateType eventstore.AggregateType, data []byte, + opts ...eventOption, ) *repository.Event { - return timedTestEvent(eventType, aggregateType, data, time.Now()) + return timedTestEvent(eventType, aggregateType, data, time.Now(), opts...) 
} func toSystemEvent(event *repository.Event) *repository.Event { @@ -28,8 +29,9 @@ func timedTestEvent( aggregateType eventstore.AggregateType, data []byte, creationDate time.Time, + opts ...eventOption, ) *repository.Event { - return &repository.Event{ + e := &repository.Event{ Seq: 15, CreationDate: creationDate, Typ: eventType, @@ -42,6 +44,18 @@ func timedTestEvent( ID: "event-id", EditorUser: "editor-user", } + for _, opt := range opts { + opt(e) + } + return e +} + +type eventOption func(e *repository.Event) + +func withVersion(v eventstore.Version) eventOption { + return func(e *repository.Event) { + e.Version = v + } } func baseEvent(*testing.T) eventstore.Event { diff --git a/internal/query/projection/milestones.go b/internal/query/projection/milestones.go index d7ee1997df..c264aa48fe 100644 --- a/internal/query/projection/milestones.go +++ b/internal/query/projection/milestones.go @@ -2,35 +2,26 @@ package projection import ( "context" - "strconv" - internal_authz "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/eventstore" old_handler "github.com/zitadel/zitadel/internal/eventstore/handler" "github.com/zitadel/zitadel/internal/eventstore/handler/v2" - "github.com/zitadel/zitadel/internal/repository/instance" "github.com/zitadel/zitadel/internal/repository/milestone" - "github.com/zitadel/zitadel/internal/repository/oidcsession" - "github.com/zitadel/zitadel/internal/repository/project" ) const ( - MilestonesProjectionTable = "projections.milestones" + MilestonesProjectionTable = "projections.milestones2" - MilestoneColumnInstanceID = "instance_id" - MilestoneColumnType = "type" - MilestoneColumnPrimaryDomain = "primary_domain" - MilestoneColumnReachedDate = "reached_date" - MilestoneColumnPushedDate = "last_pushed_date" - MilestoneColumnIgnoreClientIDs = "ignore_client_ids" + MilestoneColumnInstanceID = "instance_id" + MilestoneColumnType = "type" + MilestoneColumnReachedDate = "reached_date" + MilestoneColumnPushedDate = "last_pushed_date" ) -type milestoneProjection struct { - systemUsers map[string]*internal_authz.SystemAPIUser -} +type milestoneProjection struct{} -func newMilestoneProjection(ctx context.Context, config handler.Config, systemUsers map[string]*internal_authz.SystemAPIUser) *handler.Handler { - return handler.NewHandler(ctx, &config, &milestoneProjection{systemUsers: systemUsers}) +func newMilestoneProjection(ctx context.Context, config handler.Config) *handler.Handler { + return handler.NewHandler(ctx, &config, &milestoneProjection{}) } func (*milestoneProjection) Name() string { @@ -44,8 +35,6 @@ func (*milestoneProjection) Init() *old_handler.Check { handler.NewColumn(MilestoneColumnType, handler.ColumnTypeEnum), handler.NewColumn(MilestoneColumnReachedDate, handler.ColumnTypeTimestamp, handler.Nullable()), handler.NewColumn(MilestoneColumnPushedDate, handler.ColumnTypeTimestamp, handler.Nullable()), - handler.NewColumn(MilestoneColumnPrimaryDomain, handler.ColumnTypeText, handler.Nullable()), - handler.NewColumn(MilestoneColumnIgnoreClientIDs, handler.ColumnTypeTextArray, handler.Nullable()), }, handler.NewPrimaryKey(MilestoneColumnInstanceID, MilestoneColumnType), ), @@ -55,183 +44,47 @@ func (*milestoneProjection) Init() *old_handler.Check { // Reducers implements handler.Projection. 
func (p *milestoneProjection) Reducers() []handler.AggregateReducer { return []handler.AggregateReducer{ - { - Aggregate: instance.AggregateType, - EventReducers: []handler.EventReducer{ - { - Event: instance.InstanceAddedEventType, - Reduce: p.reduceInstanceAdded, - }, - { - Event: instance.InstanceDomainPrimarySetEventType, - Reduce: p.reduceInstanceDomainPrimarySet, - }, - { - Event: instance.InstanceRemovedEventType, - Reduce: p.reduceInstanceRemoved, - }, - }, - }, - { - Aggregate: project.AggregateType, - EventReducers: []handler.EventReducer{ - { - Event: project.ProjectAddedType, - Reduce: p.reduceProjectAdded, - }, - { - Event: project.ApplicationAddedType, - Reduce: p.reduceApplicationAdded, - }, - { - Event: project.OIDCConfigAddedType, - Reduce: p.reduceOIDCConfigAdded, - }, - { - Event: project.APIConfigAddedType, - Reduce: p.reduceAPIConfigAdded, - }, - }, - }, - { - Aggregate: oidcsession.AggregateType, - EventReducers: []handler.EventReducer{ - { - Event: oidcsession.AddedType, - Reduce: p.reduceOIDCSessionAdded, - }, - }, - }, { Aggregate: milestone.AggregateType, EventReducers: []handler.EventReducer{ + { + Event: milestone.ReachedEventType, + Reduce: p.reduceReached, + }, { Event: milestone.PushedEventType, - Reduce: p.reduceMilestonePushed, + Reduce: p.reducePushed, }, }, }, } } -func (p *milestoneProjection) reduceInstanceAdded(event eventstore.Event) (*handler.Statement, error) { - e, err := assertEvent[*instance.InstanceAddedEvent](event) +func (p *milestoneProjection) reduceReached(event eventstore.Event) (*handler.Statement, error) { + e, err := assertEvent[*milestone.ReachedEvent](event) if err != nil { return nil, err } - allTypes := milestone.AllTypes() - statements := make([]func(eventstore.Event) handler.Exec, 0, len(allTypes)) - for _, msType := range allTypes { - createColumns := []handler.Column{ - handler.NewCol(MilestoneColumnInstanceID, e.Aggregate().InstanceID), - handler.NewCol(MilestoneColumnType, msType), - } - if msType == milestone.InstanceCreated { - createColumns = append(createColumns, handler.NewCol(MilestoneColumnReachedDate, event.CreatedAt())) - } - statements = append(statements, handler.AddCreateStatement(createColumns)) - } - return handler.NewMultiStatement(e, statements...), nil + return handler.NewCreateStatement(event, []handler.Column{ + handler.NewCol(MilestoneColumnInstanceID, e.Agg.InstanceID), + handler.NewCol(MilestoneColumnType, e.MilestoneType), + handler.NewCol(MilestoneColumnReachedDate, e.GetReachedDate()), + }), nil } -func (p *milestoneProjection) reduceInstanceDomainPrimarySet(event eventstore.Event) (*handler.Statement, error) { - e, err := assertEvent[*instance.DomainPrimarySetEvent](event) - if err != nil { - return nil, err - } - return handler.NewUpdateStatement( - e, - []handler.Column{ - handler.NewCol(MilestoneColumnPrimaryDomain, e.Domain), - }, - []handler.Condition{ - handler.NewCond(MilestoneColumnInstanceID, e.Aggregate().InstanceID), - handler.NewIsNullCond(MilestoneColumnPushedDate), - }, - ), nil -} - -func (p *milestoneProjection) reduceProjectAdded(event eventstore.Event) (*handler.Statement, error) { - if _, err := assertEvent[*project.ProjectAddedEvent](event); err != nil { - return nil, err - } - return p.reduceReachedIfUserEventFunc(milestone.ProjectCreated)(event) -} - -func (p *milestoneProjection) reduceApplicationAdded(event eventstore.Event) (*handler.Statement, error) { - if _, err := assertEvent[*project.ApplicationAddedEvent](event); err != nil { - return nil, err - } - return 
p.reduceReachedIfUserEventFunc(milestone.ApplicationCreated)(event) -} - -func (p *milestoneProjection) reduceOIDCConfigAdded(event eventstore.Event) (*handler.Statement, error) { - e, err := assertEvent[*project.OIDCConfigAddedEvent](event) - if err != nil { - return nil, err - } - return p.reduceAppConfigAdded(e, e.ClientID) -} - -func (p *milestoneProjection) reduceAPIConfigAdded(event eventstore.Event) (*handler.Statement, error) { - e, err := assertEvent[*project.APIConfigAddedEvent](event) - if err != nil { - return nil, err - } - return p.reduceAppConfigAdded(e, e.ClientID) -} - -func (p *milestoneProjection) reduceOIDCSessionAdded(event eventstore.Event) (*handler.Statement, error) { - e, err := assertEvent[*oidcsession.AddedEvent](event) - if err != nil { - return nil, err - } - statements := []func(eventstore.Event) handler.Exec{ - handler.AddUpdateStatement( - []handler.Column{ - handler.NewCol(MilestoneColumnReachedDate, event.CreatedAt()), - }, - []handler.Condition{ - handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID), - handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnInstance), - handler.NewIsNullCond(MilestoneColumnReachedDate), - }, - ), - } - // We ignore authentications without app, for example JWT profile or PAT - if e.ClientID != "" { - statements = append(statements, handler.AddUpdateStatement( - []handler.Column{ - handler.NewCol(MilestoneColumnReachedDate, event.CreatedAt()), - }, - []handler.Condition{ - handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID), - handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnApplication), - handler.Not(handler.NewTextArrayContainsCond(MilestoneColumnIgnoreClientIDs, e.ClientID)), - handler.NewIsNullCond(MilestoneColumnReachedDate), - }, - )) - } - return handler.NewMultiStatement(e, statements...), nil -} - -func (p *milestoneProjection) reduceInstanceRemoved(event eventstore.Event) (*handler.Statement, error) { - if _, err := assertEvent[*instance.InstanceRemovedEvent](event); err != nil { - return nil, err - } - return p.reduceReachedFunc(milestone.InstanceDeleted)(event) -} - -func (p *milestoneProjection) reduceMilestonePushed(event eventstore.Event) (*handler.Statement, error) { +func (p *milestoneProjection) reducePushed(event eventstore.Event) (*handler.Statement, error) { e, err := assertEvent[*milestone.PushedEvent](event) if err != nil { return nil, err } + if e.Agg.Version != milestone.AggregateVersion { + return handler.NewNoOpStatement(event), nil // Skip v1 events. 
+ } if e.MilestoneType != milestone.InstanceDeleted { return handler.NewUpdateStatement( event, []handler.Column{ - handler.NewCol(MilestoneColumnPushedDate, event.CreatedAt()), + handler.NewCol(MilestoneColumnPushedDate, e.GetPushedDate()), }, []handler.Condition{ handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID), @@ -246,58 +99,3 @@ func (p *milestoneProjection) reduceMilestonePushed(event eventstore.Event) (*ha }, ), nil } - -func (p *milestoneProjection) reduceReachedIfUserEventFunc(msType milestone.Type) func(event eventstore.Event) (*handler.Statement, error) { - return func(event eventstore.Event) (*handler.Statement, error) { - if p.isSystemEvent(event) { - return handler.NewNoOpStatement(event), nil - } - return p.reduceReachedFunc(msType)(event) - } -} - -func (p *milestoneProjection) reduceReachedFunc(msType milestone.Type) func(event eventstore.Event) (*handler.Statement, error) { - return func(event eventstore.Event) (*handler.Statement, error) { - return handler.NewUpdateStatement(event, []handler.Column{ - handler.NewCol(MilestoneColumnReachedDate, event.CreatedAt()), - }, - []handler.Condition{ - handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID), - handler.NewCond(MilestoneColumnType, msType), - handler.NewIsNullCond(MilestoneColumnReachedDate), - }), nil - } -} - -func (p *milestoneProjection) reduceAppConfigAdded(event eventstore.Event, clientID string) (*handler.Statement, error) { - if !p.isSystemEvent(event) { - return handler.NewNoOpStatement(event), nil - } - return handler.NewUpdateStatement( - event, - []handler.Column{ - handler.NewArrayAppendCol(MilestoneColumnIgnoreClientIDs, clientID), - }, - []handler.Condition{ - handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID), - handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnApplication), - handler.NewIsNullCond(MilestoneColumnReachedDate), - }, - ), nil -} - -func (p *milestoneProjection) isSystemEvent(event eventstore.Event) bool { - if userId, err := strconv.Atoi(event.Creator()); err == nil && userId > 0 { - return false - } - - // check if it is a hard coded event creator - for _, creator := range []string{"", "system", "OIDC", "LOGIN", "SYSTEM"} { - if creator == event.Creator() { - return true - } - } - - _, ok := p.systemUsers[event.Creator()] - return ok -} diff --git a/internal/query/projection/milestones_test.go b/internal/query/projection/milestones_test.go index 0f443c9f19..4216e01636 100644 --- a/internal/query/projection/milestones_test.go +++ b/internal/query/projection/milestones_test.go @@ -4,13 +4,11 @@ import ( "testing" "time" - "github.com/zitadel/zitadel/internal/database" + "github.com/stretchr/testify/require" + "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore/handler/v2" - "github.com/zitadel/zitadel/internal/repository/instance" "github.com/zitadel/zitadel/internal/repository/milestone" - "github.com/zitadel/zitadel/internal/repository/oidcsession" - "github.com/zitadel/zitadel/internal/repository/project" "github.com/zitadel/zitadel/internal/zerrors" ) @@ -19,6 +17,8 @@ func TestMilestonesProjection_reduces(t *testing.T) { event func(t *testing.T) eventstore.Event } now := time.Now() + date, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(t, err) tests := []struct { name string args args @@ -29,292 +29,54 @@ func TestMilestonesProjection_reduces(t *testing.T) { name: "reduceInstanceAdded", args: args{ event: 
getEvent(timedTestEvent( - instance.InstanceAddedEventType, - instance.AggregateType, - []byte(`{}`), + milestone.ReachedEventType, + milestone.AggregateType, + []byte(`{"type": "instance_created"}`), now, - ), instance.InstanceAddedEventMapper), + withVersion(milestone.AggregateVersion), + ), milestone.ReachedEventMapper), }, - reduce: (&milestoneProjection{}).reduceInstanceAdded, + reduce: (&milestoneProjection{}).reduceReached, want: wantReduce{ - aggregateType: eventstore.AggregateType("instance"), + aggregateType: eventstore.AggregateType("milestone"), sequence: 15, executer: &testExecuter{ executions: []execution{ { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type, reached_date) VALUES ($1, $2, $3)", + expectedStmt: "INSERT INTO projections.milestones2 (instance_id, type, reached_date) VALUES ($1, $2, $3)", expectedArgs: []interface{}{ "instance-id", milestone.InstanceCreated, now, }, }, - { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)", - expectedArgs: []interface{}{ - "instance-id", - milestone.AuthenticationSucceededOnInstance, - }, - }, - { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)", - expectedArgs: []interface{}{ - "instance-id", - milestone.ProjectCreated, - }, - }, - { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)", - expectedArgs: []interface{}{ - "instance-id", - milestone.ApplicationCreated, - }, - }, - { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)", - expectedArgs: []interface{}{ - "instance-id", - milestone.AuthenticationSucceededOnApplication, - }, - }, - { - expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)", - expectedArgs: []interface{}{ - "instance-id", - milestone.InstanceDeleted, - }, - }, }, }, }, }, { - name: "reduceInstancePrimaryDomainSet", - args: args{ - event: getEvent(testEvent( - instance.InstanceDomainPrimarySetEventType, - instance.AggregateType, - []byte(`{"domain": "my.domain"}`), - ), instance.DomainPrimarySetEventMapper), - }, - reduce: (&milestoneProjection{}).reduceInstanceDomainPrimarySet, - want: wantReduce{ - aggregateType: eventstore.AggregateType("instance"), - sequence: 15, - executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET primary_domain = $1 WHERE (instance_id = $2) AND (last_pushed_date IS NULL)", - expectedArgs: []interface{}{ - "my.domain", - "instance-id", - }, - }, - }, - }, - }, - }, - { - name: "reduceProjectAdded", + name: "reduceInstanceAdded with reached date", args: args{ event: getEvent(timedTestEvent( - project.ProjectAddedType, - project.AggregateType, - []byte(`{}`), + milestone.ReachedEventType, + milestone.AggregateType, + []byte(`{"type": "instance_created", "reachedDate":"2006-01-02T15:04:05Z"}`), now, - ), project.ProjectAddedEventMapper), + withVersion(milestone.AggregateVersion), + ), milestone.ReachedEventMapper), }, - reduce: (&milestoneProjection{}).reduceProjectAdded, + reduce: (&milestoneProjection{}).reduceReached, want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), + aggregateType: eventstore.AggregateType("milestone"), sequence: 15, executer: &testExecuter{ executions: []execution{ { - expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", + expectedStmt: "INSERT INTO projections.milestones2 (instance_id, type, reached_date) 
VALUES ($1, $2, $3)", expectedArgs: []interface{}{ - now, "instance-id", - milestone.ProjectCreated, - }, - }, - }, - }, - }, - }, - { - name: "reduceApplicationAdded", - args: args{ - event: getEvent(timedTestEvent( - project.ApplicationAddedType, - project.AggregateType, - []byte(`{}`), - now, - ), project.ApplicationAddedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceApplicationAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), - sequence: 15, - executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - now, - "instance-id", - milestone.ApplicationCreated, - }, - }, - }, - }, - }, - }, - { - name: "reduceOIDCConfigAdded user event", - args: args{ - event: getEvent(testEvent( - project.OIDCConfigAddedType, - project.AggregateType, - []byte(`{}`), - ), project.OIDCConfigAddedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceOIDCConfigAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), - sequence: 15, - executer: &testExecuter{}, - }, - }, - { - name: "reduceOIDCConfigAdded system event", - args: args{ - event: getEvent(toSystemEvent(testEvent( - project.OIDCConfigAddedType, - project.AggregateType, - []byte(`{"clientId": "client-id"}`), - )), project.OIDCConfigAddedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceOIDCConfigAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), - sequence: 15, - executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET ignore_client_ids = array_append(ignore_client_ids, $1) WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - "client-id", - "instance-id", - milestone.AuthenticationSucceededOnApplication, - }, - }, - }, - }, - }, - }, - { - name: "reduceAPIConfigAdded user event", - args: args{ - event: getEvent(testEvent( - project.APIConfigAddedType, - project.AggregateType, - []byte(`{}`), - ), project.APIConfigAddedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceAPIConfigAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), - sequence: 15, - executer: &testExecuter{}, - }, - }, - { - name: "reduceAPIConfigAdded system event", - args: args{ - event: getEvent(toSystemEvent(testEvent( - project.APIConfigAddedType, - project.AggregateType, - []byte(`{"clientId": "client-id"}`), - )), project.APIConfigAddedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceAPIConfigAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("project"), - sequence: 15, - executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET ignore_client_ids = array_append(ignore_client_ids, $1) WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - "client-id", - "instance-id", - milestone.AuthenticationSucceededOnApplication, - }, - }, - }, - }, - }, - }, - { - name: "reduceOIDCSessionAdded", - args: args{ - event: getEvent(timedTestEvent( - oidcsession.AddedType, - oidcsession.AggregateType, - []byte(`{"clientID": "client-id"}`), - now, - ), eventstore.GenericEventMapper[oidcsession.AddedEvent]), - }, - reduce: (&milestoneProjection{}).reduceOIDCSessionAdded, - want: wantReduce{ - aggregateType: eventstore.AggregateType("oidc_session"), - sequence: 15, - 
executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - now, - "instance-id", - milestone.AuthenticationSucceededOnInstance, - }, - }, - { - expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (NOT (ignore_client_ids @> $4)) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - now, - "instance-id", - milestone.AuthenticationSucceededOnApplication, - database.TextArray[string]{"client-id"}, - }, - }, - }, - }, - }, - }, - { - name: "reduceInstanceRemoved", - args: args{ - event: getEvent(timedTestEvent( - instance.InstanceRemovedEventType, - instance.AggregateType, - []byte(`{}`), - now, - ), instance.InstanceRemovedEventMapper), - }, - reduce: (&milestoneProjection{}).reduceInstanceRemoved, - want: wantReduce{ - aggregateType: eventstore.AggregateType("instance"), - sequence: 15, - executer: &testExecuter{ - executions: []execution{ - { - expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)", - expectedArgs: []interface{}{ - now, - "instance-id", - milestone.InstanceDeleted, + milestone.InstanceCreated, + date, }, }, }, @@ -327,18 +89,19 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(timedTestEvent( milestone.PushedEventType, milestone.AggregateType, - []byte(`{"type": "ProjectCreated"}`), + []byte(`{"type": "project_created"}`), now, + withVersion(milestone.AggregateVersion), ), milestone.PushedEventMapper), }, - reduce: (&milestoneProjection{}).reduceMilestonePushed, + reduce: (&milestoneProjection{}).reducePushed, want: wantReduce{ aggregateType: eventstore.AggregateType("milestone"), sequence: 15, executer: &testExecuter{ executions: []execution{ { - expectedStmt: "UPDATE projections.milestones SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", + expectedStmt: "UPDATE projections.milestones2 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", expectedArgs: []interface{}{ now, "instance-id", @@ -349,23 +112,53 @@ func TestMilestonesProjection_reduces(t *testing.T) { }, }, }, + { + name: "reduceMilestonePushed normal milestone with pushed date", + args: args{ + event: getEvent(timedTestEvent( + milestone.PushedEventType, + milestone.AggregateType, + []byte(`{"type": "project_created", "pushedDate":"2006-01-02T15:04:05Z"}`), + now, + withVersion(milestone.AggregateVersion), + ), milestone.PushedEventMapper), + }, + reduce: (&milestoneProjection{}).reducePushed, + want: wantReduce{ + aggregateType: eventstore.AggregateType("milestone"), + sequence: 15, + executer: &testExecuter{ + executions: []execution{ + { + expectedStmt: "UPDATE projections.milestones2 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", + expectedArgs: []interface{}{ + date, + "instance-id", + milestone.ProjectCreated, + }, + }, + }, + }, + }, + }, { name: "reduceMilestonePushed instance deleted milestone", args: args{ event: getEvent(testEvent( milestone.PushedEventType, milestone.AggregateType, - []byte(`{"type": "InstanceDeleted"}`), + []byte(`{"type": "instance_deleted"}`), + withVersion(milestone.AggregateVersion), ), milestone.PushedEventMapper), }, - reduce: (&milestoneProjection{}).reduceMilestonePushed, + reduce: (&milestoneProjection{}).reducePushed, want: wantReduce{ aggregateType: 
eventstore.AggregateType("milestone"), sequence: 15, executer: &testExecuter{ executions: []execution{ { - expectedStmt: "DELETE FROM projections.milestones WHERE (instance_id = $1)", + expectedStmt: "DELETE FROM projections.milestones2 WHERE (instance_id = $1)", expectedArgs: []interface{}{ "instance-id", }, diff --git a/internal/query/projection/projection.go b/internal/query/projection/projection.go index c4660c6c38..a23ae72330 100644 --- a/internal/query/projection/projection.go +++ b/internal/query/projection/projection.go @@ -156,7 +156,7 @@ func Create(ctx context.Context, sqlClient *database.DB, es handler.EventStore, DeviceAuthProjection = newDeviceAuthProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["device_auth"])) SessionProjection = newSessionProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["sessions"])) AuthRequestProjection = newAuthRequestProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["auth_requests"])) - MilestoneProjection = newMilestoneProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["milestones"]), systemUsers) + MilestoneProjection = newMilestoneProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["milestones"])) QuotaProjection = newQuotaProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["quotas"])) LimitsProjection = newLimitsProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["limits"])) RestrictionsProjection = newRestrictionsProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["restrictions"])) diff --git a/internal/repository/milestone/aggregate.go b/internal/repository/milestone/aggregate.go index bb9ca99cb3..7b8b624d95 100644 --- a/internal/repository/milestone/aggregate.go +++ b/internal/repository/milestone/aggregate.go @@ -9,20 +9,23 @@ import ( const ( AggregateType = "milestone" - AggregateVersion = "v1" + AggregateVersion = "v2" ) type Aggregate struct { eventstore.Aggregate } -func NewAggregate(ctx context.Context, id string) *Aggregate { - instanceID := authz.GetInstance(ctx).InstanceID() +func NewAggregate(ctx context.Context) *Aggregate { + return NewInstanceAggregate(authz.GetInstance(ctx).InstanceID()) +} + +func NewInstanceAggregate(instanceID string) *Aggregate { return &Aggregate{ Aggregate: eventstore.Aggregate{ Type: AggregateType, Version: AggregateVersion, - ID: id, + ID: instanceID, ResourceOwner: instanceID, InstanceID: instanceID, }, diff --git a/internal/repository/milestone/events.go b/internal/repository/milestone/events.go index efd25949b1..a149736a5f 100644 --- a/internal/repository/milestone/events.go +++ b/internal/repository/milestone/events.go @@ -2,23 +2,88 @@ package milestone import ( "context" + "time" "github.com/zitadel/zitadel/internal/eventstore" ) +//go:generate enumer -type Type -json -linecomment -transform=snake +type Type int + const ( - eventTypePrefix = eventstore.EventType("milestone.") - PushedEventType = eventTypePrefix + "pushed" + InstanceCreated Type = iota + AuthenticationSucceededOnInstance + ProjectCreated + ApplicationCreated + AuthenticationSucceededOnApplication + InstanceDeleted ) -var _ eventstore.Command = (*PushedEvent)(nil) +const ( + eventTypePrefix = "milestone." 
+ ReachedEventType = eventTypePrefix + "reached" + PushedEventType = eventTypePrefix + "pushed" +) + +type ReachedEvent struct { + *eventstore.BaseEvent `json:"-"` + MilestoneType Type `json:"type"` + ReachedDate *time.Time `json:"reachedDate,omitempty"` // Defaults to [eventstore.BaseEvent.Creation] when empty +} + +// Payload implements eventstore.Command. +func (e *ReachedEvent) Payload() any { + return e +} + +func (e *ReachedEvent) UniqueConstraints() []*eventstore.UniqueConstraint { + return nil +} + +func (e *ReachedEvent) SetBaseEvent(b *eventstore.BaseEvent) { + e.BaseEvent = b +} + +func (e *ReachedEvent) GetReachedDate() time.Time { + if e.ReachedDate != nil { + return *e.ReachedDate + } + return e.Creation +} + +func NewReachedEvent( + ctx context.Context, + aggregate *Aggregate, + typ Type, +) *ReachedEvent { + return NewReachedEventWithDate(ctx, aggregate, typ, nil) +} + +// NewReachedEventWithDate creates a [ReachedEvent] with a fixed Reached Date. +func NewReachedEventWithDate( + ctx context.Context, + aggregate *Aggregate, + typ Type, + reachedDate *time.Time, +) *ReachedEvent { + return &ReachedEvent{ + BaseEvent: eventstore.NewBaseEventForPush( + ctx, + &aggregate.Aggregate, + ReachedEventType, + ), + MilestoneType: typ, + ReachedDate: reachedDate, + } +} type PushedEvent struct { *eventstore.BaseEvent `json:"-"` - MilestoneType Type `json:"type"` - ExternalDomain string `json:"externalDomain"` - PrimaryDomain string `json:"primaryDomain"` - Endpoints []string `json:"endpoints"` + MilestoneType Type `json:"type"` + ExternalDomain string `json:"externalDomain"` + PrimaryDomain string `json:"primaryDomain"` + Endpoints []string `json:"endpoints"` + PushedDate *time.Time `json:"pushedDate,omitempty"` // Defaults to [eventstore.BaseEvent.Creation] when empty } // Payload implements eventstore.Command. @@ -34,14 +99,31 @@ func (p *PushedEvent) SetBaseEvent(b *eventstore.BaseEvent) { p.BaseEvent = b } -var PushedEventMapper = eventstore.GenericEventMapper[PushedEvent] +func (e *PushedEvent) GetPushedDate() time.Time { + if e.PushedDate != nil { + return *e.PushedDate + } + return e.Creation +} func NewPushedEvent( ctx context.Context, aggregate *Aggregate, - msType Type, + typ Type, endpoints []string, - externalDomain, primaryDomain string, + externalDomain string, +) *PushedEvent { + return NewPushedEventWithDate(ctx, aggregate, typ, endpoints, externalDomain, nil) +} + +// NewPushedEventWithDate creates a [PushedEvent] with a fixed Pushed Date. 
+func NewPushedEventWithDate( + ctx context.Context, + aggregate *Aggregate, + typ Type, + endpoints []string, + externalDomain string, + pushedDate *time.Time, ) *PushedEvent { return &PushedEvent{ BaseEvent: eventstore.NewBaseEventForPush( @@ -49,9 +131,9 @@ func NewPushedEvent( &aggregate.Aggregate, PushedEventType, ), - MilestoneType: msType, + MilestoneType: typ, Endpoints: endpoints, ExternalDomain: externalDomain, - PrimaryDomain: primaryDomain, + PushedDate: pushedDate, } } diff --git a/internal/repository/milestone/eventstore.go b/internal/repository/milestone/eventstore.go index 18b94d702e..10ff3ddb31 100644 --- a/internal/repository/milestone/eventstore.go +++ b/internal/repository/milestone/eventstore.go @@ -4,6 +4,12 @@ import ( "github.com/zitadel/zitadel/internal/eventstore" ) +var ( + ReachedEventMapper = eventstore.GenericEventMapper[ReachedEvent] + PushedEventMapper = eventstore.GenericEventMapper[PushedEvent] +) + func init() { + eventstore.RegisterFilterEventMapper(AggregateType, ReachedEventType, ReachedEventMapper) eventstore.RegisterFilterEventMapper(AggregateType, PushedEventType, PushedEventMapper) } diff --git a/internal/repository/milestone/type.go b/internal/repository/milestone/type.go deleted file mode 100644 index f57bb032ee..0000000000 --- a/internal/repository/milestone/type.go +++ /dev/null @@ -1,59 +0,0 @@ -//go:generate stringer -type Type - -package milestone - -import ( - "fmt" - "strings" -) - -type Type int - -const ( - unknown Type = iota - - InstanceCreated - AuthenticationSucceededOnInstance - ProjectCreated - ApplicationCreated - AuthenticationSucceededOnApplication - InstanceDeleted - - typesCount -) - -func AllTypes() []Type { - types := make([]Type, typesCount-1) - for i := Type(1); i < typesCount; i++ { - types[i-1] = i - } - return types -} - -func (t *Type) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, t.String())), nil -} - -func (t *Type) UnmarshalJSON(data []byte) error { - *t = typeFromString(strings.Trim(string(data), `"`)) - return nil -} - -func typeFromString(t string) Type { - switch t { - case InstanceCreated.String(): - return InstanceCreated - case AuthenticationSucceededOnInstance.String(): - return AuthenticationSucceededOnInstance - case ProjectCreated.String(): - return ProjectCreated - case ApplicationCreated.String(): - return ApplicationCreated - case AuthenticationSucceededOnApplication.String(): - return AuthenticationSucceededOnApplication - case InstanceDeleted.String(): - return InstanceDeleted - default: - return unknown - } -} diff --git a/internal/repository/milestone/type_enumer.go b/internal/repository/milestone/type_enumer.go new file mode 100644 index 0000000000..696db3f457 --- /dev/null +++ b/internal/repository/milestone/type_enumer.go @@ -0,0 +1,112 @@ +// Code generated by "enumer -type Type -json -linecomment -transform=snake"; DO NOT EDIT. 
+ +package milestone + +import ( + "encoding/json" + "fmt" + "strings" +) + +const _TypeName = "instance_createdauthentication_succeeded_on_instanceproject_createdapplication_createdauthentication_succeeded_on_applicationinstance_deleted" + +var _TypeIndex = [...]uint8{0, 16, 52, 67, 86, 125, 141} + +const _TypeLowerName = "instance_createdauthentication_succeeded_on_instanceproject_createdapplication_createdauthentication_succeeded_on_applicationinstance_deleted" + +func (i Type) String() string { + if i < 0 || i >= Type(len(_TypeIndex)-1) { + return fmt.Sprintf("Type(%d)", i) + } + return _TypeName[_TypeIndex[i]:_TypeIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _TypeNoOp() { + var x [1]struct{} + _ = x[InstanceCreated-(0)] + _ = x[AuthenticationSucceededOnInstance-(1)] + _ = x[ProjectCreated-(2)] + _ = x[ApplicationCreated-(3)] + _ = x[AuthenticationSucceededOnApplication-(4)] + _ = x[InstanceDeleted-(5)] +} + +var _TypeValues = []Type{InstanceCreated, AuthenticationSucceededOnInstance, ProjectCreated, ApplicationCreated, AuthenticationSucceededOnApplication, InstanceDeleted} + +var _TypeNameToValueMap = map[string]Type{ + _TypeName[0:16]: InstanceCreated, + _TypeLowerName[0:16]: InstanceCreated, + _TypeName[16:52]: AuthenticationSucceededOnInstance, + _TypeLowerName[16:52]: AuthenticationSucceededOnInstance, + _TypeName[52:67]: ProjectCreated, + _TypeLowerName[52:67]: ProjectCreated, + _TypeName[67:86]: ApplicationCreated, + _TypeLowerName[67:86]: ApplicationCreated, + _TypeName[86:125]: AuthenticationSucceededOnApplication, + _TypeLowerName[86:125]: AuthenticationSucceededOnApplication, + _TypeName[125:141]: InstanceDeleted, + _TypeLowerName[125:141]: InstanceDeleted, +} + +var _TypeNames = []string{ + _TypeName[0:16], + _TypeName[16:52], + _TypeName[52:67], + _TypeName[67:86], + _TypeName[86:125], + _TypeName[125:141], +} + +// TypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func TypeString(s string) (Type, error) { + if val, ok := _TypeNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _TypeNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Type values", s) +} + +// TypeValues returns all values of the enum +func TypeValues() []Type { + return _TypeValues +} + +// TypeStrings returns a slice of all String values of the enum +func TypeStrings() []string { + strs := make([]string, len(_TypeNames)) + copy(strs, _TypeNames) + return strs +} + +// IsAType returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i Type) IsAType() bool { + for _, v := range _TypeValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for Type +func (i Type) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Type +func (i *Type) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("Type should be a string, got %s", data) + } + + var err error + *i, err = TypeString(s) + return err +} diff --git a/internal/repository/milestone/type_string.go b/internal/repository/milestone/type_string.go deleted file mode 100644 index ce6a98441c..0000000000 --- a/internal/repository/milestone/type_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package milestone - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[unknown-0] - _ = x[InstanceCreated-1] - _ = x[AuthenticationSucceededOnInstance-2] - _ = x[ProjectCreated-3] - _ = x[ApplicationCreated-4] - _ = x[AuthenticationSucceededOnApplication-5] - _ = x[InstanceDeleted-6] - _ = x[typesCount-7] -} - -const _Type_name = "unknownInstanceCreatedAuthenticationSucceededOnInstanceProjectCreatedApplicationCreatedAuthenticationSucceededOnApplicationInstanceDeletedtypesCount" - -var _Type_index = [...]uint8{0, 7, 22, 55, 69, 87, 123, 138, 148} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} From 1426823d4076e67b3c71089f401b4019dcea0bd2 Mon Sep 17 00:00:00 2001 From: ChandanChainani Date: Mon, 28 Oct 2024 18:06:57 +0530 Subject: [PATCH 18/30] fix(mac): date command options not found #8757 (#8758) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Which Problems Are Solved Closes: #8757 Problem: ```sh ➜ ~ date --rfc-3339=seconds | sed 's/ /T/' date: illegal option -- - usage: date [-jnRu] [-I[date|hours|minutes|seconds]] [-f input_fmt] [-r filename|seconds] [-v[+|-]val[y|m|w|d|H|M|S]] [[[[mm]dd]HH]MM[[cc]yy][.SS] | new_date] [+output_fmt] ``` # How the Problems Are Solved ```sh ➜ date "+%Y-%m-%dT%T%z" | sed -E 's/.([0-9]{2})([0-9]{2})$/-\1:\2/' 2024-10-10T19:09:53-05:30 ➜ TZ=America/Los_Angeles date "+%Y-%m-%dT%T%z" | sed -E 's/.([0-9]{2})([0-9]{2})$/-\1:\2/' 2024-10-10T06:39:41-07:00 ``` - `Mac` support while compiling the source code locally Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 79aaa7f1b2..e728e42b01 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ go_bin := "$$(go env GOPATH)/bin" gen_authopt_path := "$(go_bin)/protoc-gen-authoption" gen_zitadel_path := "$(go_bin)/protoc-gen-zitadel" -now := $(shell date --rfc-3339=seconds | sed 's/ /T/') +now := $(shell date '+%Y-%m-%dT%T%z' | sed -E 's/.([0-9]{2})([0-9]{2})$$/-\1:\2/') VERSION ?= development-$(now) COMMIT_SHA ?= $(shell git rev-parse HEAD) ZITADEL_IMAGE ?= zitadel:local From cff4fe5dfd1c615ca5e7fa2169ff34c4b2ddc732 Mon Sep 17 00:00:00 2001 From: Elio Bischof Date: Tue, 29 Oct 2024 20:02:04 +0100 Subject: [PATCH 19/30] docs: fix and harmonize docker compose files 
(#8839) # Which Problems Are Solved 1. Postgres spams FATAL: role "root" does not exist as mentioned in https://github.com/zitadel/zitadel/discussions/7832 (even with -U) 2. The compose commands for a ZITADEL deployment with initial service account key don't work out-of-the box with a non-root user, because docker creates non-existing directories to bind-mount with root ownership. ![image](https://github.com/user-attachments/assets/f2fc92d5-2ff4-47a4-bf4d-e9657aa2bb94) ``` time="2024-10-29T09:37:13Z" level=error msg="migration failed" caller="/home/runner/work/zitadel/zitadel/internal/migration/migration.go:68" error="open /machinekey/zitadel-admin-sa.json: permission denied" name=03_default_instance time="2024-10-29T09:37:13Z" level=fatal msg="migration failed" caller="/home/runner/work/zitadel/zitadel/cmd/setup/setup.go:248" error="open /machinekey/zitadel-admin-sa.json: permission denied" name=03_default_instance ``` # How the Problems Are Solved 1. The branch bases on https://github.com/zitadel/zitadel/pull/8826. The env vars are cleaned up and prettified across compose files. 2. A command is added to the docs that creates the directory with the current users permission. The ZITADEL container runs with the current users ID. # Additional Context - Replaces https://github.com/zitadel/zitadel/pull/8826 - Discussion https://github.com/zitadel/zitadel/discussions/7832 - Closes https://github.com/zitadel/zitadel/issues/7725 --------- Co-authored-by: m4tu4g <71326926+m4tu4g@users.noreply.github.com> --- .devcontainer/docker-compose.yml | 23 ++-- docs/docs/self-hosting/deploy/compose.mdx | 3 + .../deploy/docker-compose-sa.yaml | 38 +++---- .../self-hosting/deploy/docker-compose.yaml | 27 ++--- .../manage/reverseproxy/docker-compose.yaml | 106 +++++++++--------- 5 files changed, 98 insertions(+), 99 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index fd92959d3f..cece28632b 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -8,25 +8,24 @@ services: network_mode: service:db command: sleep infinity environment: - - 'ZITADEL_DATABASE_POSTGRES_HOST=db' - - 'ZITADEL_DATABASE_POSTGRES_PORT=5432' - - 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' - - 'ZITADEL_EXTERNALSECURE=false' + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable + ZITADEL_EXTERNALSECURE: false db: image: postgres:latest restart: unless-stopped volumes: - postgres-data:/var/lib/postgresql/data environment: + PGUSER: postgres POSTGRES_PASSWORD: postgres - POSTGRES_USER: postgres - POSTGRES_DB: postgres volumes: postgres-data: diff --git a/docs/docs/self-hosting/deploy/compose.mdx b/docs/docs/self-hosting/deploy/compose.mdx index 9d8efae1ad..370c0e7f5d 100644 --- a/docs/docs/self-hosting/deploy/compose.mdx +++ b/docs/docs/self-hosting/deploy/compose.mdx @@ 
-51,6 +51,9 @@ By executing the commands below, you will download the following file: # Download the docker compose example configuration. wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose-sa.yaml -O docker-compose.yaml +# create the machine key directory +mkdir machinekey + # Run the database and application containers. docker compose up --detach diff --git a/docs/docs/self-hosting/deploy/docker-compose-sa.yaml b/docs/docs/self-hosting/deploy/docker-compose-sa.yaml index 4e43f9f8dc..95608fd76d 100644 --- a/docs/docs/self-hosting/deploy/docker-compose-sa.yaml +++ b/docs/docs/self-hosting/deploy/docker-compose-sa.yaml @@ -1,27 +1,27 @@ -version: '3.8' - services: zitadel: + # The user should have the permission to write to ./machinekey + user: "${UID:-1000}" restart: 'always' networks: - 'zitadel' image: 'ghcr.io/zitadel/zitadel:latest' command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled' environment: - - 'ZITADEL_DATABASE_POSTGRES_HOST=db' - - 'ZITADEL_DATABASE_POSTGRES_PORT=5432' - - 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' - - 'ZITADEL_EXTERNALSECURE=false' - - 'ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH=/machinekey/zitadel-admin-sa.json' - - 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME=zitadel-admin-sa' - - 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME=Admin' - - 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE=1' + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable + ZITADEL_EXTERNALSECURE: false + ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH: /machinekey/zitadel-admin-sa.json + ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME: zitadel-admin-sa + ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME: Admin + ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE: 1 depends_on: db: condition: 'service_healthy' @@ -34,12 +34,12 @@ services: restart: 'always' image: postgres:16-alpine environment: - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=postgres + PGUSER: postgres + POSTGRES_PASSWORD: postgres networks: - 'zitadel' healthcheck: - test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"] + test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"] interval: '10s' timeout: '30s' retries: 5 diff --git a/docs/docs/self-hosting/deploy/docker-compose.yaml b/docs/docs/self-hosting/deploy/docker-compose.yaml index 289c80d5b1..e32700ace4 100644 --- a/docs/docs/self-hosting/deploy/docker-compose.yaml +++ b/docs/docs/self-hosting/deploy/docker-compose.yaml @@ -1,5 +1,3 @@ -version: '3.8' - services: zitadel: restart: 'always' @@ -8,16 +6,16 @@ services: image: 'ghcr.io/zitadel/zitadel:latest' command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled' environment: - - 'ZITADEL_DATABASE_POSTGRES_HOST=db' - - 'ZITADEL_DATABASE_POSTGRES_PORT=5432' - - 
'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' - - 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' - - 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' - - 'ZITADEL_EXTERNALSECURE=false' + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable + ZITADEL_EXTERNALSECURE: false depends_on: db: condition: 'service_healthy' @@ -28,9 +26,8 @@ services: restart: 'always' image: postgres:16-alpine environment: - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=postgres - - POSTGRES_DB=zitadel + PGUSER: postgres + POSTGRES_PASSWORD: postgres networks: - 'zitadel' healthcheck: diff --git a/docs/docs/self-hosting/manage/reverseproxy/docker-compose.yaml b/docs/docs/self-hosting/manage/reverseproxy/docker-compose.yaml index 851f012a7c..d7d929fa44 100644 --- a/docs/docs/self-hosting/manage/reverseproxy/docker-compose.yaml +++ b/docs/docs/self-hosting/manage/reverseproxy/docker-compose.yaml @@ -7,19 +7,19 @@ services: service: zitadel-init command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' environment: - - ZITADEL_EXTERNALPORT=80 - - ZITADEL_EXTERNALSECURE=false - - ZITADEL_TLS_ENABLED=false + ZITADEL_EXTERNALPORT: 80 + ZITADEL_EXTERNALSECURE: false + ZITADEL_TLS_ENABLED: false # database configuration - - ZITADEL_DATABASE_POSTGRES_HOST=db - - ZITADEL_DATABASE_POSTGRES_PORT=5432 - - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel - - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user - - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw - - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable - - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root - - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres - - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable networks: - 'zitadel' depends_on: @@ -33,19 +33,19 @@ services: service: zitadel-init command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' environment: - - ZITADEL_EXTERNALPORT=443 - - ZITADEL_EXTERNALSECURE=true - - ZITADEL_TLS_ENABLED=false + ZITADEL_EXTERNALPORT: 443 + ZITADEL_EXTERNALSECURE: true + ZITADEL_TLS_ENABLED: false # database configuration - - ZITADEL_DATABASE_POSTGRES_HOST=db - - ZITADEL_DATABASE_POSTGRES_PORT=5432 - - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel - - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user - - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw - - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable - - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root 
- - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres - - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable networks: - 'zitadel' depends_on: @@ -59,21 +59,21 @@ services: service: zitadel-init command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' environment: - - ZITADEL_EXTERNALPORT=443 - - ZITADEL_EXTERNALSECURE=true - - ZITADEL_TLS_ENABLED=true - - ZITADEL_TLS_CERTPATH=/etc/certs/selfsigned.crt - - ZITADEL_TLS_KEYPATH=/etc/certs/selfsigned.key + ZITADEL_EXTERNALPORT: 443 + ZITADEL_EXTERNALSECURE: true + ZITADEL_TLS_ENABLED: true + ZITADEL_TLS_CERTPATH: /etc/certs/selfsigned.crt + ZITADEL_TLS_KEYPATH: /etc/certs/selfsigned.key # database configuration - - ZITADEL_DATABASE_POSTGRES_HOST=db - - ZITADEL_DATABASE_POSTGRES_PORT=5432 - - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel - - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user - - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw - - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable - - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root - - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres - - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable volumes: - ./selfsigned.crt:/etc/certs/selfsigned.crt - ./selfsigned.key:/etc/certs/selfsigned.key @@ -96,22 +96,22 @@ services: # Using an external domain other than localhost proofs, that the proxy configuration works. # If ZITADEL can't resolve a requests original host to this domain, # it will return a 404 Instance not found error. - - ZITADEL_EXTERNALDOMAIN=127.0.0.1.sslip.io + ZITADEL_EXTERNALDOMAIN: 127.0.0.1.sslip.io # In case something doesn't work as expected, # it can be handy to be able to read the access logs. - - ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED=true + ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED: true # For convenience, ZITADEL should not ask to change the initial admin users password. 
- - ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false + ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED: false # database configuration - - ZITADEL_DATABASE_POSTGRES_HOST=db - - ZITADEL_DATABASE_POSTGRES_PORT=5432 - - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel - - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user - - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw - - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable - - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root - - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres - - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable + ZITADEL_DATABASE_POSTGRES_HOST: db + ZITADEL_DATABASE_POSTGRES_PORT: 5432 + ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable networks: - 'zitadel' healthcheck: @@ -125,10 +125,10 @@ services: restart: 'always' image: postgres:16-alpine environment: - - POSTGRES_USER=root - - POSTGRES_PASSWORD=postgres + PGUSER: root + POSTGRES_PASSWORD: postgres healthcheck: - test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"] + test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"] interval: 5s timeout: 60s retries: 10 From 6780c5a07ca491690e0af6d8baeac9aa5d69cabe Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Wed, 30 Oct 2024 09:53:00 +0100 Subject: [PATCH 20/30] fix: add resourceowner to check for project in project grant (#8785) # Which Problems Are Solved Resource owner can be different than expected if the provided x-zitadel-orgid header is provided. # How the Problems Are Solved Check that the project is only checked with the correct resource owner to avoid unexpected situations. 
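A minimal sketch of the idea with made-up types (the authoritative change is the diff below): the precondition read model is constructed with the expected resource owner and skips project events emitted by any other organization, so a project with the same ID in a different org can no longer satisfy the check.

```go
// Simplified sketch only — not ZITADEL's actual read-model types.
package main

import "fmt"

type event struct {
	resourceOwner string // org that emitted the event
	typ           string
	roleKey       string
}

type grantPreCondition struct {
	resourceOwner string // org expected to own the project
	projectExists bool
	roleKeys      []string
}

// reduce ignores events from other resource owners, mirroring the fix.
func (p *grantPreCondition) reduce(events []event) {
	for _, e := range events {
		if e.resourceOwner != p.resourceOwner {
			continue
		}
		switch e.typ {
		case "project.added":
			p.projectExists = true
		case "project.removed":
			p.projectExists = false
		case "project.role.added":
			p.roleKeys = append(p.roleKeys, e.roleKey)
		}
	}
}

func main() {
	p := &grantPreCondition{resourceOwner: "org1"}
	p.reduce([]event{
		{resourceOwner: "otherorg", typ: "project.added"},                       // ignored
		{resourceOwner: "otherorg", typ: "project.role.added", roleKey: "key1"}, // ignored
	})
	fmt.Println(p.projectExists, p.roleKeys) // false [] -> precondition fails as expected
}
```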
# Additional Changes None # Additional Context Closes #8685 --------- Co-authored-by: Livio Spring --- internal/command/project_grant.go | 14 ++-- internal/command/project_grant_model.go | 15 ++++- internal/command/project_grant_test.go | 90 +++++++++++++++++++++++++ internal/command/project_old.go | 4 +- 4 files changed, 114 insertions(+), 9 deletions(-) diff --git a/internal/command/project_grant.go b/internal/command/project_grant.go index ef602c4b3e..feb8a29e4b 100644 --- a/internal/command/project_grant.go +++ b/internal/command/project_grant.go @@ -27,7 +27,7 @@ func (c *Commands) AddProjectGrant(ctx context.Context, grant *domain.ProjectGra if !grant.IsValid() { return nil, zerrors.ThrowInvalidArgument(nil, "PROJECT-3b8fs", "Errors.Project.Grant.Invalid") } - err = c.checkProjectGrantPreCondition(ctx, grant) + err = c.checkProjectGrantPreCondition(ctx, grant, resourceOwner) if err != nil { return nil, err } @@ -67,7 +67,7 @@ func (c *Commands) ChangeProjectGrant(ctx context.Context, grant *domain.Project return nil, err } grant.GrantedOrgID = existingGrant.GrantedOrgID - err = c.checkProjectGrantPreCondition(ctx, grant) + err = c.checkProjectGrantPreCondition(ctx, grant, resourceOwner) if err != nil { return nil, err } @@ -255,11 +255,11 @@ func (c *Commands) projectGrantWriteModelByID(ctx context.Context, grantID, proj return writeModel, nil } -func (c *Commands) checkProjectGrantPreCondition(ctx context.Context, projectGrant *domain.ProjectGrant) error { +func (c *Commands) checkProjectGrantPreCondition(ctx context.Context, projectGrant *domain.ProjectGrant, resourceOwner string) error { if !authz.GetFeatures(ctx).ShouldUseImprovedPerformance(feature.ImprovedPerformanceTypeProjectGrant) { - return c.checkProjectGrantPreConditionOld(ctx, projectGrant) + return c.checkProjectGrantPreConditionOld(ctx, projectGrant, resourceOwner) } - existingRoleKeys, err := c.searchProjectGrantState(ctx, projectGrant.AggregateID, projectGrant.GrantedOrgID) + existingRoleKeys, err := c.searchProjectGrantState(ctx, projectGrant.AggregateID, projectGrant.GrantedOrgID, resourceOwner) if err != nil { return err } @@ -270,11 +270,12 @@ func (c *Commands) checkProjectGrantPreCondition(ctx context.Context, projectGra return nil } -func (c *Commands) searchProjectGrantState(ctx context.Context, projectID, grantedOrgID string) (existingRoleKeys []string, err error) { +func (c *Commands) searchProjectGrantState(ctx context.Context, projectID, grantedOrgID, resourceOwner string) (existingRoleKeys []string, err error) { results, err := c.eventstore.Search( ctx, // project state query map[eventstore.FieldType]any{ + eventstore.FieldTypeResourceOwner: resourceOwner, eventstore.FieldTypeAggregateType: project.AggregateType, eventstore.FieldTypeAggregateID: projectID, eventstore.FieldTypeFieldName: project.ProjectStateSearchField, @@ -289,6 +290,7 @@ func (c *Commands) searchProjectGrantState(ctx context.Context, projectID, grant }, // role query map[eventstore.FieldType]any{ + eventstore.FieldTypeResourceOwner: resourceOwner, eventstore.FieldTypeAggregateType: project.AggregateType, eventstore.FieldTypeAggregateID: projectID, eventstore.FieldTypeFieldName: project.ProjectRoleKeySearchField, diff --git a/internal/command/project_grant_model.go b/internal/command/project_grant_model.go index 791fd54fe3..c658b00b69 100644 --- a/internal/command/project_grant_model.go +++ b/internal/command/project_grant_model.go @@ -121,8 +121,9 @@ type ProjectGrantPreConditionReadModel struct { ExistingRoleKeys []string } -func 
NewProjectGrantPreConditionReadModel(projectID, grantedOrgID string) *ProjectGrantPreConditionReadModel { +func NewProjectGrantPreConditionReadModel(projectID, grantedOrgID, resourceOwner string) *ProjectGrantPreConditionReadModel { return &ProjectGrantPreConditionReadModel{ + WriteModel: eventstore.WriteModel{ResourceOwner: resourceOwner}, ProjectID: projectID, GrantedOrgID: grantedOrgID, } @@ -132,12 +133,24 @@ func (wm *ProjectGrantPreConditionReadModel) Reduce() error { for _, event := range wm.Events { switch e := event.(type) { case *project.ProjectAddedEvent: + if e.Aggregate().ResourceOwner != wm.ResourceOwner { + continue + } wm.ProjectExists = true case *project.ProjectRemovedEvent: + if e.Aggregate().ResourceOwner != wm.ResourceOwner { + continue + } wm.ProjectExists = false case *project.RoleAddedEvent: + if e.Aggregate().ResourceOwner != wm.ResourceOwner { + continue + } wm.ExistingRoleKeys = append(wm.ExistingRoleKeys, e.Key) case *project.RoleRemovedEvent: + if e.Aggregate().ResourceOwner != wm.ResourceOwner { + continue + } for i, key := range wm.ExistingRoleKeys { if key == e.Key { copy(wm.ExistingRoleKeys[i:], wm.ExistingRoleKeys[i+1:]) diff --git a/internal/command/project_grant_test.go b/internal/command/project_grant_test.go index 42c6875f06..b6f44c27dd 100644 --- a/internal/command/project_grant_test.go +++ b/internal/command/project_grant_test.go @@ -79,6 +79,50 @@ func TestCommandSide_AddProjectGrant(t *testing.T) { err: zerrors.IsPreconditionFailed, }, }, + { + name: "project not existing in org, precondition error", + fields: fields{ + eventstore: eventstoreExpect( + t, + expectFilter( + eventFromEventPusher( + project.NewProjectAddedEvent(context.Background(), + &project.NewAggregate("project1", "otherorg").Aggregate, + "projectname1", true, true, true, + domain.PrivateLabelingSettingUnspecified, + ), + ), + eventFromEventPusher( + org.NewOrgAddedEvent(context.Background(), + &org.NewAggregate("grantedorg1").Aggregate, + "granted org", + ), + ), + eventFromEventPusher( + project.NewRoleAddedEvent(context.Background(), + &project.NewAggregate("project1", "otherorg").Aggregate, + "key1", + "key", + "", + ), + ), + ), + ), + }, + args: args{ + ctx: context.Background(), + projectGrant: &domain.ProjectGrant{ + ObjectRoot: models.ObjectRoot{ + AggregateID: "project1", + }, + GrantedOrgID: "grantedorg1", + }, + resourceOwner: "org1", + }, + res: res{ + err: zerrors.IsPreconditionFailed, + }, + }, { name: "granted org not existing, precondition error", fields: fields{ @@ -325,6 +369,52 @@ func TestCommandSide_ChangeProjectGrant(t *testing.T) { err: zerrors.IsPreconditionFailed, }, }, + { + name: "project not existing in org, precondition error", + fields: fields{ + eventstore: eventstoreExpect( + t, + expectFilter( + eventFromEventPusher(project.NewGrantAddedEvent(context.Background(), + &project.NewAggregate("project1", "org1").Aggregate, + "projectgrant1", + "grantedorg1", + []string{"key1"}, + )), + ), + expectFilter( + eventFromEventPusher( + project.NewProjectAddedEvent(context.Background(), + &project.NewAggregate("project1", "otherorg").Aggregate, + "projectname1", true, true, true, + domain.PrivateLabelingSettingUnspecified, + ), + ), + eventFromEventPusher( + org.NewOrgAddedEvent(context.Background(), + &org.NewAggregate("grantedorg1").Aggregate, + "granted org", + ), + ), + ), + ), + }, + args: args{ + ctx: context.Background(), + projectGrant: &domain.ProjectGrant{ + ObjectRoot: models.ObjectRoot{ + AggregateID: "project1", + }, + GrantID: "projectgrant1", + 
GrantedOrgID: "grantedorg1", + RoleKeys: []string{"key1"}, + }, + resourceOwner: "org1", + }, + res: res{ + err: zerrors.IsPreconditionFailed, + }, + }, { name: "granted org not existing, precondition error", fields: fields{ diff --git a/internal/command/project_old.go b/internal/command/project_old.go index 434df38a7f..b31b4e58bf 100644 --- a/internal/command/project_old.go +++ b/internal/command/project_old.go @@ -161,8 +161,8 @@ func (c *Commands) removeProjectOld(ctx context.Context, projectID, resourceOwne return writeModelToObjectDetails(&existingProject.WriteModel), nil } -func (c *Commands) checkProjectGrantPreConditionOld(ctx context.Context, projectGrant *domain.ProjectGrant) error { - preConditions := NewProjectGrantPreConditionReadModel(projectGrant.AggregateID, projectGrant.GrantedOrgID) +func (c *Commands) checkProjectGrantPreConditionOld(ctx context.Context, projectGrant *domain.ProjectGrant, resourceOwner string) error { + preConditions := NewProjectGrantPreConditionReadModel(projectGrant.AggregateID, projectGrant.GrantedOrgID, resourceOwner) err := c.eventstore.FilterToQueryReducer(ctx, preConditions) if err != nil { return err From aa211489eec3bf00c879e0cc15f3d8b01e22d4be Mon Sep 17 00:00:00 2001 From: Fabi Date: Thu, 31 Oct 2024 10:24:45 +0100 Subject: [PATCH 21/30] docs: change recommendation from cockroachDB to postgreSQL (#8844) # Which Problems Are Solved As we switched to PostgreSQL with our cloud zitadel, we now want to recommend it instead of CockroachDB. # How the Problems Are Solved Replaced the recommend section with postgresql --- docs/docs/concepts/architecture/software.md | 2 +- docs/docs/concepts/architecture/solution.md | 2 +- docs/docs/self-hosting/deploy/overview.mdx | 2 +- .../self-hosting/manage/database/_postgres.mdx | 1 - docs/docs/self-hosting/manage/production.md | 16 ++++++++-------- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/docs/concepts/architecture/software.md b/docs/docs/concepts/architecture/software.md index cd31f701b2..1a82a02a21 100644 --- a/docs/docs/concepts/architecture/software.md +++ b/docs/docs/concepts/architecture/software.md @@ -144,4 +144,4 @@ The storage layer of ZITADEL is responsible for multiple tasks. For example: - Backup and restore operation for disaster recovery purpose ZITADEL currently supports PostgreSQL and CockroachDB.. -Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide on using one of them. +Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide on using one of them. diff --git a/docs/docs/concepts/architecture/solution.md b/docs/docs/concepts/architecture/solution.md index 710e7ae306..b99b8aa9dc 100644 --- a/docs/docs/concepts/architecture/solution.md +++ b/docs/docs/concepts/architecture/solution.md @@ -11,7 +11,7 @@ Since the storage layer takes the heavy lifting of making sure that data in sync Depending on your projects needs our general recommendation is to run ZITADEL and ZITADELs storage layer across multiple availability zones in the same region or if you need higher guarantees run the storage layer across multiple regions. 
Consult the [CockroachDB documentation](https://www.cockroachlabs.com/docs/) for more details or use the [CockroachCloud Service](https://www.cockroachlabs.com/docs/cockroachcloud/create-an-account.html) Alternatively you can run ZITADEL also with Postgres which is [Enterprise Supported](/docs/support/software-release-cycles-support#partially-supported). -Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it. +Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-postgresql) before you decide to use it. ## Scalability diff --git a/docs/docs/self-hosting/deploy/overview.mdx b/docs/docs/self-hosting/deploy/overview.mdx index 1e55d60cc3..38517c52f4 100644 --- a/docs/docs/self-hosting/deploy/overview.mdx +++ b/docs/docs/self-hosting/deploy/overview.mdx @@ -14,7 +14,7 @@ Choose your platform and run ZITADEL with the most minimal configuration possibl ## Prerequisites - For test environments, ZITADEL does not need many resources, 1 CPU and 512MB memory are more than enough. (With more CPU, the password hashing might be faster) -- A PostgreSQL or CockroachDB as only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use Postgresql. +- A PostgreSQL or CockroachDB as only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide to use Postgresql. ## Releases diff --git a/docs/docs/self-hosting/manage/database/_postgres.mdx b/docs/docs/self-hosting/manage/database/_postgres.mdx index bebb713e66..604d6b39a5 100644 --- a/docs/docs/self-hosting/manage/database/_postgres.mdx +++ b/docs/docs/self-hosting/manage/database/_postgres.mdx @@ -1,7 +1,6 @@ ## ZITADEL with Postgres If you want to use a PostgreSQL database you can [overwrite the default configuration](../configure/configure.mdx). -Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it. Currently versions >= 14 are supported. diff --git a/docs/docs/self-hosting/manage/production.md b/docs/docs/self-hosting/manage/production.md index cdd2a8e965..fde620b13e 100644 --- a/docs/docs/self-hosting/manage/production.md +++ b/docs/docs/self-hosting/manage/production.md @@ -109,17 +109,16 @@ but in the Projections.Customizations.Telemetry section ## Database -### Prefer CockroachDB +### Prefer PostgreSQL ZITADEL supports [CockroachDB](https://www.cockroachlabs.com/) and [PostgreSQL](https://www.postgresql.org/). -We recommend using CockroachDB, -as horizontal scaling is much easier than with PostgreSQL. -Also, if you are concerned about multi-regional data locality, -[the way to go is with CockroachDB](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html). +We recommend using PostgreSQL, as it is the better choice when you want to prioritize performance and latency. + +However, if [multi-regional data locality](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html) is a critical requirement, CockroachDB might be a suitable option. The indexes for the database are optimized using load tests from [ZITADEL Cloud](https://zitadel.com), -which runs with CockroachDB. -If you identify problems with your Postgresql during load tests that indicate that the indexes are not optimized, +which runs with PostgreSQL. 
+If you identify problems with your CockroachDB during load tests that indicate that the indexes are not optimized, please create an issue in our [github repository](https://github.com/zitadel/zitadel). ### Configure ZITADEL @@ -128,7 +127,7 @@ Depending on your environment, you maybe would want to tweak some settings about ```yaml Database: - cockroach: + postgres: Host: localhost Port: 26257 Database: zitadel @@ -140,6 +139,7 @@ Database: Options: "" ``` + You also might want to configure how [projections](/concepts/eventstore/implementation#projections) are computed. These are the default values: ```yaml From 692c9b7aa8883fd2cd40de7d86195cfb10f2afb4 Mon Sep 17 00:00:00 2001 From: Max Peintner Date: Thu, 31 Oct 2024 11:16:30 +0100 Subject: [PATCH 22/30] fix(login): org register script references (#8842) Closes #8838 This fixes a bug of the `/register/org` page where scripts where not referenced correctly Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com> --- .../api/ui/login/static/templates/register_org.html | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/api/ui/login/static/templates/register_org.html b/internal/api/ui/login/static/templates/register_org.html index 2338573eea..dca564ea67 100644 --- a/internal/api/ui/login/static/templates/register_org.html +++ b/internal/api/ui/login/static/templates/register_org.html @@ -112,10 +112,10 @@ - - - - - + + + + + {{template "main-bottom" .}} From 9cf67f30b829bab71adcb94646b1a369c5e60808 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20M=C3=B6hlmann?= Date: Thu, 31 Oct 2024 13:03:40 +0200 Subject: [PATCH 23/30] fix(milestones): offset the type enum (#8849) # Which Problems Are Solved Migration of milestones failed on our QA due to the new milestone Type enum being 0-indexed. The valid range was 0 till 5, inclusive. While on the previous zitadel version this was 1 till 6, inclusive. # How the Problems Are Solved Offset the first constant with `1`. # Additional Changes - none # Additional Context Introduced in https://github.com/zitadel/zitadel/pull/8788 --- internal/repository/milestone/events.go | 2 +- internal/repository/milestone/type_enumer.go | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/internal/repository/milestone/events.go b/internal/repository/milestone/events.go index a149736a5f..86e372e48c 100644 --- a/internal/repository/milestone/events.go +++ b/internal/repository/milestone/events.go @@ -11,7 +11,7 @@ import ( type Type int const ( - InstanceCreated Type = iota + InstanceCreated Type = iota + 1 AuthenticationSucceededOnInstance ProjectCreated ApplicationCreated diff --git a/internal/repository/milestone/type_enumer.go b/internal/repository/milestone/type_enumer.go index 696db3f457..3b32fc6218 100644 --- a/internal/repository/milestone/type_enumer.go +++ b/internal/repository/milestone/type_enumer.go @@ -15,8 +15,9 @@ var _TypeIndex = [...]uint8{0, 16, 52, 67, 86, 125, 141} const _TypeLowerName = "instance_createdauthentication_succeeded_on_instanceproject_createdapplication_createdauthentication_succeeded_on_applicationinstance_deleted" func (i Type) String() string { + i -= 1 if i < 0 || i >= Type(len(_TypeIndex)-1) { - return fmt.Sprintf("Type(%d)", i) + return fmt.Sprintf("Type(%d)", i+1) } return _TypeName[_TypeIndex[i]:_TypeIndex[i+1]] } @@ -25,12 +26,12 @@ func (i Type) String() string { // Re-run the stringer command to generate them again. 
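// Editor's illustration (not part of the generated type_enumer.go): a minimal,
// runnable sketch of why offsetting only the first constant is enough — later
// constants in a Go const block repeat the previous expression, so "iota + 1"
// shifts the whole enum from 0..5 to 1..6, matching the values stored by
// earlier Zitadel versions.
package main

import "fmt"

type milestoneType int

const (
	instanceCreated milestoneType = iota + 1 // 1 (was 0 before the fix)
	authenticationSucceededOnInstance        // 2
	projectCreated                           // 3
	applicationCreated                       // 4
	authenticationSucceededOnApplication     // 5
	instanceDeleted                          // 6
)

func main() {
	fmt.Println(instanceCreated, instanceDeleted) // prints: 1 6
}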
func _TypeNoOp() { var x [1]struct{} - _ = x[InstanceCreated-(0)] - _ = x[AuthenticationSucceededOnInstance-(1)] - _ = x[ProjectCreated-(2)] - _ = x[ApplicationCreated-(3)] - _ = x[AuthenticationSucceededOnApplication-(4)] - _ = x[InstanceDeleted-(5)] + _ = x[InstanceCreated-(1)] + _ = x[AuthenticationSucceededOnInstance-(2)] + _ = x[ProjectCreated-(3)] + _ = x[ApplicationCreated-(4)] + _ = x[AuthenticationSucceededOnApplication-(5)] + _ = x[InstanceDeleted-(6)] } var _TypeValues = []Type{InstanceCreated, AuthenticationSucceededOnInstance, ProjectCreated, ApplicationCreated, AuthenticationSucceededOnApplication, InstanceDeleted} From 041af26917e6cf66ee3022fccc3288eccbfdeb4b Mon Sep 17 00:00:00 2001 From: Livio Spring Date: Thu, 31 Oct 2024 15:57:17 +0100 Subject: [PATCH 24/30] feat(OIDC): add back channel logout (#8837) # Which Problems Are Solved Currently ZITADEL supports RP-initiated logout for clients. Back-channel logout ensures that user sessions are terminated across all connected applications, even if the user closes their browser or loses connectivity providing a more secure alternative for certain use cases. # How the Problems Are Solved If the feature is activated and the client used for the authentication has a back_channel_logout_uri configured, a `session_logout.back_channel` will be registered. Once a user terminates their session, a (notification) handler will send a SET (form POST) to the registered uri containing a logout_token (with the user's ID and session ID). - A new feature "back_channel_logout" is added on system and instance level - A `back_channel_logout_uri` can be managed on OIDC applications - Added a `session_logout` aggregate to register and inform about sent `back_channel` notifications - Added a `SecurityEventToken` channel and `Form`message type in the notification handlers - Added `TriggeredAtOrigin` fields to `HumanSignedOut` and `TerminateSession` events for notification handling - Exported various functions and types in the `oidc` package to be able to reuse for token signing in the back_channel notifier. - To prevent that current existing session termination events will be handled, a setup step is added to set the `current_states` for the `projections.notifications_back_channel_logout` to the current position - [x] requires https://github.com/zitadel/oidc/pull/671 # Additional Changes - Updated all OTEL dependencies to v1.29.0, since OIDC already updated some of them to that version. 
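- For orientation, the sketch below shows roughly what such a back-channel notification looks like on the wire (claim names follow the OIDC Back-Channel Logout 1.0 spec; the URLs, IDs, and delivery code are illustrative and not taken from ZITADEL's notifier implementation):

```go
// Sketch only, assuming spec-shaped claims and placeholder endpoints.
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// Payload of the logout token before signing. The real token is a JWT signed
// with the instance's active signing key; "sub" carries the user ID and
// "sid" the terminated session ID.
const logoutTokenClaims = `{
  "iss": "https://issuer.example.com",
  "aud": "client-id",
  "iat": 1730368000,
  "jti": "unique-token-id",
  "sub": "user-id",
  "sid": "session-id",
  "events": {"http://schemas.openid.net/event/backchannel-logout": {}}
}`

func main() {
	fmt.Println("claims to be signed:", logoutTokenClaims)
	signedLogoutToken := "eyJhbGciOiJSUzI1NiJ9..." // placeholder for the signed JWT

	// SET delivery: form POST with a single logout_token parameter to the
	// back_channel_logout_uri configured on the OIDC application.
	resp, err := http.PostForm(
		"https://rp.example.com/backchannel_logout",
		url.Values{"logout_token": {signedLogoutToken}},
	)
	if err != nil {
		fmt.Println("delivery failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("RP answered with status", resp.StatusCode) // 200 = logout accepted
}
```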
- Single Session Termination feature is correctly checked (fixed feature mapping) # Additional Context - closes https://github.com/zitadel/zitadel/issues/8467 - TODO: - Documentation - UI to be done: https://github.com/zitadel/zitadel/issues/8469 --------- Co-authored-by: Hidde Wieringa --- cmd/defaults.yaml | 1 + cmd/mirror/projections.go | 3 + cmd/setup/37.go | 27 ++ cmd/setup/37.sql | 1 + cmd/setup/38.go | 28 ++ cmd/setup/38.sql | 20 + cmd/setup/config.go | 2 + cmd/setup/setup.go | 7 + cmd/start/start.go | 3 + go.mod | 34 +- go.sum | 68 ++-- internal/api/grpc/feature/v2/converter.go | 4 + .../api/grpc/feature/v2/converter_test.go | 18 + .../project_application_converter.go | 2 + internal/api/grpc/project/application.go | 1 + internal/api/oidc/auth_request.go | 15 +- internal/api/oidc/key.go | 8 +- internal/api/oidc/op.go | 1 + internal/api/oidc/server.go | 3 + internal/api/oidc/token.go | 23 +- internal/api/oidc/token_client_credentials.go | 1 + internal/api/oidc/token_code.go | 1 + internal/api/oidc/token_exchange.go | 4 +- internal/api/oidc/token_jwt_profile.go | 1 + internal/api/oidc/token_refresh.go | 1 + .../eventsourcing/eventstore/user.go | 33 +- .../eventsourcing/view/user_session.go | 4 +- internal/auth/repository/user.go | 6 +- internal/command/instance_domain_test.go | 1 + internal/command/instance_features.go | 4 +- internal/command/instance_features_model.go | 5 + internal/command/instance_test.go | 2 + internal/command/logout_session.go | 24 ++ internal/command/logout_session_model.go | 74 ++++ internal/command/oidc_session.go | 25 +- internal/command/oidc_session_test.go | 356 ++++++++++++++++-- internal/command/project_application_oidc.go | 4 + .../command/project_application_oidc_model.go | 9 + .../command/project_application_oidc_test.go | 22 ++ internal/command/project_converter.go | 1 + internal/command/system_features.go | 4 +- internal/command/system_features_model.go | 5 + internal/command/user_human.go | 17 +- internal/command/user_human_test.go | 41 +- internal/domain/application_oidc.go | 1 + internal/feature/feature.go | 4 +- internal/feature/key_enumer.go | 12 +- internal/notification/channels.go | 12 + internal/notification/channels/set/channel.go | 75 ++++ internal/notification/channels/set/config.go | 14 + .../handlers/back_channel_logout.go | 266 +++++++++++++ internal/notification/handlers/ctx.go | 10 + .../handlers/mock/commands.mock.go | 113 +++--- .../handlers/mock/queries.mock.go | 153 +++++--- internal/notification/handlers/queries.go | 6 + .../handlers/user_notifier_test.go | 5 + internal/notification/messages/form.go | 27 ++ internal/notification/projections.go | 19 +- .../senders/security_event_token.go | 49 +++ internal/notification/types/notification.go | 20 + .../types/security_token_event.go | 27 ++ internal/query/app.go | 13 + internal/query/app_test.go | 37 ++ internal/query/instance_features.go | 1 + internal/query/instance_features_model.go | 4 + internal/query/oidc_client.go | 1 + internal/query/oidc_client_by_id.sql | 4 +- internal/query/projection/app.go | 8 +- internal/query/projection/app_test.go | 19 +- .../query/projection/instance_features.go | 4 + internal/query/projection/system_features.go | 4 + internal/query/system_features.go | 1 + internal/query/system_features_model.go | 3 + .../feature/feature_v2/eventstore.go | 2 + .../repository/feature/feature_v2/feature.go | 2 + internal/repository/project/oidc_config.go | 15 +- internal/repository/session/session.go | 2 + .../repository/sessionlogout/aggregate.go | 26 ++ 
internal/repository/sessionlogout/events.go | 79 ++++ .../repository/sessionlogout/eventstore.go | 15 + internal/repository/user/human.go | 15 +- ...=> active_user_sessions_by_session_id.sql} | 3 +- .../user/repository/view/user_session_view.go | 27 +- proto/zitadel/app.proto | 6 + proto/zitadel/feature/v2/instance.proto | 14 + proto/zitadel/feature/v2/system.proto | 14 + proto/zitadel/management.proto | 12 + 87 files changed, 1778 insertions(+), 280 deletions(-) create mode 100644 cmd/setup/37.go create mode 100644 cmd/setup/37.sql create mode 100644 cmd/setup/38.go create mode 100644 cmd/setup/38.sql create mode 100644 internal/command/logout_session.go create mode 100644 internal/command/logout_session_model.go create mode 100644 internal/notification/channels/set/channel.go create mode 100644 internal/notification/channels/set/config.go create mode 100644 internal/notification/handlers/back_channel_logout.go create mode 100644 internal/notification/messages/form.go create mode 100644 internal/notification/senders/security_event_token.go create mode 100644 internal/notification/types/security_token_event.go create mode 100644 internal/repository/sessionlogout/aggregate.go create mode 100644 internal/repository/sessionlogout/events.go create mode 100644 internal/repository/sessionlogout/eventstore.go rename internal/user/repository/view/{active_user_ids_by_session_id.sql => active_user_sessions_by_session_id.sql} (91%) diff --git a/cmd/defaults.yaml b/cmd/defaults.yaml index a12fe474ba..f691fd2af2 100644 --- a/cmd/defaults.yaml +++ b/cmd/defaults.yaml @@ -411,6 +411,7 @@ OIDC: DefaultLoginURLV2: "/login?authRequest=" # ZITADEL_OIDC_DEFAULTLOGINURLV2 DefaultLogoutURLV2: "/logout?post_logout_redirect=" # ZITADEL_OIDC_DEFAULTLOGOUTURLV2 PublicKeyCacheMaxAge: 24h # ZITADEL_OIDC_PUBLICKEYCACHEMAXAGE + DefaultBackChannelLogoutLifetime: 15m # ZITADEL_OIDC_DEFAULTBACKCHANNELLOGOUTLIFETIME SAML: ProviderConfig: diff --git a/cmd/mirror/projections.go b/cmd/mirror/projections.go index 442609d12a..9b7ec02cb8 100644 --- a/cmd/mirror/projections.go +++ b/cmd/mirror/projections.go @@ -200,6 +200,7 @@ func projections( ctx, config.Projections.Customizations["notifications"], config.Projections.Customizations["notificationsquotas"], + config.Projections.Customizations["backchannel"], config.Projections.Customizations["telemetry"], *config.Telemetry, config.ExternalDomain, @@ -213,6 +214,8 @@ func projections( keys.User, keys.SMTP, keys.SMS, + keys.OIDC, + config.OIDC.DefaultBackChannelLogoutLifetime, ) config.Auth.Spooler.Client = client diff --git a/cmd/setup/37.go b/cmd/setup/37.go new file mode 100644 index 0000000000..1587b5c793 --- /dev/null +++ b/cmd/setup/37.go @@ -0,0 +1,27 @@ +package setup + +import ( + "context" + _ "embed" + + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/eventstore" +) + +var ( + //go:embed 37.sql + addBackChannelLogoutURI string +) + +type Apps7OIDConfigsBackChannelLogoutURI struct { + dbClient *database.DB +} + +func (mig *Apps7OIDConfigsBackChannelLogoutURI) Execute(ctx context.Context, _ eventstore.Event) error { + _, err := mig.dbClient.ExecContext(ctx, addBackChannelLogoutURI) + return err +} + +func (mig *Apps7OIDConfigsBackChannelLogoutURI) String() string { + return "37_apps7_oidc_configs_add_back_channel_logout_uri" +} diff --git a/cmd/setup/37.sql b/cmd/setup/37.sql new file mode 100644 index 0000000000..6c3fdf0dda --- /dev/null +++ b/cmd/setup/37.sql @@ -0,0 +1 @@ +ALTER TABLE IF EXISTS projections.apps7_oidc_configs ADD 
COLUMN IF NOT EXISTS back_channel_logout_uri TEXT; diff --git a/cmd/setup/38.go b/cmd/setup/38.go new file mode 100644 index 0000000000..0a102c9d12 --- /dev/null +++ b/cmd/setup/38.go @@ -0,0 +1,28 @@ +package setup + +import ( + "context" + _ "embed" + + "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/eventstore" +) + +var ( + //go:embed 38.sql + backChannelLogoutCurrentState string +) + +type BackChannelLogoutNotificationStart struct { + dbClient *database.DB + esClient *eventstore.Eventstore +} + +func (mig *BackChannelLogoutNotificationStart) Execute(ctx context.Context, e eventstore.Event) error { + _, err := mig.dbClient.ExecContext(ctx, backChannelLogoutCurrentState, e.Sequence(), e.CreatedAt(), e.Position()) + return err +} + +func (mig *BackChannelLogoutNotificationStart) String() string { + return "38_back_channel_logout_notification_start_" +} diff --git a/cmd/setup/38.sql b/cmd/setup/38.sql new file mode 100644 index 0000000000..d8915fee4f --- /dev/null +++ b/cmd/setup/38.sql @@ -0,0 +1,20 @@ +INSERT INTO projections.current_states ( + instance_id + , projection_name + , last_updated + , sequence + , event_date + , position + , filter_offset +) + SELECT instance_id + , 'projections.notifications_back_channel_logout' + , now() + , $1 + , $2 + , $3 + , 0 + FROM eventstore.events2 + WHERE aggregate_type = 'instance' + AND event_type = 'instance.added' + ON CONFLICT DO NOTHING; \ No newline at end of file diff --git a/cmd/setup/config.go b/cmd/setup/config.go index 7a5beebcfe..09044456ea 100644 --- a/cmd/setup/config.go +++ b/cmd/setup/config.go @@ -123,6 +123,8 @@ type Steps struct { s34AddCacheSchema *AddCacheSchema s35AddPositionToIndexEsWm *AddPositionToIndexEsWm s36FillV2Milestones *FillV2Milestones + s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI + s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart } func MustNewSteps(v *viper.Viper) *Steps { diff --git a/cmd/setup/setup.go b/cmd/setup/setup.go index e24b69d5b6..7ffef5e853 100644 --- a/cmd/setup/setup.go +++ b/cmd/setup/setup.go @@ -166,6 +166,8 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient} steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient} steps.s36FillV2Milestones = &FillV2Milestones{dbClient: queryDBClient, eventstore: eventstoreClient} + steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient} + steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient} err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil) logging.OnError(err).Fatal("unable to start projections") @@ -211,6 +213,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s34AddCacheSchema, steps.s35AddPositionToIndexEsWm, steps.s36FillV2Milestones, + steps.s38BackChannelLogoutNotificationStart, } { mustExecuteMigration(ctx, eventstoreClient, step, "migration failed") } @@ -227,6 +230,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s27IDPTemplate6SAMLNameIDFormat, steps.s32AddAuthSessionID, steps.s33SMSConfigs3TwilioAddVerifyServiceSid, + steps.s37Apps7OIDConfigsBackChannelLogoutURI, } { mustExecuteMigration(ctx, eventstoreClient, step, "migration failed") } @@ -424,6 +428,7 @@ 
func initProjections( ctx, config.Projections.Customizations["notifications"], config.Projections.Customizations["notificationsquotas"], + config.Projections.Customizations["backchannel"], config.Projections.Customizations["telemetry"], *config.Telemetry, config.ExternalDomain, @@ -437,6 +442,8 @@ func initProjections( keys.User, keys.SMTP, keys.SMS, + keys.OIDC, + config.OIDC.DefaultBackChannelLogoutLifetime, ) for _, p := range notify_handler.Projections() { err := migration.Migrate(ctx, eventstoreClient, p) diff --git a/cmd/start/start.go b/cmd/start/start.go index 97a38ba50d..8de1105307 100644 --- a/cmd/start/start.go +++ b/cmd/start/start.go @@ -270,6 +270,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server ctx, config.Projections.Customizations["notifications"], config.Projections.Customizations["notificationsquotas"], + config.Projections.Customizations["backchannel"], config.Projections.Customizations["telemetry"], *config.Telemetry, config.ExternalDomain, @@ -283,6 +284,8 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server keys.User, keys.SMTP, keys.SMS, + keys.OIDC, + config.OIDC.DefaultBackChannelLogoutLifetime, ) notification.Start(ctx) diff --git a/go.mod b/go.mod index 7e34525b6d..1e4f67eb7d 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/gorilla/websocket v1.4.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 github.com/h2non/gock v1.2.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/improbable-eng/grpc-web v0.15.0 @@ -52,7 +52,7 @@ require ( github.com/pashagolub/pgxmock/v4 v4.3.0 github.com/pquerna/otp v1.4.0 github.com/rakyll/statik v0.1.7 - github.com/rs/cors v1.11.0 + github.com/rs/cors v1.11.1 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/sony/sonyflake v1.2.0 github.com/spf13/cobra v1.8.1 @@ -62,29 +62,29 @@ require ( github.com/ttacon/libphonenumber v1.2.1 github.com/twilio/twilio-go v1.22.2 github.com/zitadel/logging v0.6.1 - github.com/zitadel/oidc/v3 v3.28.1 + github.com/zitadel/oidc/v3 v3.32.0 github.com/zitadel/passwap v0.6.0 github.com/zitadel/saml v0.2.0 github.com/zitadel/schema v1.3.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 + go.opentelemetry.io/otel v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 go.opentelemetry.io/otel/exporters/prometheus v0.50.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 - go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/sdk/metric v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 + go.opentelemetry.io/otel/metric v1.29.0 + go.opentelemetry.io/otel/sdk v1.29.0 + go.opentelemetry.io/otel/sdk/metric v1.29.0 + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/mock v0.4.0 golang.org/x/crypto v0.27.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/net v0.26.0 - golang.org/x/oauth2 v0.22.0 + golang.org/x/net v0.28.0 + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 - golang.org/x/text v0.18.0 + golang.org/x/text v0.19.0 google.golang.org/api v0.187.0 - 
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 + google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 sigs.k8s.io/yaml v1.4.0 @@ -94,7 +94,7 @@ require ( cloud.google.com/go/auth v0.6.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -125,7 +125,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect ) require ( @@ -197,7 +197,7 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/sys v0.25.0 gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index dc1e9fb1e8..8645aa8417 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMcJ4= github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -354,8 +354,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= 
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= @@ -628,8 +628,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys= @@ -723,8 +723,8 @@ github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8= github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= -github.com/zitadel/oidc/v3 v3.28.1 h1:PsbFm5CzEMQq9HBXUNJ8yvnWmtVYxpwV5Cinj7TTsHo= -github.com/zitadel/oidc/v3 v3.28.1/go.mod h1:WmDFu3dZ9YNKrIoZkmxjGG8QyUR4PbbhsVVSY+rpojM= +github.com/zitadel/oidc/v3 v3.32.0 h1:Mw0EPZRC6h+OXAuT0Uk2BZIjJQNHLqUpaJCm6c3IByc= +github.com/zitadel/oidc/v3 v3.32.0/go.mod h1:DyE/XClysRK/ozFaZSqlYamKVnTh4l6Ln25ihSNI03w= github.com/zitadel/passwap v0.6.0 h1:m9F3epFC0VkBXu25rihSLGyHvWiNlCzU5kk8RoI+SXQ= github.com/zitadel/passwap v0.6.0/go.mod h1:kqAiJ4I4eZvm3Y6oAk6hlEqlZZOkjMHraGXF90GG7LI= github.com/zitadel/saml v0.2.0 h1:vv7r+Xz43eAPCb+fImMaospD+TWRZQDkb78AbSJRcL4= @@ -742,24 +742,24 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod 
h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -857,13 +857,13 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -934,8 +934,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -983,10 +983,10 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= diff --git a/internal/api/grpc/feature/v2/converter.go 
b/internal/api/grpc/feature/v2/converter.go index e8b57a2885..7d951f789a 100644 --- a/internal/api/grpc/feature/v2/converter.go +++ b/internal/api/grpc/feature/v2/converter.go @@ -19,6 +19,7 @@ func systemFeaturesToCommand(req *feature_pb.SetSystemFeaturesRequest) *command. ImprovedPerformance: improvedPerformanceListToDomain(req.ImprovedPerformance), OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination, DisableUserTokenEvent: req.DisableUserTokenEvent, + EnableBackChannelLogout: req.EnableBackChannelLogout, } } @@ -34,6 +35,7 @@ func systemFeaturesToPb(f *query.SystemFeatures) *feature_pb.GetSystemFeaturesRe ImprovedPerformance: featureSourceToImprovedPerformanceFlagPb(&f.ImprovedPerformance), OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination), DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent), + EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout), } } @@ -50,6 +52,7 @@ func instanceFeaturesToCommand(req *feature_pb.SetInstanceFeaturesRequest) *comm DebugOIDCParentError: req.DebugOidcParentError, OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination, DisableUserTokenEvent: req.DisableUserTokenEvent, + EnableBackChannelLogout: req.EnableBackChannelLogout, } } @@ -67,6 +70,7 @@ func instanceFeaturesToPb(f *query.InstanceFeatures) *feature_pb.GetInstanceFeat DebugOidcParentError: featureSourceToFlagPb(&f.DebugOIDCParentError), OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination), DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent), + EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout), } } diff --git a/internal/api/grpc/feature/v2/converter_test.go b/internal/api/grpc/feature/v2/converter_test.go index 79bfa34839..43a848e3a6 100644 --- a/internal/api/grpc/feature/v2/converter_test.go +++ b/internal/api/grpc/feature/v2/converter_test.go @@ -80,6 +80,10 @@ func Test_systemFeaturesToPb(t *testing.T) { Level: feature.LevelSystem, Value: true, }, + EnableBackChannelLogout: query.FeatureSource[bool]{ + Level: feature.LevelSystem, + Value: true, + }, } want := &feature_pb.GetSystemFeaturesResponse{ Details: &object.Details{ @@ -123,6 +127,10 @@ func Test_systemFeaturesToPb(t *testing.T) { Enabled: false, Source: feature_pb.Source_SOURCE_UNSPECIFIED, }, + EnableBackChannelLogout: &feature_pb.FeatureFlag{ + Enabled: true, + Source: feature_pb.Source_SOURCE_SYSTEM, + }, } got := systemFeaturesToPb(arg) assert.Equal(t, want, got) @@ -140,6 +148,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) { WebKey: gu.Ptr(true), DebugOidcParentError: gu.Ptr(true), OidcSingleV1SessionTermination: gu.Ptr(true), + EnableBackChannelLogout: gu.Ptr(true), } want := &command.InstanceFeatures{ LoginDefaultOrg: gu.Ptr(true), @@ -152,6 +161,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) { WebKey: gu.Ptr(true), DebugOIDCParentError: gu.Ptr(true), OIDCSingleV1SessionTermination: gu.Ptr(true), + EnableBackChannelLogout: gu.Ptr(true), } got := instanceFeaturesToCommand(arg) assert.Equal(t, want, got) @@ -200,6 +210,10 @@ func Test_instanceFeaturesToPb(t *testing.T) { Level: feature.LevelInstance, Value: true, }, + EnableBackChannelLogout: query.FeatureSource[bool]{ + Level: feature.LevelInstance, + Value: true, + }, } want := &feature_pb.GetInstanceFeaturesResponse{ Details: &object.Details{ @@ -251,6 +265,10 @@ func Test_instanceFeaturesToPb(t *testing.T) { Enabled: false, Source: feature_pb.Source_SOURCE_UNSPECIFIED, }, + 
EnableBackChannelLogout: &feature_pb.FeatureFlag{ + Enabled: true, + Source: feature_pb.Source_SOURCE_INSTANCE, + }, } got := instanceFeaturesToPb(arg) assert.Equal(t, want, got) diff --git a/internal/api/grpc/management/project_application_converter.go b/internal/api/grpc/management/project_application_converter.go index fcfe8089ee..ea2f45fd0d 100644 --- a/internal/api/grpc/management/project_application_converter.go +++ b/internal/api/grpc/management/project_application_converter.go @@ -57,6 +57,7 @@ func AddOIDCAppRequestToDomain(req *mgmt_pb.AddOIDCAppRequest) *domain.OIDCApp { ClockSkew: req.ClockSkew.AsDuration(), AdditionalOrigins: req.AdditionalOrigins, SkipNativeAppSuccessPage: req.SkipNativeAppSuccessPage, + BackChannelLogoutURI: req.GetBackChannelLogoutUri(), } } @@ -108,6 +109,7 @@ func UpdateOIDCAppConfigRequestToDomain(app *mgmt_pb.UpdateOIDCAppConfigRequest) ClockSkew: app.ClockSkew.AsDuration(), AdditionalOrigins: app.AdditionalOrigins, SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage, + BackChannelLogoutURI: app.BackChannelLogoutUri, } } diff --git a/internal/api/grpc/project/application.go b/internal/api/grpc/project/application.go index 25274eeb1d..e70554ce64 100644 --- a/internal/api/grpc/project/application.go +++ b/internal/api/grpc/project/application.go @@ -61,6 +61,7 @@ func AppOIDCConfigToPb(app *query.OIDCApp) *app_pb.App_OidcConfig { AdditionalOrigins: app.AdditionalOrigins, AllowedOrigins: app.AllowedOrigins, SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage, + BackChannelLogoutUri: app.BackChannelLogoutURI, }, } } diff --git a/internal/api/oidc/auth_request.go b/internal/api/oidc/auth_request.go index dc402036fb..173585ff13 100644 --- a/internal/api/oidc/auth_request.go +++ b/internal/api/oidc/auth_request.go @@ -215,18 +215,18 @@ func (o *OPStorage) TerminateSession(ctx context.Context, userID, clientID strin logging.Error("no user agent id") return zerrors.ThrowPreconditionFailed(nil, "OIDC-fso7F", "no user agent id") } - userIDs, err := o.repo.UserSessionUserIDsByAgentID(ctx, userAgentID) + sessions, err := o.repo.UserSessionsByAgentID(ctx, userAgentID) if err != nil { logging.WithError(err).Error("error retrieving user sessions") return err } - if len(userIDs) == 0 { + if len(sessions) == 0 { return nil } data := authz.CtxData{ UserID: userID, } - err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, userIDs) + err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, sessions) logging.OnError(err).Error("error signing out") return err } @@ -278,18 +278,18 @@ func (o *OPStorage) terminateV1Session(ctx context.Context, userID, sessionID st if err != nil { return err } - return o.command.HumansSignOut(ctx, userAgentID, []string{userID}) + return o.command.HumansSignOut(ctx, userAgentID, []command.HumanSignOutSession{{ID: sessionID, UserID: userID}}) } // otherwise we search for all active sessions within the same user agent of the current session id - userAgentID, userIDs, err := o.repo.ActiveUserIDsBySessionID(ctx, sessionID) + userAgentID, sessions, err := o.repo.ActiveUserSessionsBySessionID(ctx, sessionID) if err != nil { logging.WithError(err).Error("error retrieving user sessions") return err } - if len(userIDs) == 0 { + if len(sessions) == 0 { return nil } - return o.command.HumansSignOut(ctx, userAgentID, userIDs) + return o.command.HumansSignOut(ctx, userAgentID, sessions) } func (o *OPStorage) RevokeToken(ctx context.Context, token, userID, clientID string) (err *oidc.Error) { @@ -588,6 +588,7 @@ func (s 
*Server) authResponseToken(authReq *AuthRequest, authorizer op.Authorize authReq.UserID, authReq.UserOrgID, client.client.ClientID, + client.client.BackChannelLogoutURI, scope, authReq.Audience, authReq.AuthMethods(), diff --git a/internal/api/oidc/key.go b/internal/api/oidc/key.go index a7e156fe78..535aa846b4 100644 --- a/internal/api/oidc/key.go +++ b/internal/api/oidc/key.go @@ -348,7 +348,7 @@ func (o *OPStorage) getSigningKey(ctx context.Context) (op.SigningKey, error) { return nil, err } if len(keys.Keys) > 0 { - return o.privateKeyToSigningKey(selectSigningKey(keys.Keys)) + return PrivateKeyToSigningKey(SelectSigningKey(keys.Keys), o.encAlg) } var position float64 if keys.State != nil { @@ -377,8 +377,8 @@ func (o *OPStorage) ensureIsLatestKey(ctx context.Context, position float64) (bo return position >= maxSequence, nil } -func (o *OPStorage) privateKeyToSigningKey(key query.PrivateKey) (_ op.SigningKey, err error) { - keyData, err := crypto.Decrypt(key.Key(), o.encAlg) +func PrivateKeyToSigningKey(key query.PrivateKey, algorithm crypto.EncryptionAlgorithm) (_ op.SigningKey, err error) { + keyData, err := crypto.Decrypt(key.Key(), algorithm) if err != nil { return nil, err } @@ -430,7 +430,7 @@ func (o *OPStorage) getMaxKeySequence(ctx context.Context) (float64, error) { ) } -func selectSigningKey(keys []query.PrivateKey) query.PrivateKey { +func SelectSigningKey(keys []query.PrivateKey) query.PrivateKey { return keys[len(keys)-1] } diff --git a/internal/api/oidc/op.go b/internal/api/oidc/op.go index c8dafb50f3..86b89690bf 100644 --- a/internal/api/oidc/op.go +++ b/internal/api/oidc/op.go @@ -42,6 +42,7 @@ type Config struct { DefaultLoginURLV2 string DefaultLogoutURLV2 string PublicKeyCacheMaxAge time.Duration + DefaultBackChannelLogoutLifetime time.Duration } type EndpointConfig struct { diff --git a/internal/api/oidc/server.go b/internal/api/oidc/server.go index 07bc4706be..1a0854e2a6 100644 --- a/internal/api/oidc/server.go +++ b/internal/api/oidc/server.go @@ -167,6 +167,7 @@ func (s *Server) EndSession(ctx context.Context, r *op.Request[oidc.EndSessionRe func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales oidc.Locales) *oidc.DiscoveryConfiguration { issuer := op.IssuerFromContext(ctx) + backChannelLogoutSupported := authz.GetInstance(ctx).Features().EnableBackChannelLogout return &oidc.DiscoveryConfiguration{ Issuer: issuer, @@ -199,6 +200,8 @@ func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales o CodeChallengeMethodsSupported: op.CodeChallengeMethods(s.Provider()), UILocalesSupported: supportedUILocales, RequestParameterSupported: s.Provider().RequestObjectSupported(), + BackChannelLogoutSupported: backChannelLogoutSupported, + BackChannelLogoutSessionSupported: backChannelLogoutSupported, } } diff --git a/internal/api/oidc/token.go b/internal/api/oidc/token.go index 56ed225902..485f455784 100644 --- a/internal/api/oidc/token.go +++ b/internal/api/oidc/token.go @@ -60,12 +60,19 @@ func (s *Server) accessTokenResponseFromSession(ctx context.Context, client op.C return resp, err } -// signerFunc is a getter function that allows add-hoc retrieval of the instance's signer. -type signerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error) +// SignerFunc is a getter function that allows add-hoc retrieval of the instance's signer. 
+type SignerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error) -// getSignerOnce returns a function which retrieves the instance's signer from the database once. +func (s *Server) getSignerOnce() SignerFunc { + return GetSignerOnce(s.query.GetActiveSigningWebKey, s.Provider().Storage().SigningKey) +} + +// GetSignerOnce returns a function which retrieves the instance's signer from the database once. // Repeated calls of the returned function return the same results. -func (s *Server) getSignerOnce() signerFunc { +func GetSignerOnce( + getActiveSigningWebKey func(ctx context.Context) (*jose.JSONWebKey, error), + getSigningKey func(ctx context.Context) (op.SigningKey, error), +) SignerFunc { var ( once sync.Once signer jose.Signer @@ -79,7 +86,7 @@ func (s *Server) getSignerOnce() signerFunc { if authz.GetFeatures(ctx).WebKey { var webKey *jose.JSONWebKey - webKey, err = s.query.GetActiveSigningWebKey(ctx) + webKey, err = getActiveSigningWebKey(ctx) if err != nil { return } @@ -88,7 +95,7 @@ func (s *Server) getSignerOnce() signerFunc { } var signingKey op.SigningKey - signingKey, err = s.Provider().Storage().SigningKey(ctx) + signingKey, err = getSigningKey(ctx) if err != nil { return } @@ -126,7 +133,7 @@ func (s *Server) getUserInfo(userID, projectID string, projectRoleAssertion, use } } -func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey signerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) { +func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey SignerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) { ctx, span := tracing.NewSpan(ctx) defer func() { span.EndWithError(err) }() @@ -170,7 +177,7 @@ func timeToOIDCExpiresIn(exp time.Time) uint64 { return uint64(time.Until(exp) / time.Second) } -func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner signerFunc) (_ string, err error) { +func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner SignerFunc) (_ string, err error) { ctx, span := tracing.NewSpan(ctx) defer func() { span.EndWithError(err) }() diff --git a/internal/api/oidc/token_client_credentials.go b/internal/api/oidc/token_client_credentials.go index 2fedd71c44..0b836a03cc 100644 --- a/internal/api/oidc/token_client_credentials.go +++ b/internal/api/oidc/token_client_credentials.go @@ -35,6 +35,7 @@ func (s *Server) ClientCredentialsExchange(ctx context.Context, r *op.ClientRequ client.userID, client.resourceOwner, client.clientID, + "", // backChannelLogoutURI not needed for service user session scope, domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope), []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, diff --git a/internal/api/oidc/token_code.go b/internal/api/oidc/token_code.go index e6899bed01..3aa53e629e 100644 --- a/internal/api/oidc/token_code.go +++ b/internal/api/oidc/token_code.go @@ -75,6 +75,7 @@ func (s *Server) codeExchangeV1(ctx context.Context, client *Client, req *oidc.A authReq.UserID, 
authReq.UserOrgID, client.client.ClientID, + client.client.BackChannelLogoutURI, scope, authReq.Audience, authReq.AuthMethods(), diff --git a/internal/api/oidc/token_exchange.go b/internal/api/oidc/token_exchange.go index ec43729692..63a594b940 100644 --- a/internal/api/oidc/token_exchange.go +++ b/internal/api/oidc/token_exchange.go @@ -288,6 +288,7 @@ func (s *Server) createExchangeAccessToken( userID, resourceOwner, client.client.ClientID, + client.client.BackChannelLogoutURI, scope, audience, authMethods, @@ -315,7 +316,7 @@ func (s *Server) createExchangeJWT( client *Client, getUserInfo userInfoFunc, roleAssertion bool, - getSigner signerFunc, + getSigner SignerFunc, userID, resourceOwner string, audience, @@ -333,6 +334,7 @@ func (s *Server) createExchangeJWT( userID, resourceOwner, client.client.ClientID, + client.client.BackChannelLogoutURI, scope, audience, authMethods, diff --git a/internal/api/oidc/token_jwt_profile.go b/internal/api/oidc/token_jwt_profile.go index 253432cc83..4717d29f9c 100644 --- a/internal/api/oidc/token_jwt_profile.go +++ b/internal/api/oidc/token_jwt_profile.go @@ -45,6 +45,7 @@ func (s *Server) JWTProfile(ctx context.Context, r *op.Request[oidc.JWTProfileGr client.userID, client.resourceOwner, client.clientID, + "", // backChannelLogoutURI not needed for service user session scope, domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope), []domain.UserAuthMethodType{domain.UserAuthMethodTypePrivateKey}, diff --git a/internal/api/oidc/token_refresh.go b/internal/api/oidc/token_refresh.go index 62f3c6dd3f..f0d92fa521 100644 --- a/internal/api/oidc/token_refresh.go +++ b/internal/api/oidc/token_refresh.go @@ -54,6 +54,7 @@ func (s *Server) refreshTokenV1(ctx context.Context, client *Client, r *op.Clien refreshToken.UserID, refreshToken.ResourceOwner, refreshToken.ClientID, + "", // backChannelLogoutURI is not in refresh token view scope, refreshToken.Audience, AMRToAuthMethodTypes(refreshToken.AuthMethodsReferences), diff --git a/internal/auth/repository/eventsourcing/eventstore/user.go b/internal/auth/repository/eventsourcing/eventstore/user.go index b11f770d77..61895c263d 100644 --- a/internal/auth/repository/eventsourcing/eventstore/user.go +++ b/internal/auth/repository/eventsourcing/eventstore/user.go @@ -6,6 +6,7 @@ import ( "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" + "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/eventstore" @@ -27,26 +28,40 @@ func (repo *UserRepo) Health(ctx context.Context) error { return repo.Eventstore.Health(ctx) } -func (repo *UserRepo) UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) { - userSessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID()) +func (repo *UserRepo) UserSessionsByAgentID(ctx context.Context, agentID string) ([]command.HumanSignOutSession, error) { + sessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID()) if err != nil { return nil, err } - userIDs := make([]string, 0, len(userSessions)) - for _, session := range userSessions { - if session.State.V == domain.UserSessionStateActive { - userIDs = append(userIDs, session.UserID) + signoutSessions := make([]command.HumanSignOutSession, 0, len(sessions)) + for _, session := range sessions { + if session.State.V == 
domain.UserSessionStateActive && session.ID.Valid { + signoutSessions = append(signoutSessions, command.HumanSignOutSession{ + ID: session.ID.String, + UserID: session.UserID, + }) } } - return userIDs, nil + return signoutSessions, nil } func (repo *UserRepo) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) { return repo.View.UserAgentIDBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID()) } -func (repo *UserRepo) ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) { - return repo.View.ActiveUserIDsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID()) +func (repo *UserRepo) ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, signoutSessions []command.HumanSignOutSession, err error) { + userAgentID, sessions, err := repo.View.ActiveUserSessionsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID()) + if err != nil { + return "", nil, err + } + signoutSessions = make([]command.HumanSignOutSession, 0, len(sessions)) + for sessionID, userID := range sessions { + signoutSessions = append(signoutSessions, command.HumanSignOutSession{ + ID: sessionID, + UserID: userID, + }) + } + return userAgentID, signoutSessions, nil } func (repo *UserRepo) UserEventsByID(ctx context.Context, id string, changeDate time.Time, eventTypes []eventstore.EventType) ([]eventstore.Event, error) { diff --git a/internal/auth/repository/eventsourcing/view/user_session.go b/internal/auth/repository/eventsourcing/view/user_session.go index f25deb99e6..a4618e11fb 100644 --- a/internal/auth/repository/eventsourcing/view/user_session.go +++ b/internal/auth/repository/eventsourcing/view/user_session.go @@ -24,8 +24,8 @@ func (v *View) UserAgentIDBySessionID(ctx context.Context, sessionID, instanceID return view.UserAgentIDBySessionID(ctx, v.client, sessionID, instanceID) } -func (v *View) ActiveUserIDsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, userIDs []string, err error) { - return view.ActiveUserIDsBySessionID(ctx, v.client, sessionID, instanceID) +func (v *View) ActiveUserSessionsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, sessions map[string]string, err error) { + return view.ActiveUserSessionsBySessionID(ctx, v.client, sessionID, instanceID) } func (v *View) GetLatestUserSessionSequence(ctx context.Context, instanceID string) (_ *query.CurrentState, err error) { diff --git a/internal/auth/repository/user.go b/internal/auth/repository/user.go index 6f373ec12e..f09581b32e 100644 --- a/internal/auth/repository/user.go +++ b/internal/auth/repository/user.go @@ -2,10 +2,12 @@ package repository import ( "context" + + "github.com/zitadel/zitadel/internal/command" ) type UserRepository interface { - UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) + UserSessionsByAgentID(ctx context.Context, agentID string) (sessions []command.HumanSignOutSession, err error) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) - ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) + ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, sessions []command.HumanSignOutSession, err error) } diff --git a/internal/command/instance_domain_test.go b/internal/command/instance_domain_test.go index 3f5e73aedd..adaa59ec05 100644 --- 
a/internal/command/instance_domain_test.go +++ b/internal/command/instance_domain_test.go @@ -155,6 +155,7 @@ func TestCommandSide_AddInstanceDomain(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, false, + "", ), ), ), diff --git a/internal/command/instance_features.go b/internal/command/instance_features.go index 79d3d25ffe..e4509ae130 100644 --- a/internal/command/instance_features.go +++ b/internal/command/instance_features.go @@ -27,6 +27,7 @@ type InstanceFeatures struct { DebugOIDCParentError *bool OIDCSingleV1SessionTermination *bool DisableUserTokenEvent *bool + EnableBackChannelLogout *bool } func (m *InstanceFeatures) isEmpty() bool { @@ -41,7 +42,8 @@ func (m *InstanceFeatures) isEmpty() bool { m.WebKey == nil && m.DebugOIDCParentError == nil && m.OIDCSingleV1SessionTermination == nil && - m.DisableUserTokenEvent == nil + m.DisableUserTokenEvent == nil && + m.EnableBackChannelLogout == nil } func (c *Commands) SetInstanceFeatures(ctx context.Context, f *InstanceFeatures) (*domain.ObjectDetails, error) { diff --git a/internal/command/instance_features_model.go b/internal/command/instance_features_model.go index 5ed0b9c24b..f6c5f39898 100644 --- a/internal/command/instance_features_model.go +++ b/internal/command/instance_features_model.go @@ -71,6 +71,7 @@ func (m *InstanceFeaturesWriteModel) Query() *eventstore.SearchQueryBuilder { feature_v2.InstanceDebugOIDCParentErrorEventType, feature_v2.InstanceOIDCSingleV1SessionTerminationEventType, feature_v2.InstanceDisableUserTokenEvent, + feature_v2.InstanceEnableBackChannelLogout, ). Builder().ResourceOwner(m.ResourceOwner) } @@ -116,6 +117,9 @@ func reduceInstanceFeature(features *InstanceFeatures, key feature.Key, value an case feature.KeyDisableUserTokenEvent: v := value.(bool) features.DisableUserTokenEvent = &v + case feature.KeyEnableBackChannelLogout: + v := value.(bool) + features.EnableBackChannelLogout = &v } } @@ -133,5 +137,6 @@ func (wm *InstanceFeaturesWriteModel) setCommands(ctx context.Context, f *Instan cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.DebugOIDCParentError, f.DebugOIDCParentError, feature_v2.InstanceDebugOIDCParentErrorEventType) cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.OIDCSingleV1SessionTermination, f.OIDCSingleV1SessionTermination, feature_v2.InstanceOIDCSingleV1SessionTerminationEventType) cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.DisableUserTokenEvent, f.DisableUserTokenEvent, feature_v2.InstanceDisableUserTokenEvent) + cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.EnableBackChannelLogout, f.EnableBackChannelLogout, feature_v2.InstanceEnableBackChannelLogout) return cmds } diff --git a/internal/command/instance_test.go b/internal/command/instance_test.go index af32ea538d..c60b2763b3 100644 --- a/internal/command/instance_test.go +++ b/internal/command/instance_test.go @@ -127,6 +127,7 @@ func oidcAppEvents(ctx context.Context, orgID, projectID, id, name, clientID str 0, nil, false, + "", ), } } @@ -439,6 +440,7 @@ func generatedDomainFilters(instanceID, orgID, projectID, appID, generatedDomain 0, nil, false, + "", ), ), expectFilter( diff --git a/internal/command/logout_session.go b/internal/command/logout_session.go new file mode 100644 index 0000000000..fd52c0f970 --- /dev/null +++ b/internal/command/logout_session.go @@ -0,0 +1,24 @@ +package command + +import ( + "context" + + "github.com/zitadel/zitadel/internal/repository/sessionlogout" + "github.com/zitadel/zitadel/internal/telemetry/tracing" +) + +func (c *Commands) BackChannelLogoutSent(ctx 
context.Context, id, oidcSessionID, instanceID string) (err error) { + ctx, span := tracing.NewSpan(ctx) + defer func() { span.EndWithError(err) }() + + sessionWriteModel := NewSessionLogoutWriteModel(id, instanceID, oidcSessionID) + if err = c.eventstore.FilterToQueryReducer(ctx, sessionWriteModel); err != nil { + return err + } + + return c.pushAppendAndReduce( + ctx, + sessionWriteModel, + sessionlogout.NewBackChannelLogoutSentEvent(ctx, sessionWriteModel.aggregate, oidcSessionID), + ) +} diff --git a/internal/command/logout_session_model.go b/internal/command/logout_session_model.go new file mode 100644 index 0000000000..ed31a87012 --- /dev/null +++ b/internal/command/logout_session_model.go @@ -0,0 +1,74 @@ +package command + +import ( + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/repository/sessionlogout" +) + +type SessionLogoutWriteModel struct { + eventstore.WriteModel + + UserID string + OIDCSessionID string + ClientID string + BackChannelLogoutURI string + BackChannelLogoutSent bool + + aggregate *eventstore.Aggregate +} + +func NewSessionLogoutWriteModel(id string, instanceID string, sessionID string) *SessionLogoutWriteModel { + return &SessionLogoutWriteModel{ + WriteModel: eventstore.WriteModel{ + AggregateID: id, + ResourceOwner: instanceID, + InstanceID: instanceID, + }, + aggregate: &sessionlogout.NewAggregate(id, instanceID).Aggregate, + OIDCSessionID: sessionID, + } +} + +func (wm *SessionLogoutWriteModel) Reduce() error { + for _, event := range wm.Events { + switch e := event.(type) { + case *sessionlogout.BackChannelLogoutRegisteredEvent: + wm.reduceRegistered(e) + case *sessionlogout.BackChannelLogoutSentEvent: + wm.reduceSent(e) + } + } + return wm.WriteModel.Reduce() +} + +func (wm *SessionLogoutWriteModel) Query() *eventstore.SearchQueryBuilder { + query := eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent). + AddQuery(). + AggregateTypes(sessionlogout.AggregateType). + AggregateIDs(wm.AggregateID). + EventTypes( + sessionlogout.BackChannelLogoutRegisteredType, + sessionlogout.BackChannelLogoutSentType, + ). + EventData(map[string]interface{}{ + "oidc_session_id": wm.OIDCSessionID, + }). 
+ Builder() + return query +} + +func (wm *SessionLogoutWriteModel) reduceRegistered(e *sessionlogout.BackChannelLogoutRegisteredEvent) { + if wm.OIDCSessionID != e.OIDCSessionID { + return + } + wm.UserID = e.UserID + wm.ClientID = e.ClientID + wm.BackChannelLogoutURI = e.BackChannelLogoutURI +} + +func (wm *SessionLogoutWriteModel) reduceSent(e *sessionlogout.BackChannelLogoutSentEvent) { + if wm.OIDCSessionID != e.OIDCSessionID { + return + } + wm.BackChannelLogoutSent = true +} diff --git a/internal/command/oidc_session.go b/internal/command/oidc_session.go index f7bb9b4cb6..c2922f5194 100644 --- a/internal/command/oidc_session.go +++ b/internal/command/oidc_session.go @@ -18,6 +18,7 @@ import ( "github.com/zitadel/zitadel/internal/id" "github.com/zitadel/zitadel/internal/repository/authrequest" "github.com/zitadel/zitadel/internal/repository/oidcsession" + "github.com/zitadel/zitadel/internal/repository/sessionlogout" "github.com/zitadel/zitadel/internal/repository/user" "github.com/zitadel/zitadel/internal/telemetry/tracing" "github.com/zitadel/zitadel/internal/zerrors" @@ -133,7 +134,8 @@ func (c *Commands) CreateOIDCSessionFromAuthRequest(ctx context.Context, authReq func (c *Commands) CreateOIDCSession(ctx context.Context, userID, resourceOwner, - clientID string, + clientID, + backChannelLogoutURI string, scope, audience []string, authMethods []domain.UserAuthMethodType, @@ -161,6 +163,7 @@ func (c *Commands) CreateOIDCSession(ctx context.Context, } cmd.AddSession(ctx, userID, resourceOwner, sessionID, clientID, audience, scope, authMethods, authTime, nonce, preferredLanguage, userAgent) + cmd.RegisterLogout(ctx, sessionID, userID, clientID, backChannelLogoutURI) if err = cmd.AddAccessToken(ctx, scope, userID, resourceOwner, reason, actor); err != nil { return nil, err } @@ -433,6 +436,26 @@ func (c *OIDCSessionEvents) SetAuthRequestFailed(ctx context.Context, authReques c.events = append(c.events, authrequest.NewFailedEvent(ctx, authRequestAggregate, domain.OIDCErrorReasonFromError(err))) } +func (c *OIDCSessionEvents) RegisterLogout(ctx context.Context, sessionID, userID, clientID, backChannelLogoutURI string) { + // If there's no SSO session (e.g. service accounts) we do not need to register a logout handler. 
+ // Also, if the client did not register a backchannel_logout_uri it will not support it (https://openid.net/specs/openid-connect-backchannel-1_0.html#BCRegistration) + if sessionID == "" || backChannelLogoutURI == "" { + return + } + if !authz.GetFeatures(ctx).EnableBackChannelLogout { + return + } + + c.events = append(c.events, sessionlogout.NewBackChannelLogoutRegisteredEvent( + ctx, + &sessionlogout.NewAggregate(sessionID, authz.GetInstance(ctx).InstanceID()).Aggregate, + c.oidcSessionWriteModel.AggregateID, + userID, + clientID, + backChannelLogoutURI, + )) +} + func (c *OIDCSessionEvents) AddAccessToken(ctx context.Context, scope []string, userID, resourceOwner string, reason domain.TokenReason, actor *domain.TokenActor) error { accessTokenID, err := c.idGenerator.Next() if err != nil { diff --git a/internal/command/oidc_session_test.go b/internal/command/oidc_session_test.go index 86d6bd9033..43ca622a29 100644 --- a/internal/command/oidc_session_test.go +++ b/internal/command/oidc_session_test.go @@ -24,6 +24,7 @@ import ( "github.com/zitadel/zitadel/internal/repository/authrequest" "github.com/zitadel/zitadel/internal/repository/oidcsession" "github.com/zitadel/zitadel/internal/repository/session" + "github.com/zitadel/zitadel/internal/repository/sessionlogout" "github.com/zitadel/zitadel/internal/repository/user" "github.com/zitadel/zitadel/internal/zerrors" ) @@ -732,21 +733,22 @@ func TestCommands_CreateOIDCSession(t *testing.T) { checkPermission domain.PermissionCheck } type args struct { - ctx context.Context - userID string - resourceOwner string - clientID string - audience []string - scope []string - authMethods []domain.UserAuthMethodType - authTime time.Time - nonce string - preferredLanguage *language.Tag - userAgent *domain.UserAgent - reason domain.TokenReason - actor *domain.TokenActor - needRefreshToken bool - sessionID string + ctx context.Context + userID string + resourceOwner string + clientID string + backChannelLogoutURI string + audience []string + scope []string + authMethods []domain.UserAuthMethodType + authTime time.Time + nonce string + preferredLanguage *language.Tag + userAgent *domain.UserAgent + reason domain.TokenReason + actor *domain.TokenActor + needRefreshToken bool + sessionID string } tests := []struct { name string @@ -763,16 +765,17 @@ func TestCommands_CreateOIDCSession(t *testing.T) { ), }, args: args{ - ctx: authz.WithInstanceID(context.Background(), "instanceID"), - userID: "userID", - resourceOwner: "orgID", - clientID: "clientID", - audience: []string{"audience"}, - scope: []string{"openid", "offline_access"}, - authMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, - authTime: testNow, - nonce: "nonce", - preferredLanguage: &language.Afrikaans, + ctx: authz.WithInstanceID(context.Background(), "instanceID"), + userID: "userID", + resourceOwner: "orgID", + clientID: "clientID", + backChannelLogoutURI: "backChannelLogoutURI", + audience: []string{"audience"}, + scope: []string{"openid", "offline_access"}, + authMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + authTime: testNow, + nonce: "nonce", + preferredLanguage: &language.Afrikaans, userAgent: &domain.UserAgent{ FingerprintID: gu.Ptr("fp1"), IP: net.ParseIP("1.2.3.4"), @@ -1236,6 +1239,308 @@ func TestCommands_CreateOIDCSession(t *testing.T) { SessionID: "sessionID", }, }, + { + name: "with backChannelLogoutURI", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + user.NewHumanAddedEvent( + context.Background(), + 
&user.NewAggregate("userID", "org1").Aggregate, + "username", + "firstname", + "lastname", + "nickname", + "displayname", + language.Afrikaans, + domain.GenderUnspecified, + "email", + false, + ), + ), + expectFilter(), // token lifetime + expectPush( + oidcsession.NewAddedEvent(context.Background(), &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "userID", "org1", "", "clientID", []string{"audience"}, []string{"openid", "offline_access"}, + []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, testNow, "nonce", &language.Afrikaans, + &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + ), + oidcsession.NewAccessTokenAddedEvent(context.Background(), + &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "at_accessTokenID", []string{"openid", "offline_access"}, time.Hour, domain.TokenReasonAuthRequest, + &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + ), + user.NewUserTokenV2AddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, "at_accessTokenID"), + ), + ), + idGenerator: mock.NewIDGeneratorExpectIDs(t, "oidcSessionID", "accessTokenID"), + defaultAccessTokenLifetime: time.Hour, + defaultRefreshTokenLifetime: 7 * 24 * time.Hour, + defaultRefreshTokenIdleLifetime: 24 * time.Hour, + keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), + }, + args: args{ + ctx: authz.WithInstanceID(context.Background(), "instanceID"), + userID: "userID", + resourceOwner: "org1", + clientID: "clientID", + backChannelLogoutURI: "backChannelLogoutURI", + audience: []string{"audience"}, + scope: []string{"openid", "offline_access"}, + authMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + authTime: testNow, + nonce: "nonce", + preferredLanguage: &language.Afrikaans, + userAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + reason: domain.TokenReasonAuthRequest, + actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + needRefreshToken: false, + }, + want: &OIDCSession{ + TokenID: "V2_oidcSessionID-at_accessTokenID", + ClientID: "clientID", + UserID: "userID", + Audience: []string{"audience"}, + Expiration: time.Time{}.Add(time.Hour), + Scope: []string{"openid", "offline_access"}, + AuthMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + AuthTime: testNow, + Nonce: "nonce", + PreferredLanguage: &language.Afrikaans, + UserAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + Reason: domain.TokenReasonAuthRequest, + Actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + }, + }, + { + name: "with backChannelLogoutURI and sessionID", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + user.NewHumanAddedEvent( + context.Background(), + &user.NewAggregate("userID", "org1").Aggregate, + "username", + "firstname", + "lastname", + "nickname", + "displayname", + language.Afrikaans, + domain.GenderUnspecified, + "email", + false, + ), + ), + expectFilter(), // token lifetime + expectPush( + oidcsession.NewAddedEvent(context.Background(), &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "userID", "org1", "sessionID", "clientID", []string{"audience"}, 
[]string{"openid", "offline_access"}, + []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, testNow, "nonce", &language.Afrikaans, + &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + ), + oidcsession.NewAccessTokenAddedEvent(context.Background(), + &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "at_accessTokenID", []string{"openid", "offline_access"}, time.Hour, domain.TokenReasonAuthRequest, + &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + ), + user.NewUserTokenV2AddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, "at_accessTokenID"), + ), + ), + idGenerator: mock.NewIDGeneratorExpectIDs(t, "oidcSessionID", "accessTokenID"), + defaultAccessTokenLifetime: time.Hour, + defaultRefreshTokenLifetime: 7 * 24 * time.Hour, + defaultRefreshTokenIdleLifetime: 24 * time.Hour, + keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), + }, + args: args{ + ctx: authz.WithInstanceID(context.Background(), "instanceID"), + userID: "userID", + resourceOwner: "org1", + clientID: "clientID", + backChannelLogoutURI: "backChannelLogoutURI", + audience: []string{"audience"}, + scope: []string{"openid", "offline_access"}, + authMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + authTime: testNow, + nonce: "nonce", + preferredLanguage: &language.Afrikaans, + userAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + reason: domain.TokenReasonAuthRequest, + actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + needRefreshToken: false, + sessionID: "sessionID", + }, + want: &OIDCSession{ + TokenID: "V2_oidcSessionID-at_accessTokenID", + ClientID: "clientID", + UserID: "userID", + Audience: []string{"audience"}, + Expiration: time.Time{}.Add(time.Hour), + Scope: []string{"openid", "offline_access"}, + AuthMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + AuthTime: testNow, + Nonce: "nonce", + PreferredLanguage: &language.Afrikaans, + UserAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + Reason: domain.TokenReasonAuthRequest, + Actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + SessionID: "sessionID", + }, + }, + { + name: "with backChannelLogoutURI and sessionID, backchannel logout enabled", + fields: fields{ + eventstore: expectEventstore( + expectFilter( + user.NewHumanAddedEvent( + context.Background(), + &user.NewAggregate("userID", "org1").Aggregate, + "username", + "firstname", + "lastname", + "nickname", + "displayname", + language.Afrikaans, + domain.GenderUnspecified, + "email", + false, + ), + ), + expectFilter(), // token lifetime + expectPush( + oidcsession.NewAddedEvent(context.Background(), &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "userID", "org1", "sessionID", "clientID", []string{"audience"}, []string{"openid", "offline_access"}, + []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, testNow, "nonce", &language.Afrikaans, + &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + ), + 
sessionlogout.NewBackChannelLogoutRegisteredEvent(context.Background(), + &sessionlogout.NewAggregate("sessionID", "instanceID").Aggregate, + "V2_oidcSessionID", + "userID", + "clientID", + "backChannelLogoutURI", + ), + oidcsession.NewAccessTokenAddedEvent(context.Background(), + &oidcsession.NewAggregate("V2_oidcSessionID", "org1").Aggregate, + "at_accessTokenID", []string{"openid", "offline_access"}, time.Hour, domain.TokenReasonAuthRequest, + &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + ), + user.NewUserTokenV2AddedEvent(context.Background(), &user.NewAggregate("userID", "org1").Aggregate, "at_accessTokenID"), + ), + ), + idGenerator: mock.NewIDGeneratorExpectIDs(t, "oidcSessionID", "accessTokenID"), + defaultAccessTokenLifetime: time.Hour, + defaultRefreshTokenLifetime: 7 * 24 * time.Hour, + defaultRefreshTokenIdleLifetime: 24 * time.Hour, + keyAlgorithm: crypto.CreateMockEncryptionAlg(gomock.NewController(t)), + }, + args: args{ + ctx: authz.WithFeatures(authz.WithInstanceID(context.Background(), "instanceID"), feature.Features{EnableBackChannelLogout: true}), + userID: "userID", + resourceOwner: "org1", + clientID: "clientID", + backChannelLogoutURI: "backChannelLogoutURI", + audience: []string{"audience"}, + scope: []string{"openid", "offline_access"}, + authMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + authTime: testNow, + nonce: "nonce", + preferredLanguage: &language.Afrikaans, + userAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + reason: domain.TokenReasonAuthRequest, + actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + needRefreshToken: false, + sessionID: "sessionID", + }, + want: &OIDCSession{ + TokenID: "V2_oidcSessionID-at_accessTokenID", + ClientID: "clientID", + UserID: "userID", + Audience: []string{"audience"}, + Expiration: time.Time{}.Add(time.Hour), + Scope: []string{"openid", "offline_access"}, + AuthMethods: []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, + AuthTime: testNow, + Nonce: "nonce", + PreferredLanguage: &language.Afrikaans, + UserAgent: &domain.UserAgent{ + FingerprintID: gu.Ptr("fp1"), + IP: net.ParseIP("1.2.3.4"), + Description: gu.Ptr("firefox"), + Header: http.Header{"foo": []string{"bar"}}, + }, + Reason: domain.TokenReasonAuthRequest, + Actor: &domain.TokenActor{ + UserID: "user2", + Issuer: "foo.com", + }, + SessionID: "sessionID", + }, + }, { name: "impersonation not allowed", fields: fields{ @@ -1412,6 +1717,7 @@ func TestCommands_CreateOIDCSession(t *testing.T) { tt.args.userID, tt.args.resourceOwner, tt.args.clientID, + tt.args.backChannelLogoutURI, tt.args.scope, tt.args.audience, tt.args.authMethods, diff --git a/internal/command/project_application_oidc.go b/internal/command/project_application_oidc.go index 9852bea23b..ac486b2e18 100644 --- a/internal/command/project_application_oidc.go +++ b/internal/command/project_application_oidc.go @@ -31,6 +31,7 @@ type addOIDCApp struct { ClockSkew time.Duration AdditionalOrigins []string SkipSuccessPageForNativeApp bool + BackChannelLogoutURI string ClientID string ClientSecret string @@ -108,6 +109,7 @@ func (c *Commands) AddOIDCAppCommand(app *addOIDCApp) preparation.Validation { app.ClockSkew, trimStringSliceWhiteSpaces(app.AdditionalOrigins), app.SkipSuccessPageForNativeApp, + app.BackChannelLogoutURI, ), }, nil }, nil @@ -199,6 +201,7 @@ func (c *Commands) 
addOIDCApplicationWithID(ctx context.Context, oidcApp *domain oidcApp.ClockSkew, trimStringSliceWhiteSpaces(oidcApp.AdditionalOrigins), oidcApp.SkipNativeAppSuccessPage, + strings.TrimSpace(oidcApp.BackChannelLogoutURI), )) addedApplication.AppID = oidcApp.AppID @@ -256,6 +259,7 @@ func (c *Commands) ChangeOIDCApplication(ctx context.Context, oidc *domain.OIDCA oidc.ClockSkew, trimStringSliceWhiteSpaces(oidc.AdditionalOrigins), oidc.SkipNativeAppSuccessPage, + strings.TrimSpace(oidc.BackChannelLogoutURI), ) if err != nil { return nil, err diff --git a/internal/command/project_application_oidc_model.go b/internal/command/project_application_oidc_model.go index 585fdf6c1d..1ab0ad00d6 100644 --- a/internal/command/project_application_oidc_model.go +++ b/internal/command/project_application_oidc_model.go @@ -36,6 +36,7 @@ type OIDCApplicationWriteModel struct { State domain.AppState AdditionalOrigins []string SkipNativeAppSuccessPage bool + BackChannelLogoutURI string oidc bool } @@ -165,6 +166,7 @@ func (wm *OIDCApplicationWriteModel) appendAddOIDCEvent(e *project.OIDCConfigAdd wm.ClockSkew = e.ClockSkew wm.AdditionalOrigins = e.AdditionalOrigins wm.SkipNativeAppSuccessPage = e.SkipNativeAppSuccessPage + wm.BackChannelLogoutURI = e.BackChannelLogoutURI } func (wm *OIDCApplicationWriteModel) appendChangeOIDCEvent(e *project.OIDCConfigChangedEvent) { @@ -213,6 +215,9 @@ func (wm *OIDCApplicationWriteModel) appendChangeOIDCEvent(e *project.OIDCConfig if e.SkipNativeAppSuccessPage != nil { wm.SkipNativeAppSuccessPage = *e.SkipNativeAppSuccessPage } + if e.BackChannelLogoutURI != nil { + wm.BackChannelLogoutURI = *e.BackChannelLogoutURI + } } func (wm *OIDCApplicationWriteModel) Query() *eventstore.SearchQueryBuilder { @@ -254,6 +259,7 @@ func (wm *OIDCApplicationWriteModel) NewChangedEvent( clockSkew time.Duration, additionalOrigins []string, skipNativeAppSuccessPage bool, + backChannelLogoutURI string, ) (*project.OIDCConfigChangedEvent, bool, error) { changes := make([]project.OIDCConfigChanges, 0) var err error @@ -303,6 +309,9 @@ func (wm *OIDCApplicationWriteModel) NewChangedEvent( if wm.SkipNativeAppSuccessPage != skipNativeAppSuccessPage { changes = append(changes, project.ChangeSkipNativeAppSuccessPage(skipNativeAppSuccessPage)) } + if wm.BackChannelLogoutURI != backChannelLogoutURI { + changes = append(changes, project.ChangeBackChannelLogoutURI(backChannelLogoutURI)) + } if len(changes) == 0 { return nil, false, nil diff --git a/internal/command/project_application_oidc_test.go b/internal/command/project_application_oidc_test.go index 01c848cd2e..8c79d03f82 100644 --- a/internal/command/project_application_oidc_test.go +++ b/internal/command/project_application_oidc_test.go @@ -175,6 +175,7 @@ func TestAddOIDCApp(t *testing.T) { 0, []string{"https://sub.test.ch"}, false, + "", ), }, }, @@ -240,6 +241,7 @@ func TestAddOIDCApp(t *testing.T) { 0, nil, false, + "", ), }, }, @@ -305,6 +307,7 @@ func TestAddOIDCApp(t *testing.T) { 0, nil, false, + "", ), }, }, @@ -370,6 +373,7 @@ func TestAddOIDCApp(t *testing.T) { 0, nil, false, + "", ), }, }, @@ -516,6 +520,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, true, + "https://test.ch/backchannel", ), ), ), @@ -543,6 +548,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{" https://sub.test.ch "}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: " https://test.ch/backchannel ", }, resourceOwner: "org1", }, @@ 
-571,6 +577,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", State: domain.AppStateActive, Compliance: &domain.Compliance{}, }, @@ -614,6 +621,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, true, + "https://test.ch/backchannel", ), ), ), @@ -641,6 +649,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", }, resourceOwner: "org1", }, @@ -669,6 +678,7 @@ func TestCommandSide_AddOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", State: domain.AppStateActive, Compliance: &domain.Compliance{}, }, @@ -847,6 +857,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, true, + "https://test.ch/backchannel", ), ), ), @@ -875,6 +886,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", }, resourceOwner: "org1", }, @@ -916,6 +928,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, true, + "https://test.ch/backchannel", ), ), ), @@ -944,6 +957,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{" https://sub.test.ch "}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: " https://test.ch/backchannel ", }, resourceOwner: "org1", }, @@ -985,6 +999,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, true, + "https://test.ch/backchannel", ), ), ), @@ -1019,6 +1034,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { ClockSkew: time.Second * 2, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", }, resourceOwner: "org1", }, @@ -1046,6 +1062,7 @@ func TestCommandSide_ChangeOIDCApplication(t *testing.T) { ClockSkew: time.Second * 2, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "https://test.ch/backchannel", Compliance: &domain.Compliance{}, State: domain.AppStateActive, }, @@ -1170,6 +1187,7 @@ func TestCommandSide_ChangeOIDCApplicationSecret(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, false, + "", ), ), ), @@ -1213,6 +1231,7 @@ func TestCommandSide_ChangeOIDCApplicationSecret(t *testing.T) { ClockSkew: time.Second * 1, AdditionalOrigins: []string{"https://sub.test.ch"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "", State: domain.AppStateActive, }, }, @@ -1327,6 +1346,7 @@ func TestCommands_VerifyOIDCClientSecret(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, false, + "", ), ), ), @@ -1362,6 +1382,7 @@ func TestCommands_VerifyOIDCClientSecret(t *testing.T) { time.Second*1, []string{"https://sub.test.ch"}, false, + "", ), ), ), @@ -1396,6 +1417,7 @@ func TestCommands_VerifyOIDCClientSecret(t *testing.T) { time.Second*1, 
[]string{"https://sub.test.ch"}, false, + "", ), ), ), diff --git a/internal/command/project_converter.go b/internal/command/project_converter.go index 35679d8a14..079dc85654 100644 --- a/internal/command/project_converter.go +++ b/internal/command/project_converter.go @@ -47,6 +47,7 @@ func oidcWriteModelToOIDCConfig(writeModel *OIDCApplicationWriteModel) *domain.O ClockSkew: writeModel.ClockSkew, AdditionalOrigins: writeModel.AdditionalOrigins, SkipNativeAppSuccessPage: writeModel.SkipNativeAppSuccessPage, + BackChannelLogoutURI: writeModel.BackChannelLogoutURI, } } diff --git a/internal/command/system_features.go b/internal/command/system_features.go index e024a6dd18..f089ada207 100644 --- a/internal/command/system_features.go +++ b/internal/command/system_features.go @@ -19,6 +19,7 @@ type SystemFeatures struct { ImprovedPerformance []feature.ImprovedPerformanceType OIDCSingleV1SessionTermination *bool DisableUserTokenEvent *bool + EnableBackChannelLogout *bool } func (m *SystemFeatures) isEmpty() bool { @@ -31,7 +32,8 @@ func (m *SystemFeatures) isEmpty() bool { // nil check to allow unset improvements m.ImprovedPerformance == nil && m.OIDCSingleV1SessionTermination == nil && - m.DisableUserTokenEvent == nil + m.DisableUserTokenEvent == nil && + m.EnableBackChannelLogout == nil } func (c *Commands) SetSystemFeatures(ctx context.Context, f *SystemFeatures) (*domain.ObjectDetails, error) { diff --git a/internal/command/system_features_model.go b/internal/command/system_features_model.go index 5cc70338bb..d3fca66fea 100644 --- a/internal/command/system_features_model.go +++ b/internal/command/system_features_model.go @@ -62,6 +62,7 @@ func (m *SystemFeaturesWriteModel) Query() *eventstore.SearchQueryBuilder { feature_v2.SystemImprovedPerformanceEventType, feature_v2.SystemOIDCSingleV1SessionTerminationEventType, feature_v2.SystemDisableUserTokenEvent, + feature_v2.SystemEnableBackChannelLogout, ). 
Builder().ResourceOwner(m.ResourceOwner) } @@ -100,6 +101,9 @@ func reduceSystemFeature(features *SystemFeatures, key feature.Key, value any) { case feature.KeyDisableUserTokenEvent: v := value.(bool) features.DisableUserTokenEvent = &v + case feature.KeyEnableBackChannelLogout: + v := value.(bool) + features.EnableBackChannelLogout = &v } } @@ -115,6 +119,7 @@ func (wm *SystemFeaturesWriteModel) setCommands(ctx context.Context, f *SystemFe cmds = appendFeatureSliceUpdate(ctx, cmds, aggregate, wm.ImprovedPerformance, f.ImprovedPerformance, feature_v2.SystemImprovedPerformanceEventType) cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.OIDCSingleV1SessionTermination, f.OIDCSingleV1SessionTermination, feature_v2.SystemOIDCSingleV1SessionTerminationEventType) cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.DisableUserTokenEvent, f.DisableUserTokenEvent, feature_v2.SystemDisableUserTokenEvent) + cmds = appendFeatureUpdate(ctx, cmds, aggregate, wm.EnableBackChannelLogout, f.EnableBackChannelLogout, feature_v2.SystemEnableBackChannelLogout) return cmds } diff --git a/internal/command/user_human.go b/internal/command/user_human.go index 825ae50f9c..91739e0d6d 100644 --- a/internal/command/user_human.go +++ b/internal/command/user_human.go @@ -628,16 +628,21 @@ func createAddHumanEvent(ctx context.Context, aggregate *eventstore.Aggregate, h return addEvent } -func (c *Commands) HumansSignOut(ctx context.Context, agentID string, userIDs []string) error { +type HumanSignOutSession struct { + ID string + UserID string +} + +func (c *Commands) HumansSignOut(ctx context.Context, agentID string, sessions []HumanSignOutSession) error { if agentID == "" { return zerrors.ThrowInvalidArgument(nil, "COMMAND-2M0ds", "Errors.User.UserIDMissing") } - if len(userIDs) == 0 { + if len(sessions) == 0 { return zerrors.ThrowInvalidArgument(nil, "COMMAND-M0od3", "Errors.User.UserIDMissing") } events := make([]eventstore.Command, 0) - for _, userID := range userIDs { - existingUser, err := c.getHumanWriteModelByID(ctx, userID, "") + for _, session := range sessions { + existingUser, err := c.getHumanWriteModelByID(ctx, session.UserID, "") if err != nil { return err } @@ -647,7 +652,9 @@ func (c *Commands) HumansSignOut(ctx context.Context, agentID string, userIDs [] events = append(events, user.NewHumanSignedOutEvent( ctx, UserAggregateFromWriteModel(&existingUser.WriteModel), - agentID)) + agentID, + session.ID, + )) } if len(events) == 0 { return nil diff --git a/internal/command/user_human_test.go b/internal/command/user_human_test.go index fbf3523fc9..78d7248516 100644 --- a/internal/command/user_human_test.go +++ b/internal/command/user_human_test.go @@ -3123,9 +3123,9 @@ func TestCommandSide_HumanSignOut(t *testing.T) { } type ( args struct { - ctx context.Context - agentID string - userIDs []string + ctx context.Context + agentID string + sessions []HumanSignOutSession } ) type res struct { @@ -3144,9 +3144,9 @@ func TestCommandSide_HumanSignOut(t *testing.T) { eventstore: expectEventstore(), }, args: args{ - ctx: context.Background(), - agentID: "", - userIDs: []string{"user1"}, + ctx: context.Background(), + agentID: "", + sessions: []HumanSignOutSession{{ID: "session1", UserID: "user1"}}, }, res: res{ err: zerrors.IsErrorInvalidArgument, @@ -3158,9 +3158,9 @@ func TestCommandSide_HumanSignOut(t *testing.T) { eventstore: expectEventstore(), }, args: args{ - ctx: context.Background(), - agentID: "agent1", - userIDs: []string{}, + ctx: context.Background(), + agentID: "agent1", + sessions: 
[]HumanSignOutSession{}, }, res: res{ err: zerrors.IsErrorInvalidArgument, @@ -3174,9 +3174,9 @@ func TestCommandSide_HumanSignOut(t *testing.T) { ), }, args: args{ - ctx: context.Background(), - agentID: "agent1", - userIDs: []string{"user1"}, + ctx: context.Background(), + agentID: "agent1", + sessions: []HumanSignOutSession{{ID: "session1", UserID: "user1"}}, }, res: res{}, }, @@ -3204,14 +3204,15 @@ func TestCommandSide_HumanSignOut(t *testing.T) { user.NewHumanSignedOutEvent(context.Background(), &user.NewAggregate("user1", "org1").Aggregate, "agent1", + "session1", ), ), ), }, args: args{ - ctx: context.Background(), - agentID: "agent1", - userIDs: []string{"user1"}, + ctx: context.Background(), + agentID: "agent1", + sessions: []HumanSignOutSession{{ID: "session1", UserID: "user1"}}, }, res: res{ want: &domain.ObjectDetails{ @@ -3259,18 +3260,20 @@ func TestCommandSide_HumanSignOut(t *testing.T) { user.NewHumanSignedOutEvent(context.Background(), &user.NewAggregate("user1", "org1").Aggregate, "agent1", + "session1", ), user.NewHumanSignedOutEvent(context.Background(), &user.NewAggregate("user2", "org1").Aggregate, "agent1", + "session2", ), ), ), }, args: args{ - ctx: context.Background(), - agentID: "agent1", - userIDs: []string{"user1", "user2"}, + ctx: context.Background(), + agentID: "agent1", + sessions: []HumanSignOutSession{{ID: "session1", UserID: "user1"}, {ID: "session2", UserID: "user2"}}, }, res: res{ want: &domain.ObjectDetails{ @@ -3284,7 +3287,7 @@ func TestCommandSide_HumanSignOut(t *testing.T) { r := &Commands{ eventstore: tt.fields.eventstore(t), } - err := r.HumansSignOut(tt.args.ctx, tt.args.agentID, tt.args.userIDs) + err := r.HumansSignOut(tt.args.ctx, tt.args.agentID, tt.args.sessions) if tt.res.err == nil { assert.NoError(t, err) } diff --git a/internal/domain/application_oidc.go b/internal/domain/application_oidc.go index 5fe7b1f698..1ffb61f538 100644 --- a/internal/domain/application_oidc.go +++ b/internal/domain/application_oidc.go @@ -45,6 +45,7 @@ type OIDCApp struct { ClockSkew time.Duration AdditionalOrigins []string SkipNativeAppSuccessPage bool + BackChannelLogoutURI string State AppState } diff --git a/internal/feature/feature.go b/internal/feature/feature.go index 3104f6ed59..1d619b25d8 100644 --- a/internal/feature/feature.go +++ b/internal/feature/feature.go @@ -18,6 +18,7 @@ const ( KeyDebugOIDCParentError KeyOIDCSingleV1SessionTermination KeyDisableUserTokenEvent + KeyEnableBackChannelLogout ) //go:generate enumer -type Level -transform snake -trimprefix Level @@ -43,8 +44,9 @@ type Features struct { ImprovedPerformance []ImprovedPerformanceType `json:"improved_performance,omitempty"` WebKey bool `json:"web_key,omitempty"` DebugOIDCParentError bool `json:"debug_oidc_parent_error,omitempty"` - OIDCSingleV1SessionTermination bool `json:"terminate_single_v1_session,omitempty"` + OIDCSingleV1SessionTermination bool `json:"oidc_single_v1_session_termination,omitempty"` DisableUserTokenEvent bool `json:"disable_user_token_event,omitempty"` + EnableBackChannelLogout bool `json:"enable_back_channel_logout,omitempty"` } type ImprovedPerformanceType int32 diff --git a/internal/feature/key_enumer.go b/internal/feature/key_enumer.go index 46d8613fbc..db3cf4161e 100644 --- a/internal/feature/key_enumer.go +++ b/internal/feature/key_enumer.go @@ -7,11 +7,11 @@ import ( "strings" ) -const _KeyName = 
"unspecifiedlogin_default_orgtrigger_introspection_projectionslegacy_introspectionuser_schematoken_exchangeactionsimproved_performanceweb_keydebug_oidc_parent_erroroidc_single_v1_session_terminationdisable_user_token_event" +const _KeyName = "unspecifiedlogin_default_orgtrigger_introspection_projectionslegacy_introspectionuser_schematoken_exchangeactionsimproved_performanceweb_keydebug_oidc_parent_erroroidc_single_v1_session_terminationdisable_user_token_eventenable_back_channel_logout" -var _KeyIndex = [...]uint8{0, 11, 28, 61, 81, 92, 106, 113, 133, 140, 163, 197, 221} +var _KeyIndex = [...]uint8{0, 11, 28, 61, 81, 92, 106, 113, 133, 140, 163, 197, 221, 247} -const _KeyLowerName = "unspecifiedlogin_default_orgtrigger_introspection_projectionslegacy_introspectionuser_schematoken_exchangeactionsimproved_performanceweb_keydebug_oidc_parent_erroroidc_single_v1_session_terminationdisable_user_token_event" +const _KeyLowerName = "unspecifiedlogin_default_orgtrigger_introspection_projectionslegacy_introspectionuser_schematoken_exchangeactionsimproved_performanceweb_keydebug_oidc_parent_erroroidc_single_v1_session_terminationdisable_user_token_eventenable_back_channel_logout" func (i Key) String() string { if i < 0 || i >= Key(len(_KeyIndex)-1) { @@ -36,9 +36,10 @@ func _KeyNoOp() { _ = x[KeyDebugOIDCParentError-(9)] _ = x[KeyOIDCSingleV1SessionTermination-(10)] _ = x[KeyDisableUserTokenEvent-(11)] + _ = x[KeyEnableBackChannelLogout-(12)] } -var _KeyValues = []Key{KeyUnspecified, KeyLoginDefaultOrg, KeyTriggerIntrospectionProjections, KeyLegacyIntrospection, KeyUserSchema, KeyTokenExchange, KeyActions, KeyImprovedPerformance, KeyWebKey, KeyDebugOIDCParentError, KeyOIDCSingleV1SessionTermination, KeyDisableUserTokenEvent} +var _KeyValues = []Key{KeyUnspecified, KeyLoginDefaultOrg, KeyTriggerIntrospectionProjections, KeyLegacyIntrospection, KeyUserSchema, KeyTokenExchange, KeyActions, KeyImprovedPerformance, KeyWebKey, KeyDebugOIDCParentError, KeyOIDCSingleV1SessionTermination, KeyDisableUserTokenEvent, KeyEnableBackChannelLogout} var _KeyNameToValueMap = map[string]Key{ _KeyName[0:11]: KeyUnspecified, @@ -65,6 +66,8 @@ var _KeyNameToValueMap = map[string]Key{ _KeyLowerName[163:197]: KeyOIDCSingleV1SessionTermination, _KeyName[197:221]: KeyDisableUserTokenEvent, _KeyLowerName[197:221]: KeyDisableUserTokenEvent, + _KeyName[221:247]: KeyEnableBackChannelLogout, + _KeyLowerName[221:247]: KeyEnableBackChannelLogout, } var _KeyNames = []string{ @@ -80,6 +83,7 @@ var _KeyNames = []string{ _KeyName[140:163], _KeyName[163:197], _KeyName[197:221], + _KeyName[221:247], } // KeyString retrieves an enum value from the enum constants string name. 
diff --git a/internal/notification/channels.go b/internal/notification/channels.go index c70eaecbcc..ba9bcb9d7d 100644 --- a/internal/notification/channels.go +++ b/internal/notification/channels.go @@ -6,6 +6,7 @@ import ( "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/notification/channels/email" + "github.com/zitadel/zitadel/internal/notification/channels/set" "github.com/zitadel/zitadel/internal/notification/channels/sms" "github.com/zitadel/zitadel/internal/notification/channels/webhook" "github.com/zitadel/zitadel/internal/notification/handlers" @@ -104,3 +105,14 @@ func (c *channels) Webhook(ctx context.Context, cfg webhook.Config) (*senders.Ch c.counters.failed.json, ) } + +func (c *channels) SecurityTokenEvent(ctx context.Context, cfg set.Config) (*senders.Chain, error) { + return senders.SecurityEventTokenChannels( + ctx, + cfg, + c.q.GetFileSystemProvider, + c.q.GetLogProvider, + c.counters.success.json, + c.counters.failed.json, + ) +} diff --git a/internal/notification/channels/set/channel.go b/internal/notification/channels/set/channel.go new file mode 100644 index 0000000000..fbd4065739 --- /dev/null +++ b/internal/notification/channels/set/channel.go @@ -0,0 +1,75 @@ +package set + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "time" + + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/notification/channels" + "github.com/zitadel/zitadel/internal/notification/messages" + "github.com/zitadel/zitadel/internal/zerrors" +) + +func InitChannel(ctx context.Context, cfg Config) (channels.NotificationChannel, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + + logging.Debug("successfully initialized security event token json channel") + return channels.HandleMessageFunc(func(message channels.Message) error { + requestCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + msg, ok := message.(*messages.Form) + if !ok { + return zerrors.ThrowInternal(nil, "SET-K686U", "message is not SET") + } + payload, err := msg.GetContent() + if err != nil { + return err + } + req, err := http.NewRequestWithContext(requestCtx, http.MethodPost, cfg.CallURL, strings.NewReader(payload)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + logging.WithFields("instanceID", authz.GetInstance(ctx).InstanceID(), "calling_url", cfg.CallURL).Debug("security event token called") + if resp.StatusCode == http.StatusOK || + resp.StatusCode == http.StatusAccepted || + resp.StatusCode == http.StatusNoContent { + return nil + } + body, err := mapResponse(resp) + logging.WithFields("instanceID", authz.GetInstance(ctx).InstanceID(), "callURL", cfg.CallURL). + OnError(err).Debug("error mapping response") + if resp.StatusCode == http.StatusBadRequest { + logging.WithFields("instanceID", authz.GetInstance(ctx).InstanceID(), "callURL", cfg.CallURL, "status", resp.Status, "body", body). 
+ Error("security event token didn't return a success status") + return nil + } + return zerrors.ThrowInternalf(err, "SET-DF3dq", "security event token to %s didn't return a success status: %s (%v)", cfg.CallURL, resp.Status, body) + }), nil +} + +func mapResponse(resp *http.Response) (map[string]any, error) { + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + requestError := make(map[string]any) + err = json.Unmarshal(body, &requestError) + if err != nil { + return nil, err + } + return requestError, nil +} diff --git a/internal/notification/channels/set/config.go b/internal/notification/channels/set/config.go new file mode 100644 index 0000000000..5d28b3d110 --- /dev/null +++ b/internal/notification/channels/set/config.go @@ -0,0 +1,14 @@ +package set + +import ( + "net/url" +) + +type Config struct { + CallURL string +} + +func (w *Config) Validate() error { + _, err := url.Parse(w.CallURL) + return err +} diff --git a/internal/notification/handlers/back_channel_logout.go b/internal/notification/handlers/back_channel_logout.go new file mode 100644 index 0000000000..43d98ada11 --- /dev/null +++ b/internal/notification/handlers/back_channel_logout.go @@ -0,0 +1,266 @@ +package handlers + +import ( + "context" + "errors" + "slices" + "sync" + "time" + + "github.com/zitadel/logging" + "github.com/zitadel/oidc/v3/pkg/crypto" + "github.com/zitadel/oidc/v3/pkg/oidc" + "github.com/zitadel/oidc/v3/pkg/op" + + "github.com/zitadel/zitadel/internal/api/authz" + http_utils "github.com/zitadel/zitadel/internal/api/http" + zoidc "github.com/zitadel/zitadel/internal/api/oidc" + "github.com/zitadel/zitadel/internal/command" + zcrypto "github.com/zitadel/zitadel/internal/crypto" + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/eventstore/handler/v2" + "github.com/zitadel/zitadel/internal/id" + "github.com/zitadel/zitadel/internal/notification/channels/set" + _ "github.com/zitadel/zitadel/internal/notification/statik" + "github.com/zitadel/zitadel/internal/notification/types" + "github.com/zitadel/zitadel/internal/repository/session" + "github.com/zitadel/zitadel/internal/repository/sessionlogout" + "github.com/zitadel/zitadel/internal/repository/user" + "github.com/zitadel/zitadel/internal/zerrors" +) + +const ( + BackChannelLogoutNotificationsProjectionTable = "projections.notifications_back_channel_logout" +) + +type backChannelLogoutNotifier struct { + commands *command.Commands + queries *NotificationQueries + eventstore *eventstore.Eventstore + keyEncryptionAlg zcrypto.EncryptionAlgorithm + channels types.ChannelChains + idGenerator id.Generator + tokenLifetime time.Duration +} + +func NewBackChannelLogoutNotifier( + ctx context.Context, + config handler.Config, + commands *command.Commands, + queries *NotificationQueries, + es *eventstore.Eventstore, + keyEncryptionAlg zcrypto.EncryptionAlgorithm, + channels types.ChannelChains, + tokenLifetime time.Duration, +) *handler.Handler { + return handler.NewHandler(ctx, &config, &backChannelLogoutNotifier{ + commands: commands, + queries: queries, + eventstore: es, + keyEncryptionAlg: keyEncryptionAlg, + channels: channels, + tokenLifetime: tokenLifetime, + idGenerator: id.SonyFlakeGenerator(), + }) + +} + +func (*backChannelLogoutNotifier) Name() string { + return BackChannelLogoutNotificationsProjectionTable +} + +func (u *backChannelLogoutNotifier) Reducers() []handler.AggregateReducer { + return []handler.AggregateReducer{ + { + Aggregate: 
session.AggregateType, + EventReducers: []handler.EventReducer{ + { + Event: session.TerminateType, + Reduce: u.reduceSessionTerminated, + }, + }, + }, { + Aggregate: user.AggregateType, + EventReducers: []handler.EventReducer{ + { + Event: user.HumanSignedOutType, + Reduce: u.reduceUserSignedOut, + }, + }, + }, + } +} + +func (u *backChannelLogoutNotifier) reduceUserSignedOut(event eventstore.Event) (*handler.Statement, error) { + e, ok := event.(*user.HumanSignedOutEvent) + if !ok { + return nil, zerrors.ThrowInvalidArgumentf(nil, "HANDL-Gr63h", "reduce.wrong.event.type %s", user.HumanSignedOutType) + } + + return handler.NewStatement(event, func(ex handler.Executer, projectionName string) error { + ctx, err := u.queries.HandlerContext(event.Aggregate()) + if err != nil { + return err + } + if !authz.GetFeatures(ctx).EnableBackChannelLogout { + return nil + } + if e.SessionID == "" { + return nil + } + return u.terminateSession(ctx, e.SessionID, e) + }), nil +} + +func (u *backChannelLogoutNotifier) reduceSessionTerminated(event eventstore.Event) (*handler.Statement, error) { + e, ok := event.(*session.TerminateEvent) + if !ok { + return nil, zerrors.ThrowInvalidArgumentf(nil, "HANDL-D6H2h", "reduce.wrong.event.type %s", session.TerminateType) + } + + return handler.NewStatement(event, func(ex handler.Executer, projectionName string) error { + ctx, err := u.queries.HandlerContext(event.Aggregate()) + if err != nil { + return err + } + if !authz.GetFeatures(ctx).EnableBackChannelLogout { + return nil + } + return u.terminateSession(ctx, e.Aggregate().ID, e) + }), nil +} + +type backChannelLogoutSession struct { + sessionID string + + // sessions contain a map of oidc session IDs and their corresponding clientID + sessions []backChannelLogoutOIDCSessions +} + +func (u *backChannelLogoutNotifier) terminateSession(ctx context.Context, id string, e eventstore.Event) error { + sessions := &backChannelLogoutSession{sessionID: id} + err := u.eventstore.FilterToQueryReducer(ctx, sessions) + if err != nil { + return err + } + + ctx, err = u.queries.Origin(ctx, e) + if err != nil { + return err + } + + getSigner := zoidc.GetSignerOnce(u.queries.GetActiveSigningWebKey, u.signingKey) + + var wg sync.WaitGroup + wg.Add(len(sessions.sessions)) + errs := make([]error, 0, len(sessions.sessions)) + for _, oidcSession := range sessions.sessions { + go func(oidcSession *backChannelLogoutOIDCSessions) { + defer wg.Done() + err := u.sendLogoutToken(ctx, oidcSession, e, getSigner) + if err != nil { + errs = append(errs, err) + return + } + err = u.commands.BackChannelLogoutSent(ctx, oidcSession.SessionID, oidcSession.OIDCSessionID, e.Aggregate().InstanceID) + if err != nil { + errs = append(errs, err) + } + }(&oidcSession) + } + wg.Wait() + return errors.Join(errs...) +} + +func (u *backChannelLogoutNotifier) signingKey(ctx context.Context) (op.SigningKey, error) { + keys, err := u.queries.ActivePrivateSigningKey(ctx, time.Now()) + if err != nil { + return nil, err + } + if len(keys.Keys) == 0 { + logging.WithFields("instanceID", authz.GetInstance(ctx).InstanceID()). + Info("There's no active signing key and automatic rotation is not supported for back channel logout." 
+ + "Please enable the webkey management feature on your instance") + return nil, zerrors.ThrowPreconditionFailed(nil, "HANDL-DF3nf", "no active signing key") + } + return zoidc.PrivateKeyToSigningKey(zoidc.SelectSigningKey(keys.Keys), u.keyEncryptionAlg) +} + +func (u *backChannelLogoutNotifier) sendLogoutToken(ctx context.Context, oidcSession *backChannelLogoutOIDCSessions, e eventstore.Event, getSigner zoidc.SignerFunc) error { + token, err := u.logoutToken(ctx, oidcSession, getSigner) + if err != nil { + return err + } + err = types.SendSecurityTokenEvent(ctx, set.Config{CallURL: oidcSession.BackChannelLogoutURI}, u.channels, &LogoutTokenMessage{LogoutToken: token}, e).WithoutTemplate() + if err != nil { + return err + } + return nil +} + +func (u *backChannelLogoutNotifier) logoutToken(ctx context.Context, oidcSession *backChannelLogoutOIDCSessions, getSigner zoidc.SignerFunc) (string, error) { + jwtID, err := u.idGenerator.Next() + if err != nil { + return "", err + } + token := oidc.NewLogoutTokenClaims( + http_utils.DomainContext(ctx).Origin(), + oidcSession.UserID, + oidc.Audience{oidcSession.ClientID}, + time.Now().Add(u.tokenLifetime), + jwtID, + oidcSession.SessionID, + time.Second, + ) + signer, _, err := getSigner(ctx) + if err != nil { + return "", err + } + return crypto.Sign(token, signer) +} + +type LogoutTokenMessage struct { + LogoutToken string `schema:"logout_token"` +} + +type backChannelLogoutOIDCSessions struct { + SessionID string + OIDCSessionID string + UserID string + ClientID string + BackChannelLogoutURI string +} + +func (b *backChannelLogoutSession) Reduce() error { + return nil +} + +func (b *backChannelLogoutSession) AppendEvents(events ...eventstore.Event) { + for _, event := range events { + switch e := event.(type) { + case *sessionlogout.BackChannelLogoutRegisteredEvent: + b.sessions = append(b.sessions, backChannelLogoutOIDCSessions{ + SessionID: b.sessionID, + OIDCSessionID: e.OIDCSessionID, + UserID: e.UserID, + ClientID: e.ClientID, + BackChannelLogoutURI: e.BackChannelLogoutURI, + }) + case *sessionlogout.BackChannelLogoutSentEvent: + slices.DeleteFunc(b.sessions, func(session backChannelLogoutOIDCSessions) bool { + return session.OIDCSessionID == e.OIDCSessionID + }) + } + } +} + +func (b *backChannelLogoutSession) Query() *eventstore.SearchQueryBuilder { + return eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent). + AddQuery(). + AggregateTypes(sessionlogout.AggregateType). + AggregateIDs(b.sessionID). + EventTypes( + sessionlogout.BackChannelLogoutRegisteredType, + sessionlogout.BackChannelLogoutSentType). 
+ Builder() +} diff --git a/internal/notification/handlers/ctx.go b/internal/notification/handlers/ctx.go index 9dc6d87b1b..b8fc45da68 100644 --- a/internal/notification/handlers/ctx.go +++ b/internal/notification/handlers/ctx.go @@ -13,3 +13,13 @@ func HandlerContext(event *eventstore.Aggregate) context.Context { ctx := authz.WithInstanceID(context.Background(), event.InstanceID) return authz.SetCtxData(ctx, authz.CtxData{UserID: NotifyUserID, OrgID: event.ResourceOwner}) } + +func (n *NotificationQueries) HandlerContext(event *eventstore.Aggregate) (context.Context, error) { + ctx := context.Background() + instance, err := n.InstanceByID(ctx, event.InstanceID) + if err != nil { + return nil, err + } + ctx = authz.WithInstance(ctx, instance) + return authz.SetCtxData(ctx, authz.CtxData{UserID: NotifyUserID, OrgID: event.ResourceOwner}), nil +} diff --git a/internal/notification/handlers/mock/commands.mock.go b/internal/notification/handlers/mock/commands.mock.go index 7d41c30f30..ec327de8e8 100644 --- a/internal/notification/handlers/mock/commands.mock.go +++ b/internal/notification/handlers/mock/commands.mock.go @@ -23,6 +23,7 @@ import ( type MockCommands struct { ctrl *gomock.Controller recorder *MockCommandsMockRecorder + isgomock struct{} } // MockCommandsMockRecorder is the mock recorder for MockCommands. @@ -43,197 +44,197 @@ func (m *MockCommands) EXPECT() *MockCommandsMockRecorder { } // HumanEmailVerificationCodeSent mocks base method. -func (m *MockCommands) HumanEmailVerificationCodeSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) HumanEmailVerificationCodeSent(ctx context.Context, orgID, userID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanEmailVerificationCodeSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "HumanEmailVerificationCodeSent", ctx, orgID, userID) ret0, _ := ret[0].(error) return ret0 } // HumanEmailVerificationCodeSent indicates an expected call of HumanEmailVerificationCodeSent. -func (mr *MockCommandsMockRecorder) HumanEmailVerificationCodeSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanEmailVerificationCodeSent(ctx, orgID, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanEmailVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanEmailVerificationCodeSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanEmailVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanEmailVerificationCodeSent), ctx, orgID, userID) } // HumanInitCodeSent mocks base method. -func (m *MockCommands) HumanInitCodeSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) HumanInitCodeSent(ctx context.Context, orgID, userID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanInitCodeSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "HumanInitCodeSent", ctx, orgID, userID) ret0, _ := ret[0].(error) return ret0 } // HumanInitCodeSent indicates an expected call of HumanInitCodeSent. 
-func (mr *MockCommandsMockRecorder) HumanInitCodeSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanInitCodeSent(ctx, orgID, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanInitCodeSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanInitCodeSent), ctx, orgID, userID) } // HumanOTPEmailCodeSent mocks base method. -func (m *MockCommands) HumanOTPEmailCodeSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) HumanOTPEmailCodeSent(ctx context.Context, userID, resourceOwner string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanOTPEmailCodeSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "HumanOTPEmailCodeSent", ctx, userID, resourceOwner) ret0, _ := ret[0].(error) return ret0 } // HumanOTPEmailCodeSent indicates an expected call of HumanOTPEmailCodeSent. -func (mr *MockCommandsMockRecorder) HumanOTPEmailCodeSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanOTPEmailCodeSent(ctx, userID, resourceOwner any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPEmailCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPEmailCodeSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPEmailCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPEmailCodeSent), ctx, userID, resourceOwner) } // HumanOTPSMSCodeSent mocks base method. -func (m *MockCommands) HumanOTPSMSCodeSent(arg0 context.Context, arg1, arg2 string, arg3 *senders.CodeGeneratorInfo) error { +func (m *MockCommands) HumanOTPSMSCodeSent(ctx context.Context, userID, resourceOwner string, generatorInfo *senders.CodeGeneratorInfo) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanOTPSMSCodeSent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "HumanOTPSMSCodeSent", ctx, userID, resourceOwner, generatorInfo) ret0, _ := ret[0].(error) return ret0 } // HumanOTPSMSCodeSent indicates an expected call of HumanOTPSMSCodeSent. -func (mr *MockCommandsMockRecorder) HumanOTPSMSCodeSent(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanOTPSMSCodeSent(ctx, userID, resourceOwner, generatorInfo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPSMSCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPSMSCodeSent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPSMSCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPSMSCodeSent), ctx, userID, resourceOwner, generatorInfo) } // HumanPasswordlessInitCodeSent mocks base method. -func (m *MockCommands) HumanPasswordlessInitCodeSent(arg0 context.Context, arg1, arg2, arg3 string) error { +func (m *MockCommands) HumanPasswordlessInitCodeSent(ctx context.Context, userID, resourceOwner, codeID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanPasswordlessInitCodeSent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "HumanPasswordlessInitCodeSent", ctx, userID, resourceOwner, codeID) ret0, _ := ret[0].(error) return ret0 } // HumanPasswordlessInitCodeSent indicates an expected call of HumanPasswordlessInitCodeSent. 
-func (mr *MockCommandsMockRecorder) HumanPasswordlessInitCodeSent(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanPasswordlessInitCodeSent(ctx, userID, resourceOwner, codeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPasswordlessInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPasswordlessInitCodeSent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPasswordlessInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPasswordlessInitCodeSent), ctx, userID, resourceOwner, codeID) } // HumanPhoneVerificationCodeSent mocks base method. -func (m *MockCommands) HumanPhoneVerificationCodeSent(arg0 context.Context, arg1, arg2 string, arg3 *senders.CodeGeneratorInfo) error { +func (m *MockCommands) HumanPhoneVerificationCodeSent(ctx context.Context, orgID, userID string, generatorInfo *senders.CodeGeneratorInfo) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HumanPhoneVerificationCodeSent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "HumanPhoneVerificationCodeSent", ctx, orgID, userID, generatorInfo) ret0, _ := ret[0].(error) return ret0 } // HumanPhoneVerificationCodeSent indicates an expected call of HumanPhoneVerificationCodeSent. -func (mr *MockCommandsMockRecorder) HumanPhoneVerificationCodeSent(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) HumanPhoneVerificationCodeSent(ctx, orgID, userID, generatorInfo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPhoneVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPhoneVerificationCodeSent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPhoneVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPhoneVerificationCodeSent), ctx, orgID, userID, generatorInfo) } // InviteCodeSent mocks base method. -func (m *MockCommands) InviteCodeSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) InviteCodeSent(ctx context.Context, orgID, userID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InviteCodeSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "InviteCodeSent", ctx, orgID, userID) ret0, _ := ret[0].(error) return ret0 } // InviteCodeSent indicates an expected call of InviteCodeSent. -func (mr *MockCommandsMockRecorder) InviteCodeSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) InviteCodeSent(ctx, orgID, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InviteCodeSent", reflect.TypeOf((*MockCommands)(nil).InviteCodeSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InviteCodeSent", reflect.TypeOf((*MockCommands)(nil).InviteCodeSent), ctx, orgID, userID) } // MilestonePushed mocks base method. -func (m *MockCommands) MilestonePushed(arg0 context.Context, arg1 string, arg2 milestone.Type, arg3 []string) error { +func (m *MockCommands) MilestonePushed(ctx context.Context, instanceID string, msType milestone.Type, endpoints []string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MilestonePushed", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "MilestonePushed", ctx, instanceID, msType, endpoints) ret0, _ := ret[0].(error) return ret0 } // MilestonePushed indicates an expected call of MilestonePushed. 
-func (mr *MockCommandsMockRecorder) MilestonePushed(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) MilestonePushed(ctx, instanceID, msType, endpoints any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MilestonePushed", reflect.TypeOf((*MockCommands)(nil).MilestonePushed), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MilestonePushed", reflect.TypeOf((*MockCommands)(nil).MilestonePushed), ctx, instanceID, msType, endpoints) } // OTPEmailSent mocks base method. -func (m *MockCommands) OTPEmailSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) OTPEmailSent(ctx context.Context, sessionID, resourceOwner string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OTPEmailSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "OTPEmailSent", ctx, sessionID, resourceOwner) ret0, _ := ret[0].(error) return ret0 } // OTPEmailSent indicates an expected call of OTPEmailSent. -func (mr *MockCommandsMockRecorder) OTPEmailSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) OTPEmailSent(ctx, sessionID, resourceOwner any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPEmailSent", reflect.TypeOf((*MockCommands)(nil).OTPEmailSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPEmailSent", reflect.TypeOf((*MockCommands)(nil).OTPEmailSent), ctx, sessionID, resourceOwner) } // OTPSMSSent mocks base method. -func (m *MockCommands) OTPSMSSent(arg0 context.Context, arg1, arg2 string, arg3 *senders.CodeGeneratorInfo) error { +func (m *MockCommands) OTPSMSSent(ctx context.Context, sessionID, resourceOwner string, generatorInfo *senders.CodeGeneratorInfo) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OTPSMSSent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "OTPSMSSent", ctx, sessionID, resourceOwner, generatorInfo) ret0, _ := ret[0].(error) return ret0 } // OTPSMSSent indicates an expected call of OTPSMSSent. -func (mr *MockCommandsMockRecorder) OTPSMSSent(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) OTPSMSSent(ctx, sessionID, resourceOwner, generatorInfo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPSMSSent", reflect.TypeOf((*MockCommands)(nil).OTPSMSSent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPSMSSent", reflect.TypeOf((*MockCommands)(nil).OTPSMSSent), ctx, sessionID, resourceOwner, generatorInfo) } // PasswordChangeSent mocks base method. -func (m *MockCommands) PasswordChangeSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) PasswordChangeSent(ctx context.Context, orgID, userID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PasswordChangeSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "PasswordChangeSent", ctx, orgID, userID) ret0, _ := ret[0].(error) return ret0 } // PasswordChangeSent indicates an expected call of PasswordChangeSent. 
-func (mr *MockCommandsMockRecorder) PasswordChangeSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) PasswordChangeSent(ctx, orgID, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordChangeSent", reflect.TypeOf((*MockCommands)(nil).PasswordChangeSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordChangeSent", reflect.TypeOf((*MockCommands)(nil).PasswordChangeSent), ctx, orgID, userID) } // PasswordCodeSent mocks base method. -func (m *MockCommands) PasswordCodeSent(arg0 context.Context, arg1, arg2 string, arg3 *senders.CodeGeneratorInfo) error { +func (m *MockCommands) PasswordCodeSent(ctx context.Context, orgID, userID string, generatorInfo *senders.CodeGeneratorInfo) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PasswordCodeSent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "PasswordCodeSent", ctx, orgID, userID, generatorInfo) ret0, _ := ret[0].(error) return ret0 } // PasswordCodeSent indicates an expected call of PasswordCodeSent. -func (mr *MockCommandsMockRecorder) PasswordCodeSent(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) PasswordCodeSent(ctx, orgID, userID, generatorInfo any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordCodeSent", reflect.TypeOf((*MockCommands)(nil).PasswordCodeSent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordCodeSent", reflect.TypeOf((*MockCommands)(nil).PasswordCodeSent), ctx, orgID, userID, generatorInfo) } // UsageNotificationSent mocks base method. -func (m *MockCommands) UsageNotificationSent(arg0 context.Context, arg1 *quota.NotificationDueEvent) error { +func (m *MockCommands) UsageNotificationSent(ctx context.Context, dueEvent *quota.NotificationDueEvent) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UsageNotificationSent", arg0, arg1) + ret := m.ctrl.Call(m, "UsageNotificationSent", ctx, dueEvent) ret0, _ := ret[0].(error) return ret0 } // UsageNotificationSent indicates an expected call of UsageNotificationSent. -func (mr *MockCommandsMockRecorder) UsageNotificationSent(arg0, arg1 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) UsageNotificationSent(ctx, dueEvent any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UsageNotificationSent", reflect.TypeOf((*MockCommands)(nil).UsageNotificationSent), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UsageNotificationSent", reflect.TypeOf((*MockCommands)(nil).UsageNotificationSent), ctx, dueEvent) } // UserDomainClaimedSent mocks base method. -func (m *MockCommands) UserDomainClaimedSent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockCommands) UserDomainClaimedSent(ctx context.Context, orgID, userID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UserDomainClaimedSent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "UserDomainClaimedSent", ctx, orgID, userID) ret0, _ := ret[0].(error) return ret0 } // UserDomainClaimedSent indicates an expected call of UserDomainClaimedSent. 
-func (mr *MockCommandsMockRecorder) UserDomainClaimedSent(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCommandsMockRecorder) UserDomainClaimedSent(ctx, orgID, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserDomainClaimedSent", reflect.TypeOf((*MockCommands)(nil).UserDomainClaimedSent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserDomainClaimedSent", reflect.TypeOf((*MockCommands)(nil).UserDomainClaimedSent), ctx, orgID, userID) } diff --git a/internal/notification/handlers/mock/queries.mock.go b/internal/notification/handlers/mock/queries.mock.go index 48d7ec21ec..5669444d4f 100644 --- a/internal/notification/handlers/mock/queries.mock.go +++ b/internal/notification/handlers/mock/queries.mock.go @@ -12,7 +12,10 @@ package mock import ( context "context" reflect "reflect" + time "time" + jose "github.com/go-jose/go-jose/v4" + authz "github.com/zitadel/zitadel/internal/api/authz" domain "github.com/zitadel/zitadel/internal/domain" query "github.com/zitadel/zitadel/internal/query" gomock "go.uber.org/mock/gomock" @@ -23,6 +26,7 @@ import ( type MockQueries struct { ctrl *gomock.Controller recorder *MockQueriesMockRecorder + isgomock struct{} } // MockQueriesMockRecorder is the mock recorder for MockQueries. @@ -43,195 +47,240 @@ func (m *MockQueries) EXPECT() *MockQueriesMockRecorder { } // ActiveLabelPolicyByOrg mocks base method. -func (m *MockQueries) ActiveLabelPolicyByOrg(arg0 context.Context, arg1 string, arg2 bool) (*query.LabelPolicy, error) { +func (m *MockQueries) ActiveLabelPolicyByOrg(ctx context.Context, orgID string, withOwnerRemoved bool) (*query.LabelPolicy, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveLabelPolicyByOrg", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "ActiveLabelPolicyByOrg", ctx, orgID, withOwnerRemoved) ret0, _ := ret[0].(*query.LabelPolicy) ret1, _ := ret[1].(error) return ret0, ret1 } // ActiveLabelPolicyByOrg indicates an expected call of ActiveLabelPolicyByOrg. -func (mr *MockQueriesMockRecorder) ActiveLabelPolicyByOrg(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) ActiveLabelPolicyByOrg(ctx, orgID, withOwnerRemoved any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveLabelPolicyByOrg", reflect.TypeOf((*MockQueries)(nil).ActiveLabelPolicyByOrg), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveLabelPolicyByOrg", reflect.TypeOf((*MockQueries)(nil).ActiveLabelPolicyByOrg), ctx, orgID, withOwnerRemoved) +} + +// ActivePrivateSigningKey mocks base method. +func (m *MockQueries) ActivePrivateSigningKey(ctx context.Context, t time.Time) (*query.PrivateKeys, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ActivePrivateSigningKey", ctx, t) + ret0, _ := ret[0].(*query.PrivateKeys) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ActivePrivateSigningKey indicates an expected call of ActivePrivateSigningKey. +func (mr *MockQueriesMockRecorder) ActivePrivateSigningKey(ctx, t any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActivePrivateSigningKey", reflect.TypeOf((*MockQueries)(nil).ActivePrivateSigningKey), ctx, t) } // CustomTextListByTemplate mocks base method. 
-func (m *MockQueries) CustomTextListByTemplate(arg0 context.Context, arg1, arg2 string, arg3 bool) (*query.CustomTexts, error) { +func (m *MockQueries) CustomTextListByTemplate(ctx context.Context, aggregateID, template string, withOwnerRemoved bool) (*query.CustomTexts, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CustomTextListByTemplate", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "CustomTextListByTemplate", ctx, aggregateID, template, withOwnerRemoved) ret0, _ := ret[0].(*query.CustomTexts) ret1, _ := ret[1].(error) return ret0, ret1 } // CustomTextListByTemplate indicates an expected call of CustomTextListByTemplate. -func (mr *MockQueriesMockRecorder) CustomTextListByTemplate(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) CustomTextListByTemplate(ctx, aggregateID, template, withOwnerRemoved any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CustomTextListByTemplate", reflect.TypeOf((*MockQueries)(nil).CustomTextListByTemplate), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CustomTextListByTemplate", reflect.TypeOf((*MockQueries)(nil).CustomTextListByTemplate), ctx, aggregateID, template, withOwnerRemoved) +} + +// GetActiveSigningWebKey mocks base method. +func (m *MockQueries) GetActiveSigningWebKey(ctx context.Context) (*jose.JSONWebKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveSigningWebKey", ctx) + ret0, _ := ret[0].(*jose.JSONWebKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveSigningWebKey indicates an expected call of GetActiveSigningWebKey. +func (mr *MockQueriesMockRecorder) GetActiveSigningWebKey(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveSigningWebKey", reflect.TypeOf((*MockQueries)(nil).GetActiveSigningWebKey), ctx) } // GetDefaultLanguage mocks base method. -func (m *MockQueries) GetDefaultLanguage(arg0 context.Context) language.Tag { +func (m *MockQueries) GetDefaultLanguage(ctx context.Context) language.Tag { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDefaultLanguage", arg0) + ret := m.ctrl.Call(m, "GetDefaultLanguage", ctx) ret0, _ := ret[0].(language.Tag) return ret0 } // GetDefaultLanguage indicates an expected call of GetDefaultLanguage. -func (mr *MockQueriesMockRecorder) GetDefaultLanguage(arg0 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) GetDefaultLanguage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultLanguage", reflect.TypeOf((*MockQueries)(nil).GetDefaultLanguage), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultLanguage", reflect.TypeOf((*MockQueries)(nil).GetDefaultLanguage), ctx) } // GetInstanceRestrictions mocks base method. -func (m *MockQueries) GetInstanceRestrictions(arg0 context.Context) (query.Restrictions, error) { +func (m *MockQueries) GetInstanceRestrictions(ctx context.Context) (query.Restrictions, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetInstanceRestrictions", arg0) + ret := m.ctrl.Call(m, "GetInstanceRestrictions", ctx) ret0, _ := ret[0].(query.Restrictions) ret1, _ := ret[1].(error) return ret0, ret1 } // GetInstanceRestrictions indicates an expected call of GetInstanceRestrictions. 
-func (mr *MockQueriesMockRecorder) GetInstanceRestrictions(arg0 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) GetInstanceRestrictions(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceRestrictions", reflect.TypeOf((*MockQueries)(nil).GetInstanceRestrictions), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceRestrictions", reflect.TypeOf((*MockQueries)(nil).GetInstanceRestrictions), ctx) } // GetNotifyUserByID mocks base method. -func (m *MockQueries) GetNotifyUserByID(arg0 context.Context, arg1 bool, arg2 string) (*query.NotifyUser, error) { +func (m *MockQueries) GetNotifyUserByID(ctx context.Context, shouldTriggered bool, userID string) (*query.NotifyUser, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNotifyUserByID", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "GetNotifyUserByID", ctx, shouldTriggered, userID) ret0, _ := ret[0].(*query.NotifyUser) ret1, _ := ret[1].(error) return ret0, ret1 } // GetNotifyUserByID indicates an expected call of GetNotifyUserByID. -func (mr *MockQueriesMockRecorder) GetNotifyUserByID(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) GetNotifyUserByID(ctx, shouldTriggered, userID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotifyUserByID", reflect.TypeOf((*MockQueries)(nil).GetNotifyUserByID), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotifyUserByID", reflect.TypeOf((*MockQueries)(nil).GetNotifyUserByID), ctx, shouldTriggered, userID) +} + +// InstanceByID mocks base method. +func (m *MockQueries) InstanceByID(ctx context.Context, id string) (authz.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InstanceByID", ctx, id) + ret0, _ := ret[0].(authz.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InstanceByID indicates an expected call of InstanceByID. +func (mr *MockQueriesMockRecorder) InstanceByID(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceByID", reflect.TypeOf((*MockQueries)(nil).InstanceByID), ctx, id) } // MailTemplateByOrg mocks base method. -func (m *MockQueries) MailTemplateByOrg(arg0 context.Context, arg1 string, arg2 bool) (*query.MailTemplate, error) { +func (m *MockQueries) MailTemplateByOrg(ctx context.Context, orgID string, withOwnerRemoved bool) (*query.MailTemplate, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MailTemplateByOrg", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "MailTemplateByOrg", ctx, orgID, withOwnerRemoved) ret0, _ := ret[0].(*query.MailTemplate) ret1, _ := ret[1].(error) return ret0, ret1 } // MailTemplateByOrg indicates an expected call of MailTemplateByOrg. -func (mr *MockQueriesMockRecorder) MailTemplateByOrg(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) MailTemplateByOrg(ctx, orgID, withOwnerRemoved any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MailTemplateByOrg", reflect.TypeOf((*MockQueries)(nil).MailTemplateByOrg), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MailTemplateByOrg", reflect.TypeOf((*MockQueries)(nil).MailTemplateByOrg), ctx, orgID, withOwnerRemoved) } // NotificationPolicyByOrg mocks base method. 
-func (m *MockQueries) NotificationPolicyByOrg(arg0 context.Context, arg1 bool, arg2 string, arg3 bool) (*query.NotificationPolicy, error) { +func (m *MockQueries) NotificationPolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string, withOwnerRemoved bool) (*query.NotificationPolicy, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NotificationPolicyByOrg", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "NotificationPolicyByOrg", ctx, shouldTriggerBulk, orgID, withOwnerRemoved) ret0, _ := ret[0].(*query.NotificationPolicy) ret1, _ := ret[1].(error) return ret0, ret1 } // NotificationPolicyByOrg indicates an expected call of NotificationPolicyByOrg. -func (mr *MockQueriesMockRecorder) NotificationPolicyByOrg(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) NotificationPolicyByOrg(ctx, shouldTriggerBulk, orgID, withOwnerRemoved any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationPolicyByOrg", reflect.TypeOf((*MockQueries)(nil).NotificationPolicyByOrg), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationPolicyByOrg", reflect.TypeOf((*MockQueries)(nil).NotificationPolicyByOrg), ctx, shouldTriggerBulk, orgID, withOwnerRemoved) } // NotificationProviderByIDAndType mocks base method. -func (m *MockQueries) NotificationProviderByIDAndType(arg0 context.Context, arg1 string, arg2 domain.NotificationProviderType) (*query.DebugNotificationProvider, error) { +func (m *MockQueries) NotificationProviderByIDAndType(ctx context.Context, aggID string, providerType domain.NotificationProviderType) (*query.DebugNotificationProvider, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NotificationProviderByIDAndType", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "NotificationProviderByIDAndType", ctx, aggID, providerType) ret0, _ := ret[0].(*query.DebugNotificationProvider) ret1, _ := ret[1].(error) return ret0, ret1 } // NotificationProviderByIDAndType indicates an expected call of NotificationProviderByIDAndType. -func (mr *MockQueriesMockRecorder) NotificationProviderByIDAndType(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) NotificationProviderByIDAndType(ctx, aggID, providerType any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationProviderByIDAndType", reflect.TypeOf((*MockQueries)(nil).NotificationProviderByIDAndType), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationProviderByIDAndType", reflect.TypeOf((*MockQueries)(nil).NotificationProviderByIDAndType), ctx, aggID, providerType) } // SMSProviderConfigActive mocks base method. -func (m *MockQueries) SMSProviderConfigActive(arg0 context.Context, arg1 string) (*query.SMSConfig, error) { +func (m *MockQueries) SMSProviderConfigActive(ctx context.Context, resourceOwner string) (*query.SMSConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SMSProviderConfigActive", arg0, arg1) + ret := m.ctrl.Call(m, "SMSProviderConfigActive", ctx, resourceOwner) ret0, _ := ret[0].(*query.SMSConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // SMSProviderConfigActive indicates an expected call of SMSProviderConfigActive. 
-func (mr *MockQueriesMockRecorder) SMSProviderConfigActive(arg0, arg1 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) SMSProviderConfigActive(ctx, resourceOwner any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SMSProviderConfigActive", reflect.TypeOf((*MockQueries)(nil).SMSProviderConfigActive), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SMSProviderConfigActive", reflect.TypeOf((*MockQueries)(nil).SMSProviderConfigActive), ctx, resourceOwner) } // SMTPConfigActive mocks base method. -func (m *MockQueries) SMTPConfigActive(arg0 context.Context, arg1 string) (*query.SMTPConfig, error) { +func (m *MockQueries) SMTPConfigActive(ctx context.Context, resourceOwner string) (*query.SMTPConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SMTPConfigActive", arg0, arg1) + ret := m.ctrl.Call(m, "SMTPConfigActive", ctx, resourceOwner) ret0, _ := ret[0].(*query.SMTPConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // SMTPConfigActive indicates an expected call of SMTPConfigActive. -func (mr *MockQueriesMockRecorder) SMTPConfigActive(arg0, arg1 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) SMTPConfigActive(ctx, resourceOwner any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SMTPConfigActive", reflect.TypeOf((*MockQueries)(nil).SMTPConfigActive), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SMTPConfigActive", reflect.TypeOf((*MockQueries)(nil).SMTPConfigActive), ctx, resourceOwner) } // SearchInstanceDomains mocks base method. -func (m *MockQueries) SearchInstanceDomains(arg0 context.Context, arg1 *query.InstanceDomainSearchQueries) (*query.InstanceDomains, error) { +func (m *MockQueries) SearchInstanceDomains(ctx context.Context, queries *query.InstanceDomainSearchQueries) (*query.InstanceDomains, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SearchInstanceDomains", arg0, arg1) + ret := m.ctrl.Call(m, "SearchInstanceDomains", ctx, queries) ret0, _ := ret[0].(*query.InstanceDomains) ret1, _ := ret[1].(error) return ret0, ret1 } // SearchInstanceDomains indicates an expected call of SearchInstanceDomains. -func (mr *MockQueriesMockRecorder) SearchInstanceDomains(arg0, arg1 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) SearchInstanceDomains(ctx, queries any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchInstanceDomains", reflect.TypeOf((*MockQueries)(nil).SearchInstanceDomains), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchInstanceDomains", reflect.TypeOf((*MockQueries)(nil).SearchInstanceDomains), ctx, queries) } // SearchMilestones mocks base method. -func (m *MockQueries) SearchMilestones(arg0 context.Context, arg1 []string, arg2 *query.MilestonesSearchQueries) (*query.Milestones, error) { +func (m *MockQueries) SearchMilestones(ctx context.Context, instanceIDs []string, queries *query.MilestonesSearchQueries) (*query.Milestones, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SearchMilestones", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "SearchMilestones", ctx, instanceIDs, queries) ret0, _ := ret[0].(*query.Milestones) ret1, _ := ret[1].(error) return ret0, ret1 } // SearchMilestones indicates an expected call of SearchMilestones. 
-func (mr *MockQueriesMockRecorder) SearchMilestones(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) SearchMilestones(ctx, instanceIDs, queries any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchMilestones", reflect.TypeOf((*MockQueries)(nil).SearchMilestones), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchMilestones", reflect.TypeOf((*MockQueries)(nil).SearchMilestones), ctx, instanceIDs, queries) } // SessionByID mocks base method. -func (m *MockQueries) SessionByID(arg0 context.Context, arg1 bool, arg2, arg3 string) (*query.Session, error) { +func (m *MockQueries) SessionByID(ctx context.Context, shouldTriggerBulk bool, id, sessionToken string) (*query.Session, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SessionByID", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SessionByID", ctx, shouldTriggerBulk, id, sessionToken) ret0, _ := ret[0].(*query.Session) ret1, _ := ret[1].(error) return ret0, ret1 } // SessionByID indicates an expected call of SessionByID. -func (mr *MockQueriesMockRecorder) SessionByID(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockQueriesMockRecorder) SessionByID(ctx, shouldTriggerBulk, id, sessionToken any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SessionByID", reflect.TypeOf((*MockQueries)(nil).SessionByID), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SessionByID", reflect.TypeOf((*MockQueries)(nil).SessionByID), ctx, shouldTriggerBulk, id, sessionToken) } diff --git a/internal/notification/handlers/queries.go b/internal/notification/handlers/queries.go index 49cffc5e49..1c00460531 100644 --- a/internal/notification/handlers/queries.go +++ b/internal/notification/handlers/queries.go @@ -2,9 +2,12 @@ package handlers import ( "context" + "time" + "github.com/go-jose/go-jose/v4" "golang.org/x/text/language" + "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/eventstore" @@ -25,6 +28,9 @@ type Queries interface { SMTPConfigActive(ctx context.Context, resourceOwner string) (*query.SMTPConfig, error) GetDefaultLanguage(ctx context.Context) language.Tag GetInstanceRestrictions(ctx context.Context) (restrictions query.Restrictions, err error) + InstanceByID(ctx context.Context, id string) (instance authz.Instance, err error) + GetActiveSigningWebKey(ctx context.Context) (*jose.JSONWebKey, error) + ActivePrivateSigningKey(ctx context.Context, t time.Time) (keys *query.PrivateKeys, err error) } type NotificationQueries struct { diff --git a/internal/notification/handlers/user_notifier_test.go b/internal/notification/handlers/user_notifier_test.go index 991eb0531d..9692832787 100644 --- a/internal/notification/handlers/user_notifier_test.go +++ b/internal/notification/handlers/user_notifier_test.go @@ -19,6 +19,7 @@ import ( es_repo_mock "github.com/zitadel/zitadel/internal/eventstore/repository/mock" "github.com/zitadel/zitadel/internal/notification/channels/email" channel_mock "github.com/zitadel/zitadel/internal/notification/channels/mock" + "github.com/zitadel/zitadel/internal/notification/channels/set" "github.com/zitadel/zitadel/internal/notification/channels/sms" "github.com/zitadel/zitadel/internal/notification/channels/smtp" "github.com/zitadel/zitadel/internal/notification/channels/twilio" @@ -1663,6 +1664,10 @@ func (c 
*channels) Webhook(context.Context, webhook.Config) (*senders.Chain, err return &c.Chain, nil } +func (c *channels) SecurityTokenEvent(context.Context, set.Config) (*senders.Chain, error) { + return &c.Chain, nil +} + func expectTemplateQueries(queries *mock.MockQueries, template string) { queries.EXPECT().GetInstanceRestrictions(gomock.Any()).Return(query.Restrictions{ AllowedLanguages: []language.Tag{language.English}, diff --git a/internal/notification/messages/form.go b/internal/notification/messages/form.go new file mode 100644 index 0000000000..5e9a97ca68 --- /dev/null +++ b/internal/notification/messages/form.go @@ -0,0 +1,27 @@ +package messages + +import ( + "net/url" + + "github.com/zitadel/schema" + + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/notification/channels" +) + +var _ channels.Message = (*Form)(nil) + +type Form struct { + Serializable any + TriggeringEvent eventstore.Event +} + +func (msg *Form) GetContent() (string, error) { + values := make(url.Values) + err := schema.NewEncoder().Encode(msg.Serializable, values) + return values.Encode(), err +} + +func (msg *Form) GetTriggeringEvent() eventstore.Event { + return msg.TriggeringEvent +} diff --git a/internal/notification/projections.go b/internal/notification/projections.go index 46434536c2..2be95f1490 100644 --- a/internal/notification/projections.go +++ b/internal/notification/projections.go @@ -2,6 +2,7 @@ package notification import ( "context" + "time" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/crypto" @@ -17,7 +18,7 @@ var projections []*handler.Handler func Register( ctx context.Context, - userHandlerCustomConfig, quotaHandlerCustomConfig, telemetryHandlerCustomConfig projection.CustomConfig, + userHandlerCustomConfig, quotaHandlerCustomConfig, telemetryHandlerCustomConfig, backChannelLogoutHandlerCustomConfig projection.CustomConfig, telemetryCfg handlers.TelemetryPusherConfig, externalDomain string, externalPort uint16, @@ -25,14 +26,24 @@ func Register( commands *command.Commands, queries *query.Queries, es *eventstore.Eventstore, - otpEmailTmpl string, - fileSystemPath string, - userEncryption, smtpEncryption, smsEncryption crypto.EncryptionAlgorithm, + otpEmailTmpl, fileSystemPath string, + userEncryption, smtpEncryption, smsEncryption, keysEncryptionAlg crypto.EncryptionAlgorithm, + tokenLifetime time.Duration, ) { q := handlers.NewNotificationQueries(queries, es, externalDomain, externalPort, externalSecure, fileSystemPath, userEncryption, smtpEncryption, smsEncryption) c := newChannels(q) projections = append(projections, handlers.NewUserNotifier(ctx, projection.ApplyCustomConfig(userHandlerCustomConfig), commands, q, c, otpEmailTmpl)) projections = append(projections, handlers.NewQuotaNotifier(ctx, projection.ApplyCustomConfig(quotaHandlerCustomConfig), commands, q, c)) + projections = append(projections, handlers.NewBackChannelLogoutNotifier( + ctx, + projection.ApplyCustomConfig(backChannelLogoutHandlerCustomConfig), + commands, + q, + es, + keysEncryptionAlg, + c, + tokenLifetime, + )) if telemetryCfg.Enabled { projections = append(projections, handlers.NewTelemetryPusher(ctx, telemetryCfg, projection.ApplyCustomConfig(telemetryHandlerCustomConfig), commands, q, c)) } diff --git a/internal/notification/senders/security_event_token.go b/internal/notification/senders/security_event_token.go new file mode 100644 index 0000000000..8fb21a6557 --- /dev/null +++ b/internal/notification/senders/security_event_token.go @@ 
-0,0 +1,49 @@ +package senders + +import ( + "context" + + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/notification/channels" + "github.com/zitadel/zitadel/internal/notification/channels/fs" + "github.com/zitadel/zitadel/internal/notification/channels/instrumenting" + "github.com/zitadel/zitadel/internal/notification/channels/log" + "github.com/zitadel/zitadel/internal/notification/channels/set" +) + +const setSpanName = "security_event_token.NotificationChannel" + +func SecurityEventTokenChannels( + ctx context.Context, + setConfig set.Config, + getFileSystemProvider func(ctx context.Context) (*fs.Config, error), + getLogProvider func(ctx context.Context) (*log.Config, error), + successMetricName, + failureMetricName string, +) (*Chain, error) { + if err := setConfig.Validate(); err != nil { + return nil, err + } + channels := make([]channels.NotificationChannel, 0, 3) + setChannel, err := set.InitChannel(ctx, setConfig) + logging.WithFields( + "instance", authz.GetInstance(ctx).InstanceID(), + "callurl", setConfig.CallURL, + ).OnError(err).Debug("initializing SET channel failed") + if err == nil { + channels = append( + channels, + instrumenting.Wrap( + ctx, + setChannel, + setSpanName, + successMetricName, + failureMetricName, + ), + ) + } + channels = append(channels, debugChannels(ctx, getFileSystemProvider, getLogProvider)...) + return ChainChannels(channels...), nil +} diff --git a/internal/notification/types/notification.go b/internal/notification/types/notification.go index 49a437ff18..61c4cf70de 100644 --- a/internal/notification/types/notification.go +++ b/internal/notification/types/notification.go @@ -8,6 +8,7 @@ import ( "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/i18n" "github.com/zitadel/zitadel/internal/notification/channels/email" + "github.com/zitadel/zitadel/internal/notification/channels/set" "github.com/zitadel/zitadel/internal/notification/channels/sms" "github.com/zitadel/zitadel/internal/notification/channels/webhook" "github.com/zitadel/zitadel/internal/notification/senders" @@ -26,6 +27,7 @@ type ChannelChains interface { Email(context.Context) (*senders.Chain, *email.Config, error) SMS(context.Context) (*senders.Chain, *sms.Config, error) Webhook(context.Context, webhook.Config) (*senders.Chain, error) + SecurityTokenEvent(context.Context, set.Config) (*senders.Chain, error) } func SendEmail( @@ -127,3 +129,21 @@ func SendJSON( ) } } + +func SendSecurityTokenEvent( + ctx context.Context, + setConfig set.Config, + channels ChannelChains, + token any, + triggeringEvent eventstore.Event, +) Notify { + return func(_ string, _ map[string]interface{}, _ string, _ bool) error { + return handleSecurityTokenEvent( + ctx, + setConfig, + channels, + token, + triggeringEvent, + ) + } +} diff --git a/internal/notification/types/security_token_event.go b/internal/notification/types/security_token_event.go new file mode 100644 index 0000000000..d8a1d26006 --- /dev/null +++ b/internal/notification/types/security_token_event.go @@ -0,0 +1,27 @@ +package types + +import ( + "context" + + "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/notification/channels/set" + "github.com/zitadel/zitadel/internal/notification/messages" +) + +func handleSecurityTokenEvent( + ctx context.Context, + setConfig set.Config, + channels ChannelChains, + token any, + triggeringEvent eventstore.Event, +) error { + message := &messages.Form{ + 
Serializable: token, + TriggeringEvent: triggeringEvent, + } + setChannels, err := channels.SecurityTokenEvent(ctx, setConfig) + if err != nil { + return err + } + return setChannels.HandleMessage(message) +} diff --git a/internal/query/app.go b/internal/query/app.go index b94cb9cdaf..fc0101bf06 100644 --- a/internal/query/app.go +++ b/internal/query/app.go @@ -59,6 +59,7 @@ type OIDCApp struct { AdditionalOrigins database.TextArray[string] AllowedOrigins database.TextArray[string] SkipNativeAppSuccessPage bool + BackChannelLogoutURI string } type SAMLApp struct { @@ -243,6 +244,10 @@ var ( name: projection.AppOIDCConfigColumnSkipNativeAppSuccessPage, table: appOIDCConfigsTable, } + AppOIDCConfigColumnBackChannelLogoutURI = Column{ + name: projection.AppOIDCConfigColumnBackChannelLogoutURI, + table: appOIDCConfigsTable, + } ) func (q *Queries) AppByProjectAndAppID(ctx context.Context, shouldTriggerBulk bool, projectID, appID string) (app *App, err error) { @@ -536,6 +541,7 @@ func prepareAppQuery(ctx context.Context, db prepareDatabase, activeOnly bool) ( AppOIDCConfigColumnClockSkew.identifier(), AppOIDCConfigColumnAdditionalOrigins.identifier(), AppOIDCConfigColumnSkipNativeAppSuccessPage.identifier(), + AppOIDCConfigColumnBackChannelLogoutURI.identifier(), AppSAMLConfigColumnAppID.identifier(), AppSAMLConfigColumnEntityID.identifier(), @@ -600,6 +606,7 @@ func scanApp(row *sql.Row) (*App, error) { &oidcConfig.clockSkew, &oidcConfig.additionalOrigins, &oidcConfig.skipNativeAppSuccessPage, + &oidcConfig.backChannelLogoutURI, &samlConfig.appID, &samlConfig.entityID, @@ -649,6 +656,7 @@ func prepareOIDCAppQuery() (sq.SelectBuilder, func(*sql.Row) (*App, error)) { AppOIDCConfigColumnClockSkew.identifier(), AppOIDCConfigColumnAdditionalOrigins.identifier(), AppOIDCConfigColumnSkipNativeAppSuccessPage.identifier(), + AppOIDCConfigColumnBackChannelLogoutURI.identifier(), ).From(appsTable.identifier()). Join(join(AppOIDCConfigColumnAppID, AppColumnID)). 
PlaceholderFormat(sq.Dollar), func(row *sql.Row) (*App, error) { @@ -685,6 +693,7 @@ func prepareOIDCAppQuery() (sq.SelectBuilder, func(*sql.Row) (*App, error)) { &oidcConfig.clockSkew, &oidcConfig.additionalOrigins, &oidcConfig.skipNativeAppSuccessPage, + &oidcConfig.backChannelLogoutURI, ) if err != nil { @@ -896,6 +905,7 @@ func prepareAppsQuery(ctx context.Context, db prepareDatabase) (sq.SelectBuilder AppOIDCConfigColumnClockSkew.identifier(), AppOIDCConfigColumnAdditionalOrigins.identifier(), AppOIDCConfigColumnSkipNativeAppSuccessPage.identifier(), + AppOIDCConfigColumnBackChannelLogoutURI.identifier(), AppSAMLConfigColumnAppID.identifier(), AppSAMLConfigColumnEntityID.identifier(), @@ -948,6 +958,7 @@ func prepareAppsQuery(ctx context.Context, db prepareDatabase) (sq.SelectBuilder &oidcConfig.clockSkew, &oidcConfig.additionalOrigins, &oidcConfig.skipNativeAppSuccessPage, + &oidcConfig.backChannelLogoutURI, &samlConfig.appID, &samlConfig.entityID, @@ -1020,6 +1031,7 @@ type sqlOIDCConfig struct { responseTypes database.NumberArray[domain.OIDCResponseType] grantTypes database.NumberArray[domain.OIDCGrantType] skipNativeAppSuccessPage sql.NullBool + backChannelLogoutURI sql.NullString } func (c sqlOIDCConfig) set(app *App) { @@ -1043,6 +1055,7 @@ func (c sqlOIDCConfig) set(app *App) { ResponseTypes: c.responseTypes, GrantTypes: c.grantTypes, SkipNativeAppSuccessPage: c.skipNativeAppSuccessPage.Bool, + BackChannelLogoutURI: c.backChannelLogoutURI.String, } compliance := domain.GetOIDCCompliance(app.OIDCConfig.Version, app.OIDCConfig.AppType, app.OIDCConfig.GrantTypes, app.OIDCConfig.ResponseTypes, app.OIDCConfig.AuthMethodType, app.OIDCConfig.RedirectURIs) app.OIDCConfig.ComplianceProblems = compliance.Problems diff --git a/internal/query/app_test.go b/internal/query/app_test.go index 9a9c613868..990ff943f0 100644 --- a/internal/query/app_test.go +++ b/internal/query/app_test.go @@ -48,6 +48,7 @@ var ( ` projections.apps7_oidc_configs.clock_skew,` + ` projections.apps7_oidc_configs.additional_origins,` + ` projections.apps7_oidc_configs.skip_native_app_success_page,` + + ` projections.apps7_oidc_configs.back_channel_logout_uri,` + //saml config ` projections.apps7_saml_configs.app_id,` + ` projections.apps7_saml_configs.entity_id,` + @@ -91,6 +92,7 @@ var ( ` projections.apps7_oidc_configs.clock_skew,` + ` projections.apps7_oidc_configs.additional_origins,` + ` projections.apps7_oidc_configs.skip_native_app_success_page,` + + ` projections.apps7_oidc_configs.back_channel_logout_uri,` + //saml config ` projections.apps7_saml_configs.app_id,` + ` projections.apps7_saml_configs.entity_id,` + @@ -163,6 +165,7 @@ var ( "clock_skew", "additional_origins", "skip_native_app_success_page", + "back_channel_logout_uri", //saml config "app_id", "entity_id", @@ -234,6 +237,7 @@ func Test_AppsPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config nil, nil, @@ -300,6 +304,7 @@ func Test_AppsPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config nil, nil, @@ -369,6 +374,7 @@ func Test_AppsPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config "app-id", "https://test.com/saml/metadata", @@ -440,6 +446,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -482,6 +489,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + 
BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -526,6 +534,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -568,6 +577,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -612,6 +622,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -654,6 +665,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -698,6 +710,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -740,6 +753,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -784,6 +798,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -826,6 +841,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -870,6 +886,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, true, + "back.channel.logout.ch", // saml config nil, nil, @@ -912,6 +929,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: true, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -956,6 +974,7 @@ func Test_AppsPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -993,6 +1012,7 @@ func Test_AppsPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config nil, nil, @@ -1030,6 +1050,7 @@ func Test_AppsPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config "saml-app-id", "https://test.com/saml/metadata", @@ -1072,6 +1093,7 @@ func Test_AppsPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, { @@ -1205,6 +1227,7 @@ func Test_AppPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config nil, nil, @@ -1265,6 +1288,7 @@ func Test_AppPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config nil, nil, @@ -1330,6 +1354,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1367,6 +1392,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", 
"additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -1411,6 +1437,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1448,6 +1475,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -1492,6 +1520,7 @@ func Test_AppPrepare(t *testing.T) { nil, nil, nil, + nil, // saml config "app-id", "https://test.com/saml/metadata", @@ -1558,6 +1587,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1595,6 +1625,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -1639,6 +1670,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1676,6 +1708,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -1720,6 +1753,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1757,6 +1791,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, @@ -1801,6 +1836,7 @@ func Test_AppPrepare(t *testing.T) { 1 * time.Second, database.TextArray[string]{"additional.origin"}, false, + "back.channel.logout.ch", // saml config nil, nil, @@ -1838,6 +1874,7 @@ func Test_AppPrepare(t *testing.T) { ComplianceProblems: nil, AllowedOrigins: database.TextArray[string]{"https://redirect.to", "additional.origin"}, SkipNativeAppSuccessPage: false, + BackChannelLogoutURI: "back.channel.logout.ch", }, }, }, diff --git a/internal/query/instance_features.go b/internal/query/instance_features.go index 1616d9b366..fed6d851df 100644 --- a/internal/query/instance_features.go +++ b/internal/query/instance_features.go @@ -20,6 +20,7 @@ type InstanceFeatures struct { DebugOIDCParentError FeatureSource[bool] OIDCSingleV1SessionTermination FeatureSource[bool] DisableUserTokenEvent FeatureSource[bool] + EnableBackChannelLogout FeatureSource[bool] } func (q *Queries) GetInstanceFeatures(ctx context.Context, cascade bool) (_ *InstanceFeatures, err error) { diff --git a/internal/query/instance_features_model.go b/internal/query/instance_features_model.go index 80515b4773..5192f7dfc5 100644 --- a/internal/query/instance_features_model.go +++ b/internal/query/instance_features_model.go @@ -71,6 +71,7 @@ func (m *InstanceFeaturesReadModel) Query() *eventstore.SearchQueryBuilder { feature_v2.InstanceDebugOIDCParentErrorEventType, feature_v2.InstanceOIDCSingleV1SessionTerminationEventType, feature_v2.InstanceDisableUserTokenEvent, + 
feature_v2.InstanceEnableBackChannelLogout, ). Builder().ResourceOwner(m.ResourceOwner) } @@ -96,6 +97,7 @@ func (m *InstanceFeaturesReadModel) populateFromSystem() bool { m.instance.ImprovedPerformance = m.system.ImprovedPerformance m.instance.OIDCSingleV1SessionTermination = m.system.OIDCSingleV1SessionTermination m.instance.DisableUserTokenEvent = m.system.DisableUserTokenEvent + m.instance.EnableBackChannelLogout = m.system.EnableBackChannelLogout return true } @@ -129,6 +131,8 @@ func reduceInstanceFeatureSet[T any](features *InstanceFeatures, event *feature_ features.OIDCSingleV1SessionTermination.set(level, event.Value) case feature.KeyDisableUserTokenEvent: features.DisableUserTokenEvent.set(level, event.Value) + case feature.KeyEnableBackChannelLogout: + features.EnableBackChannelLogout.set(level, event.Value) } return nil } diff --git a/internal/query/oidc_client.go b/internal/query/oidc_client.go index 89d74d4ff8..8790c9737a 100644 --- a/internal/query/oidc_client.go +++ b/internal/query/oidc_client.go @@ -21,6 +21,7 @@ type OIDCClient struct { AppID string `json:"app_id,omitempty"` State domain.AppState `json:"state,omitempty"` ClientID string `json:"client_id,omitempty"` + BackChannelLogoutURI string `json:"back_channel_logout_uri,omitempty"` HashedSecret string `json:"client_secret,omitempty"` RedirectURIs []string `json:"redirect_uris,omitempty"` ResponseTypes []domain.OIDCResponseType `json:"response_types,omitempty"` diff --git a/internal/query/oidc_client_by_id.sql b/internal/query/oidc_client_by_id.sql index ef471387b3..201705c6bf 100644 --- a/internal/query/oidc_client_by_id.sql +++ b/internal/query/oidc_client_by_id.sql @@ -1,7 +1,7 @@ with client as ( select c.instance_id, - c.app_id, a.state, c.client_id, c.client_secret, c.redirect_uris, c.response_types, c.grant_types, - c.application_type, c.auth_method_type, c.post_logout_redirect_uris, c.is_dev_mode, + c.app_id, a.state, c.client_id, c.back_channel_logout_uri, c.client_secret, c.redirect_uris, c.response_types, + c.grant_types, c.application_type, c.auth_method_type, c.post_logout_redirect_uris, c.is_dev_mode, c.access_token_type, c.access_token_role_assertion, c.id_token_role_assertion, c.id_token_userinfo_assertion, c.clock_skew, c.additional_origins, a.project_id, p.project_role_assertion from projections.apps7_oidc_configs c diff --git a/internal/query/projection/app.go b/internal/query/projection/app.go index a162548dd4..7b810c3a97 100644 --- a/internal/query/projection/app.go +++ b/internal/query/projection/app.go @@ -58,6 +58,7 @@ const ( AppOIDCConfigColumnClockSkew = "clock_skew" AppOIDCConfigColumnAdditionalOrigins = "additional_origins" AppOIDCConfigColumnSkipNativeAppSuccessPage = "skip_native_app_success_page" + AppOIDCConfigColumnBackChannelLogoutURI = "back_channel_logout_uri" appSAMLTableSuffix = "saml_configs" AppSAMLConfigColumnAppID = "app_id" @@ -125,6 +126,7 @@ func (*appProjection) Init() *old_handler.Check { handler.NewColumn(AppOIDCConfigColumnClockSkew, handler.ColumnTypeInt64, handler.Default(0)), handler.NewColumn(AppOIDCConfigColumnAdditionalOrigins, handler.ColumnTypeTextArray, handler.Nullable()), handler.NewColumn(AppOIDCConfigColumnSkipNativeAppSuccessPage, handler.ColumnTypeBool, handler.Default(false)), + handler.NewColumn(AppOIDCConfigColumnBackChannelLogoutURI, handler.ColumnTypeText, handler.Nullable()), }, handler.NewPrimaryKey(AppOIDCConfigColumnInstanceID, AppOIDCConfigColumnAppID), appOIDCTableSuffix, @@ -500,6 +502,7 @@ func (p *appProjection) reduceOIDCConfigAdded(event 
eventstore.Event) (*handler. handler.NewCol(AppOIDCConfigColumnClockSkew, e.ClockSkew), handler.NewCol(AppOIDCConfigColumnAdditionalOrigins, database.TextArray[string](e.AdditionalOrigins)), handler.NewCol(AppOIDCConfigColumnSkipNativeAppSuccessPage, e.SkipNativeAppSuccessPage), + handler.NewCol(AppOIDCConfigColumnBackChannelLogoutURI, e.BackChannelLogoutURI), }, handler.WithTableSuffix(appOIDCTableSuffix), ), @@ -522,7 +525,7 @@ func (p *appProjection) reduceOIDCConfigChanged(event eventstore.Event) (*handle return nil, zerrors.ThrowInvalidArgumentf(nil, "HANDL-GNHU1", "reduce.wrong.event.type %s", project.OIDCConfigChangedType) } - cols := make([]handler.Column, 0, 15) + cols := make([]handler.Column, 0, 16) if e.Version != nil { cols = append(cols, handler.NewCol(AppOIDCConfigColumnVersion, *e.Version)) } @@ -568,6 +571,9 @@ func (p *appProjection) reduceOIDCConfigChanged(event eventstore.Event) (*handle if e.SkipNativeAppSuccessPage != nil { cols = append(cols, handler.NewCol(AppOIDCConfigColumnSkipNativeAppSuccessPage, *e.SkipNativeAppSuccessPage)) } + if e.BackChannelLogoutURI != nil { + cols = append(cols, handler.NewCol(AppOIDCConfigColumnBackChannelLogoutURI, *e.BackChannelLogoutURI)) + } if len(cols) == 0 { return handler.NewNoOpStatement(e), nil diff --git a/internal/query/projection/app_test.go b/internal/query/projection/app_test.go index 49979c4698..74e4e39847 100644 --- a/internal/query/projection/app_test.go +++ b/internal/query/projection/app_test.go @@ -558,7 +558,8 @@ func TestAppProjection_reduces(t *testing.T) { "idTokenUserinfoAssertion": true, "clockSkew": 1000, "additionalOrigins": ["origin.one.ch", "origin.two.ch"], - "skipNativeAppSuccessPage": true + "skipNativeAppSuccessPage": true, + "backChannelLogoutURI": "back.channel.one.ch" }`), ), project.OIDCConfigAddedEventMapper), }, @@ -569,7 +570,7 @@ func TestAppProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "INSERT INTO projections.apps7_oidc_configs (app_id, instance_id, version, client_id, client_secret, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19)", + expectedStmt: "INSERT INTO projections.apps7_oidc_configs (app_id, instance_id, version, client_id, client_secret, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page, back_channel_logout_uri) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)", expectedArgs: []interface{}{ "app-id", "instance-id", @@ -590,6 +591,7 @@ func TestAppProjection_reduces(t *testing.T) { 1 * time.Microsecond, database.TextArray[string]{"origin.one.ch", "origin.two.ch"}, true, + "back.channel.one.ch", }, }, { @@ -630,7 +632,8 @@ func TestAppProjection_reduces(t *testing.T) { "idTokenUserinfoAssertion": true, "clockSkew": 1000, "additionalOrigins": ["origin.one.ch", "origin.two.ch"], - "skipNativeAppSuccessPage": true + "skipNativeAppSuccessPage": true, + "backChannelLogoutURI": "back.channel.one.ch" }`), ), 
project.OIDCConfigAddedEventMapper), }, @@ -641,7 +644,7 @@ func TestAppProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "INSERT INTO projections.apps7_oidc_configs (app_id, instance_id, version, client_id, client_secret, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19)", + expectedStmt: "INSERT INTO projections.apps7_oidc_configs (app_id, instance_id, version, client_id, client_secret, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page, back_channel_logout_uri) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)", expectedArgs: []interface{}{ "app-id", "instance-id", @@ -662,6 +665,7 @@ func TestAppProjection_reduces(t *testing.T) { 1 * time.Microsecond, database.TextArray[string]{"origin.one.ch", "origin.two.ch"}, true, + "back.channel.one.ch", }, }, { @@ -700,8 +704,8 @@ func TestAppProjection_reduces(t *testing.T) { "idTokenUserinfoAssertion": true, "clockSkew": 1000, "additionalOrigins": ["origin.one.ch", "origin.two.ch"], - "skipNativeAppSuccessPage": true - + "skipNativeAppSuccessPage": true, + "backChannelLogoutURI": "back.channel.one.ch" }`), ), project.OIDCConfigChangedEventMapper), }, @@ -712,7 +716,7 @@ func TestAppProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "UPDATE projections.apps7_oidc_configs SET (version, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page) = ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) WHERE (app_id = $16) AND (instance_id = $17)", + expectedStmt: "UPDATE projections.apps7_oidc_configs SET (version, redirect_uris, response_types, grant_types, application_type, auth_method_type, post_logout_redirect_uris, is_dev_mode, access_token_type, access_token_role_assertion, id_token_role_assertion, id_token_userinfo_assertion, clock_skew, additional_origins, skip_native_app_success_page, back_channel_logout_uri) = ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) WHERE (app_id = $17) AND (instance_id = $18)", expectedArgs: []interface{}{ domain.OIDCVersionV1, database.TextArray[string]{"redirect.one.ch", "redirect.two.ch"}, @@ -729,6 +733,7 @@ func TestAppProjection_reduces(t *testing.T) { 1 * time.Microsecond, database.TextArray[string]{"origin.one.ch", "origin.two.ch"}, true, + "back.channel.one.ch", "app-id", "instance-id", }, diff --git a/internal/query/projection/instance_features.go b/internal/query/projection/instance_features.go index 1b18e42e76..45e360c6db 100644 --- a/internal/query/projection/instance_features.go +++ b/internal/query/projection/instance_features.go @@ -104,6 +104,10 @@ func (*instanceFeatureProjection) Reducers() []handler.AggregateReducer { Event: 
feature_v2.InstanceDisableUserTokenEvent, Reduce: reduceInstanceSetFeature[bool], }, + { + Event: feature_v2.InstanceEnableBackChannelLogout, + Reduce: reduceInstanceSetFeature[bool], + }, { Event: instance.InstanceRemovedEventType, Reduce: reduceInstanceRemovedHelper(InstanceDomainInstanceIDCol), diff --git a/internal/query/projection/system_features.go b/internal/query/projection/system_features.go index cf3013e57c..65e72fa394 100644 --- a/internal/query/projection/system_features.go +++ b/internal/query/projection/system_features.go @@ -84,6 +84,10 @@ func (*systemFeatureProjection) Reducers() []handler.AggregateReducer { Event: feature_v2.SystemDisableUserTokenEvent, Reduce: reduceSystemSetFeature[bool], }, + { + Event: feature_v2.SystemEnableBackChannelLogout, + Reduce: reduceSystemSetFeature[bool], + }, }, }} } diff --git a/internal/query/system_features.go b/internal/query/system_features.go index ddbd0a08ea..cae68d5fbb 100644 --- a/internal/query/system_features.go +++ b/internal/query/system_features.go @@ -29,6 +29,7 @@ type SystemFeatures struct { ImprovedPerformance FeatureSource[[]feature.ImprovedPerformanceType] OIDCSingleV1SessionTermination FeatureSource[bool] DisableUserTokenEvent FeatureSource[bool] + EnableBackChannelLogout FeatureSource[bool] } func (q *Queries) GetSystemFeatures(ctx context.Context) (_ *SystemFeatures, err error) { diff --git a/internal/query/system_features_model.go b/internal/query/system_features_model.go index f8670c87fe..119aaa4ea1 100644 --- a/internal/query/system_features_model.go +++ b/internal/query/system_features_model.go @@ -59,6 +59,7 @@ func (m *SystemFeaturesReadModel) Query() *eventstore.SearchQueryBuilder { feature_v2.SystemImprovedPerformanceEventType, feature_v2.SystemOIDCSingleV1SessionTerminationEventType, feature_v2.SystemDisableUserTokenEvent, + feature_v2.SystemEnableBackChannelLogout, ). 
Builder().ResourceOwner(m.ResourceOwner) } @@ -94,6 +95,8 @@ func reduceSystemFeatureSet[T any](features *SystemFeatures, event *feature_v2.S features.OIDCSingleV1SessionTermination.set(level, event.Value) case feature.KeyDisableUserTokenEvent: features.DisableUserTokenEvent.set(level, event.Value) + case feature.KeyEnableBackChannelLogout: + features.EnableBackChannelLogout.set(level, event.Value) } return nil } diff --git a/internal/repository/feature/feature_v2/eventstore.go b/internal/repository/feature/feature_v2/eventstore.go index 866d331db4..9288f0a675 100644 --- a/internal/repository/feature/feature_v2/eventstore.go +++ b/internal/repository/feature/feature_v2/eventstore.go @@ -16,6 +16,7 @@ func init() { eventstore.RegisterFilterEventMapper(AggregateType, SystemImprovedPerformanceEventType, eventstore.GenericEventMapper[SetEvent[[]feature.ImprovedPerformanceType]]) eventstore.RegisterFilterEventMapper(AggregateType, SystemOIDCSingleV1SessionTerminationEventType, eventstore.GenericEventMapper[SetEvent[bool]]) eventstore.RegisterFilterEventMapper(AggregateType, SystemDisableUserTokenEvent, eventstore.GenericEventMapper[SetEvent[bool]]) + eventstore.RegisterFilterEventMapper(AggregateType, SystemEnableBackChannelLogout, eventstore.GenericEventMapper[SetEvent[bool]]) eventstore.RegisterFilterEventMapper(AggregateType, InstanceResetEventType, eventstore.GenericEventMapper[ResetEvent]) eventstore.RegisterFilterEventMapper(AggregateType, InstanceLoginDefaultOrgEventType, eventstore.GenericEventMapper[SetEvent[bool]]) @@ -29,4 +30,5 @@ func init() { eventstore.RegisterFilterEventMapper(AggregateType, InstanceDebugOIDCParentErrorEventType, eventstore.GenericEventMapper[SetEvent[bool]]) eventstore.RegisterFilterEventMapper(AggregateType, InstanceOIDCSingleV1SessionTerminationEventType, eventstore.GenericEventMapper[SetEvent[bool]]) eventstore.RegisterFilterEventMapper(AggregateType, InstanceDisableUserTokenEvent, eventstore.GenericEventMapper[SetEvent[bool]]) + eventstore.RegisterFilterEventMapper(AggregateType, InstanceEnableBackChannelLogout, eventstore.GenericEventMapper[SetEvent[bool]]) } diff --git a/internal/repository/feature/feature_v2/feature.go b/internal/repository/feature/feature_v2/feature.go index 95f7e44360..3fc180a814 100644 --- a/internal/repository/feature/feature_v2/feature.go +++ b/internal/repository/feature/feature_v2/feature.go @@ -21,6 +21,7 @@ var ( SystemImprovedPerformanceEventType = setEventTypeFromFeature(feature.LevelSystem, feature.KeyImprovedPerformance) SystemOIDCSingleV1SessionTerminationEventType = setEventTypeFromFeature(feature.LevelSystem, feature.KeyOIDCSingleV1SessionTermination) SystemDisableUserTokenEvent = setEventTypeFromFeature(feature.LevelSystem, feature.KeyDisableUserTokenEvent) + SystemEnableBackChannelLogout = setEventTypeFromFeature(feature.LevelSystem, feature.KeyEnableBackChannelLogout) InstanceResetEventType = resetEventTypeFromFeature(feature.LevelInstance) InstanceLoginDefaultOrgEventType = setEventTypeFromFeature(feature.LevelInstance, feature.KeyLoginDefaultOrg) @@ -34,6 +35,7 @@ var ( InstanceDebugOIDCParentErrorEventType = setEventTypeFromFeature(feature.LevelInstance, feature.KeyDebugOIDCParentError) InstanceOIDCSingleV1SessionTerminationEventType = setEventTypeFromFeature(feature.LevelInstance, feature.KeyOIDCSingleV1SessionTermination) InstanceDisableUserTokenEvent = setEventTypeFromFeature(feature.LevelInstance, feature.KeyDisableUserTokenEvent) + InstanceEnableBackChannelLogout = setEventTypeFromFeature(feature.LevelInstance, 
feature.KeyEnableBackChannelLogout) ) const ( diff --git a/internal/repository/project/oidc_config.go b/internal/repository/project/oidc_config.go index 5ea20c220a..498f3233e2 100644 --- a/internal/repository/project/oidc_config.go +++ b/internal/repository/project/oidc_config.go @@ -43,6 +43,7 @@ type OIDCConfigAddedEvent struct { ClockSkew time.Duration `json:"clockSkew,omitempty"` AdditionalOrigins []string `json:"additionalOrigins,omitempty"` SkipNativeAppSuccessPage bool `json:"skipNativeAppSuccessPage,omitempty"` + BackChannelLogoutURI string `json:"backChannelLogoutURI,omitempty"` } func (e *OIDCConfigAddedEvent) Payload() interface{} { @@ -74,6 +75,7 @@ func NewOIDCConfigAddedEvent( clockSkew time.Duration, additionalOrigins []string, skipNativeAppSuccessPage bool, + backChannelLogoutURI string, ) *OIDCConfigAddedEvent { return &OIDCConfigAddedEvent{ BaseEvent: *eventstore.NewBaseEventForPush( @@ -99,6 +101,7 @@ func NewOIDCConfigAddedEvent( ClockSkew: clockSkew, AdditionalOrigins: additionalOrigins, SkipNativeAppSuccessPage: skipNativeAppSuccessPage, + BackChannelLogoutURI: backChannelLogoutURI, } } @@ -184,7 +187,10 @@ func (e *OIDCConfigAddedEvent) Validate(cmd eventstore.Command) bool { return false } } - return e.SkipNativeAppSuccessPage == c.SkipNativeAppSuccessPage + if e.SkipNativeAppSuccessPage != c.SkipNativeAppSuccessPage { + return false + } + return e.BackChannelLogoutURI == c.BackChannelLogoutURI } func OIDCConfigAddedEventMapper(event eventstore.Event) (eventstore.Event, error) { @@ -219,6 +225,7 @@ type OIDCConfigChangedEvent struct { ClockSkew *time.Duration `json:"clockSkew,omitempty"` AdditionalOrigins *[]string `json:"additionalOrigins,omitempty"` SkipNativeAppSuccessPage *bool `json:"skipNativeAppSuccessPage,omitempty"` + BackChannelLogoutURI *string `json:"backChannelLogoutURI,omitempty"` } func (e *OIDCConfigChangedEvent) Payload() interface{} { @@ -345,6 +352,12 @@ func ChangeSkipNativeAppSuccessPage(skipNativeAppSuccessPage bool) func(event *O } } +func ChangeBackChannelLogoutURI(backChannelLogoutURI string) func(event *OIDCConfigChangedEvent) { + return func(e *OIDCConfigChangedEvent) { + e.BackChannelLogoutURI = &backChannelLogoutURI + } +} + func OIDCConfigChangedEventMapper(event eventstore.Event) (eventstore.Event, error) { e := &OIDCConfigChangedEvent{ BaseEvent: *eventstore.BaseEventFromRepo(event), diff --git a/internal/repository/session/session.go b/internal/repository/session/session.go index f5622fd4b4..42304aca8e 100644 --- a/internal/repository/session/session.go +++ b/internal/repository/session/session.go @@ -659,6 +659,8 @@ func NewLifetimeSetEvent( type TerminateEvent struct { eventstore.BaseEvent `json:"-"` + + TriggerOrigin string `json:"triggerOrigin,omitempty"` } func (e *TerminateEvent) Payload() interface{} { diff --git a/internal/repository/sessionlogout/aggregate.go b/internal/repository/sessionlogout/aggregate.go new file mode 100644 index 0000000000..dcdc7a581a --- /dev/null +++ b/internal/repository/sessionlogout/aggregate.go @@ -0,0 +1,26 @@ +package sessionlogout + +import ( + "github.com/zitadel/zitadel/internal/eventstore" +) + +const ( + AggregateType = "session_logout" + AggregateVersion = "v1" +) + +type Aggregate struct { + eventstore.Aggregate +} + +func NewAggregate(id, instanceID string) *Aggregate { + return &Aggregate{ + Aggregate: eventstore.Aggregate{ + Type: AggregateType, + Version: AggregateVersion, + ID: id, + ResourceOwner: instanceID, + InstanceID: instanceID, + }, + } +} diff --git 
a/internal/repository/sessionlogout/events.go b/internal/repository/sessionlogout/events.go new file mode 100644 index 0000000000..df7c39accf --- /dev/null +++ b/internal/repository/sessionlogout/events.go @@ -0,0 +1,79 @@ +package sessionlogout + +import ( + "context" + + "github.com/zitadel/zitadel/internal/eventstore" +) + +const ( + eventTypePrefix = "session_logout." + backChannelEventTypePrefix = eventTypePrefix + "back_channel." + BackChannelLogoutRegisteredType = backChannelEventTypePrefix + "registered" + BackChannelLogoutSentType = backChannelEventTypePrefix + "sent" +) + +type BackChannelLogoutRegisteredEvent struct { + *eventstore.BaseEvent `json:"-"` + + OIDCSessionID string `json:"oidc_session_id"` + UserID string `json:"user_id"` + ClientID string `json:"client_id"` + BackChannelLogoutURI string `json:"back_channel_logout_uri"` +} + +// Payload implements eventstore.Command. +func (e *BackChannelLogoutRegisteredEvent) Payload() any { + return e +} + +func (e *BackChannelLogoutRegisteredEvent) UniqueConstraints() []*eventstore.UniqueConstraint { + return nil +} + +func (e *BackChannelLogoutRegisteredEvent) SetBaseEvent(b *eventstore.BaseEvent) { + e.BaseEvent = b +} + +func NewBackChannelLogoutRegisteredEvent(ctx context.Context, aggregate *eventstore.Aggregate, oidcSessionID, userID, clientID, backChannelLogoutURI string) *BackChannelLogoutRegisteredEvent { + return &BackChannelLogoutRegisteredEvent{ + BaseEvent: eventstore.NewBaseEventForPush( + ctx, + aggregate, + BackChannelLogoutRegisteredType, + ), + OIDCSessionID: oidcSessionID, + UserID: userID, + ClientID: clientID, + BackChannelLogoutURI: backChannelLogoutURI, + } +} + +type BackChannelLogoutSentEvent struct { + eventstore.BaseEvent `json:"-"` + + OIDCSessionID string `json:"oidc_session_id"` +} + +func (e *BackChannelLogoutSentEvent) Payload() interface{} { + return e +} + +func (e *BackChannelLogoutSentEvent) UniqueConstraints() []*eventstore.UniqueConstraint { + return nil +} + +func (e *BackChannelLogoutSentEvent) SetBaseEvent(event *eventstore.BaseEvent) { + e.BaseEvent = *event +} + +func NewBackChannelLogoutSentEvent(ctx context.Context, aggregate *eventstore.Aggregate, oidcSessionID string) *BackChannelLogoutSentEvent { + return &BackChannelLogoutSentEvent{ + BaseEvent: *eventstore.NewBaseEventForPush( + ctx, + aggregate, + BackChannelLogoutSentType, + ), + OIDCSessionID: oidcSessionID, + } +} diff --git a/internal/repository/sessionlogout/eventstore.go b/internal/repository/sessionlogout/eventstore.go new file mode 100644 index 0000000000..0aa36dd8a8 --- /dev/null +++ b/internal/repository/sessionlogout/eventstore.go @@ -0,0 +1,15 @@ +package sessionlogout + +import ( + "github.com/zitadel/zitadel/internal/eventstore" +) + +var ( + BackChannelLogoutRegisteredEventMapper = eventstore.GenericEventMapper[BackChannelLogoutRegisteredEvent] + BackChannelLogoutSentEventMapper = eventstore.GenericEventMapper[BackChannelLogoutSentEvent] +) + +func init() { + eventstore.RegisterFilterEventMapper(AggregateType, BackChannelLogoutRegisteredType, BackChannelLogoutRegisteredEventMapper) + eventstore.RegisterFilterEventMapper(AggregateType, BackChannelLogoutSentType, BackChannelLogoutSentEventMapper) +} diff --git a/internal/repository/user/human.go b/internal/repository/user/human.go index ae1e9672ef..d503ecc899 100644 --- a/internal/repository/user/human.go +++ b/internal/repository/user/human.go @@ -517,7 +517,9 @@ func NewHumanInviteCheckFailedEvent(ctx context.Context, aggregate *eventstore.A type HumanSignedOutEvent 
struct { eventstore.BaseEvent `json:"-"` - UserAgentID string `json:"userAgentID"` + UserAgentID string `json:"userAgentID"` + SessionID string `json:"sessionID,omitempty"` + TriggeredAtOrigin string `json:"triggerOrigin,omitempty"` } func (e *HumanSignedOutEvent) Payload() interface{} { @@ -528,10 +530,15 @@ func (e *HumanSignedOutEvent) UniqueConstraints() []*eventstore.UniqueConstraint return nil } +func (e *HumanSignedOutEvent) TriggerOrigin() string { + return e.TriggeredAtOrigin +} + func NewHumanSignedOutEvent( ctx context.Context, aggregate *eventstore.Aggregate, - userAgentID string, + userAgentID, + sessionID string, ) *HumanSignedOutEvent { return &HumanSignedOutEvent{ BaseEvent: *eventstore.NewBaseEventForPush( @@ -539,7 +546,9 @@ func NewHumanSignedOutEvent( aggregate, HumanSignedOutType, ), - UserAgentID: userAgentID, + UserAgentID: userAgentID, + SessionID: sessionID, + TriggeredAtOrigin: http.DomainContext(ctx).Origin(), } } diff --git a/internal/user/repository/view/active_user_ids_by_session_id.sql b/internal/user/repository/view/active_user_sessions_by_session_id.sql similarity index 91% rename from internal/user/repository/view/active_user_ids_by_session_id.sql rename to internal/user/repository/view/active_user_sessions_by_session_id.sql index b7c5aaebb0..d5f4754c3f 100644 --- a/internal/user/repository/view/active_user_ids_by_session_id.sql +++ b/internal/user/repository/view/active_user_sessions_by_session_id.sql @@ -1,6 +1,7 @@ SELECT s.user_agent_id, - s.user_id + s.user_id, + s.id FROM auth.user_sessions s JOIN auth.user_sessions s2 ON s.instance_id = s2.instance_id diff --git a/internal/user/repository/view/user_session_view.go b/internal/user/repository/view/user_session_view.go index f0b956e057..b3d155f1ec 100644 --- a/internal/user/repository/view/user_session_view.go +++ b/internal/user/repository/view/user_session_view.go @@ -20,8 +20,8 @@ var userSessionsByUserAgentQuery string //go:embed user_agent_by_user_session_id.sql var userAgentByUserSessionIDQuery string -//go:embed active_user_ids_by_session_id.sql -var activeUserIDsBySessionIDQuery string +//go:embed active_user_sessions_by_session_id.sql +var activeUserSessionsBySessionIDQuery string func UserSessionByIDs(ctx context.Context, db *database.DB, agentID, userID, instanceID string) (userSession *model.UserSessionView, err error) { err = db.QueryRowContext( @@ -65,36 +65,39 @@ func UserAgentIDBySessionID(ctx context.Context, db *database.DB, sessionID, ins return userAgentID, err } -// ActiveUserIDsBySessionID returns all userIDs with an active session on the same user agent (its id is also returned) based on a sessionID -func ActiveUserIDsBySessionID(ctx context.Context, db *database.DB, sessionID, instanceID string) (userAgentID string, userIDs []string, err error) { +// ActiveUserSessionsBySessionID returns all sessions (sessionID:userID map) with an active session on the same user agent (its id is also returned) based on a sessionID +func ActiveUserSessionsBySessionID(ctx context.Context, db *database.DB, sessionID, instanceID string) (userAgentID string, sessions map[string]string, err error) { err = db.QueryContext( ctx, func(rows *sql.Rows) error { - userAgentID, userIDs, err = scanActiveUserAgentUserIDs(rows) + userAgentID, sessions, err = scanActiveUserAgentUserIDs(rows) return err }, - activeUserIDsBySessionIDQuery, + activeUserSessionsBySessionIDQuery, sessionID, instanceID, ) - return userAgentID, userIDs, err + return userAgentID, sessions, err } -func scanActiveUserAgentUserIDs(rows 
*sql.Rows) (userAgentID string, userIDs []string, err error) { +func scanActiveUserAgentUserIDs(rows *sql.Rows) (userAgentID string, sessions map[string]string, err error) { + sessions = make(map[string]string) for rows.Next() { - var userID string + var userID, sessionID string err := rows.Scan( &userAgentID, - &userID) + &userID, + &sessionID, + ) if err != nil { return "", nil, err } - userIDs = append(userIDs, userID) + sessions[sessionID] = userID } if err := rows.Close(); err != nil { return "", nil, zerrors.ThrowInternal(err, "VIEW-Sbrws", "Errors.Query.CloseRows") } - return userAgentID, userIDs, nil + return userAgentID, sessions, nil } func scanUserSession(row *sql.Row) (*model.UserSessionView, error) { diff --git a/proto/zitadel/app.proto b/proto/zitadel/app.proto index 58735e7c9e..d18168f2b9 100644 --- a/proto/zitadel/app.proto +++ b/proto/zitadel/app.proto @@ -168,6 +168,12 @@ message OIDCConfig { description: "Skip the successful login page on native apps and directly redirect the user to the callback."; } ]; + string back_channel_logout_uri = 21 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "[\"https://example.com/auth/backchannel\"]"; + description: "ZITADEL will use this URI to notify the application about terminated session according to the OIDC Back-Channel Logout (https://openid.net/specs/openid-connect-backchannel-1_0.html)"; + } + ]; } enum OIDCResponseType { diff --git a/proto/zitadel/feature/v2/instance.proto b/proto/zitadel/feature/v2/instance.proto index ee41c313f2..6717e397ea 100644 --- a/proto/zitadel/feature/v2/instance.proto +++ b/proto/zitadel/feature/v2/instance.proto @@ -86,6 +86,13 @@ message SetInstanceFeaturesRequest{ description: "Do not push user token meta-event user.token.v2.added to improve performance on many concurrent single (machine-)user logins"; } ]; + + optional bool enable_back_channel_logout = 12 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "true"; + description: "If the flag is enabled, you'll be able to use the OIDC Back-Channel Logout to be notified in your application about terminated user sessions."; + } + ]; } message SetInstanceFeaturesResponse { @@ -185,4 +192,11 @@ message GetInstanceFeaturesResponse { description: "Do not push user token meta-event user.token.v2.added to improve performance on many concurrent single (machine-)user logins"; } ]; + + FeatureFlag enable_back_channel_logout = 13 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "true"; + description: "If the flag is enabled, you'll be able to use the OIDC Back-Channel Logout to be notified in your application about terminated user sessions."; + } + ]; } diff --git a/proto/zitadel/feature/v2/system.proto b/proto/zitadel/feature/v2/system.proto index 70ff3c6506..cd8d7cc201 100644 --- a/proto/zitadel/feature/v2/system.proto +++ b/proto/zitadel/feature/v2/system.proto @@ -75,6 +75,13 @@ message SetSystemFeaturesRequest{ description: "Do not push user token meta-event user.token.v2.added to improve performance on many concurrent single (machine-)user logins"; } ]; + + optional bool enable_back_channel_logout = 10 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "true"; + description: "If the flag is enabled, you'll be able to use the OIDC Back-Channel Logout to be notified in your application about terminated user sessions."; + } + ]; } message SetSystemFeaturesResponse { @@ -153,4 +160,11 @@ message GetSystemFeaturesResponse { description: "Do not 
push user token meta-event user.token.v2.added to improve performance on many concurrent single (machine-)user logins"; } ]; + + FeatureFlag enable_back_channel_logout = 11 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "true"; + description: "If the flag is enabled, you'll be able to use the OIDC Back-Channel Logout to be notified in your application about terminated user sessions."; + } + ]; } diff --git a/proto/zitadel/management.proto b/proto/zitadel/management.proto index cb5bfb1389..0df07ffd4c 100644 --- a/proto/zitadel/management.proto +++ b/proto/zitadel/management.proto @@ -9802,6 +9802,12 @@ message AddOIDCAppRequest { description: "Skip the successful login page on native apps and directly redirect the user to the callback."; } ]; + string back_channel_logout_uri = 18 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "[\"https://example.com/auth/backchannel\"]"; + description: "ZITADEL will use this URI to notify the application about terminated session according to the OIDC Back-Channel Logout (https://openid.net/specs/openid-connect-backchannel-1_0.html)"; + } + ]; } message AddOIDCAppResponse { @@ -9977,6 +9983,12 @@ message UpdateOIDCAppConfigRequest { description: "Skip the successful login page on native apps and directly redirect the user to the callback."; } ]; + string back_channel_logout_uri = 17 [ + (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + example: "[\"https://example.com/auth/backchannel\"]"; + description: "ZITADEL will use this URI to notify the application about terminated session according to the OIDC Back-Channel Logout (https://openid.net/specs/openid-connect-backchannel-1_0.html)"; + } + ]; } message UpdateOIDCAppConfigResponse { From 9422766e17cf955d11792e999c0cbd5cf85f2939 Mon Sep 17 00:00:00 2001 From: Stefan Benz <46600784+stebenz@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:34:20 +0100 Subject: [PATCH 25/30] chore: remove some integration test flakiness (#8818) Remove some integration test flakiness. 
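The flaky assertions in the diff below are replaced with testify's `require.EventuallyWithT`, using a retry window and tick obtained from `integration.WaitForAndTickWithMaxDuration`. A minimal sketch of that retry pattern, where `waitForAndTick` is an illustrative stand-in (assuming the real helper derives the window from the context deadline) and not the repository implementation:

```go
package integration_sketch

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// waitForAndTick is a simplified stand-in for integration.WaitForAndTickWithMaxDuration:
// it caps the retry window at max, shrinks it to the context deadline if that is
// sooner, and returns a polling interval derived from the window.
func waitForAndTick(ctx context.Context, max time.Duration) (retryDuration, tick time.Duration) {
	retryDuration = max
	if deadline, ok := ctx.Deadline(); ok && time.Until(deadline) < max {
		retryDuration = time.Until(deadline)
	}
	return retryDuration, retryDuration / 10
}

// awaitCondition re-runs the assertions in check until they all pass or the
// window elapses, instead of asserting against a single, possibly stale read.
func awaitCondition(t *testing.T, ctx context.Context, check func(*assert.CollectT)) {
	retryDuration, tick := waitForAndTick(ctx, time.Minute)
	require.EventuallyWithT(t, check, retryDuration, tick, "awaiting condition failed")
}
```

Compared to a one-shot assertion, `EventuallyWithT` tolerates eventual consistency of the projections by polling until the collected assertions succeed or the window runs out, which is what removes the flakiness.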
--------- Co-authored-by: Livio Spring --- ...ons_allow_public_org_registrations_test.go | 45 +++++++------ .../restrictions_allowed_languages_test.go | 67 ++++++++++--------- .../admin/integration_test/server_test.go | 18 ----- .../integration_test/execution_target_test.go | 9 ++- .../v3alpha/integration_test/query_test.go | 2 +- internal/integration/assert.go | 2 +- 6 files changed, 68 insertions(+), 75 deletions(-) diff --git a/internal/api/grpc/admin/integration_test/restrictions_allow_public_org_registrations_test.go b/internal/api/grpc/admin/integration_test/restrictions_allow_public_org_registrations_test.go index 3663fec4dd..3cbcf8abd0 100644 --- a/internal/api/grpc/admin/integration_test/restrictions_allow_public_org_registrations_test.go +++ b/internal/api/grpc/admin/integration_test/restrictions_allow_public_org_registrations_test.go @@ -10,6 +10,7 @@ import ( "net/http/cookiejar" "net/url" "testing" + "time" "github.com/muhlemmer/gu" "github.com/stretchr/testify/assert" @@ -70,28 +71,34 @@ func awaitPubOrgRegDisallowed(t *testing.T, ctx context.Context, cc *integration // awaitGetSSRGetResponse cuts the CSRF token from the response body if it exists func awaitGetSSRGetResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int) string { var csrfToken []byte - await(t, ctx, func(tt *assert.CollectT) { - resp, err := client.Get(parsedURL.String()) - require.NoError(tt, err) - body, err := io.ReadAll(resp.Body) - require.NoError(tt, err) - searchField := ``)) - } - assert.Equal(tt, resp.StatusCode, expectCode) - }) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, + func(tt *assert.CollectT) { + resp, err := client.Get(parsedURL.String()) + require.NoError(tt, err) + body, err := io.ReadAll(resp.Body) + require.NoError(tt, err) + searchField := ``)) + } + assert.Equal(tt, resp.StatusCode, expectCode) + }, retryDuration, tick, "awaiting successful get SSR get response failed", + ) return string(csrfToken) } // awaitPostFormResponse needs a valid CSRF token to make it to the actual endpoint implementation and get the expected status code func awaitPostFormResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int, csrfToken string) { - await(t, ctx, func(tt *assert.CollectT) { - resp, err := client.PostForm(parsedURL.String(), url.Values{ - "gorilla.csrf.Token": {csrfToken}, - }) - require.NoError(tt, err) - assert.Equal(tt, resp.StatusCode, expectCode) - }) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, + func(tt *assert.CollectT) { + resp, err := client.PostForm(parsedURL.String(), url.Values{ + "gorilla.csrf.Token": {csrfToken}, + }) + require.NoError(tt, err) + assert.Equal(tt, resp.StatusCode, expectCode) + }, retryDuration, tick, "awaiting successful Post Form failed", + ) } diff --git a/internal/api/grpc/admin/integration_test/restrictions_allowed_languages_test.go b/internal/api/grpc/admin/integration_test/restrictions_allowed_languages_test.go index 92f688a900..e00b7f221b 100644 --- a/internal/api/grpc/admin/integration_test/restrictions_allowed_languages_test.go +++ b/internal/api/grpc/admin/integration_test/restrictions_allowed_languages_test.go @@ -51,7 +51,7 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) { require.Equal(ttt, language.Make(defaultLang.Language), language.English) }) tt.Run("the discovery endpoint returns all supported languages", 
func(ttt *testing.T) { - awaitDiscoveryEndpoint(ttt, instance.Domain, supportedLanguagesStr, nil) + awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, supportedLanguagesStr, nil) }) }) t.Run("restricting the default language fails", func(tt *testing.T) { @@ -92,10 +92,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) { require.Condition(tt, contains(supported.GetLanguages(), supportedLanguagesStr)) }) t.Run("the disallowed language is not listed in the discovery endpoint", func(tt *testing.T) { - awaitDiscoveryEndpoint(tt, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()}) + awaitDiscoveryEndpoint(tt, ctx, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()}) }) t.Run("the login ui is rendered in the default language", func(tt *testing.T) { - awaitLoginUILanguage(tt, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort") + awaitLoginUILanguage(tt, ctx, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort") }) t.Run("preferred languages are not restricted by the supported languages", func(tt *testing.T) { tt.Run("change user profile", func(ttt *testing.T) { @@ -153,10 +153,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) { t.Run("allowing the language makes it usable again", func(tt *testing.T) { tt.Run("the previously disallowed language is listed in the discovery endpoint again", func(ttt *testing.T) { - awaitDiscoveryEndpoint(ttt, instance.Domain, []string{disallowedLanguage.String()}, nil) + awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, []string{disallowedLanguage.String()}, nil) }) tt.Run("the login ui is rendered in the previously disallowed language", func(ttt *testing.T) { - awaitLoginUILanguage(ttt, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña") + awaitLoginUILanguage(ttt, ctx, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña") }) }) } @@ -164,36 +164,36 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) { func setAndAwaitAllowedLanguages(ctx context.Context, cc *integration.Client, t *testing.T, selectLanguages []string) { _, err := cc.Admin.SetRestrictions(ctx, &admin.SetRestrictionsRequest{AllowedLanguages: &admin.SelectLanguages{List: selectLanguages}}) require.NoError(t, err) - awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second) - defer awaitCancel() - await(t, awaitCtx, func(tt *assert.CollectT) { - restrictions, getErr := cc.Admin.GetRestrictions(awaitCtx, &admin.GetRestrictionsRequest{}) - expectLanguages := selectLanguages - if len(selectLanguages) == 0 { - expectLanguages = nil - } - assert.NoError(tt, getErr) - assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages()) - }) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, + func(tt *assert.CollectT) { + restrictions, getErr := cc.Admin.GetRestrictions(ctx, &admin.GetRestrictionsRequest{}) + expectLanguages := selectLanguages + if len(selectLanguages) == 0 { + expectLanguages = nil + } + assert.NoError(tt, getErr) + assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages()) + }, retryDuration, tick, "awaiting successful GetAllowedLanguages failed", + ) } func setAndAwaitDefaultLanguage(ctx context.Context, cc *integration.Client, t *testing.T, lang language.Tag) { _, err := cc.Admin.SetDefaultLanguage(ctx, &admin.SetDefaultLanguageRequest{Language: lang.String()}) require.NoError(t, err) - 
awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second) - defer awaitCancel() - await(t, awaitCtx, func(tt *assert.CollectT) { - defaultLang, getErr := cc.Admin.GetDefaultLanguage(awaitCtx, &admin.GetDefaultLanguageRequest{}) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, func(tt *assert.CollectT) { + defaultLang, getErr := cc.Admin.GetDefaultLanguage(ctx, &admin.GetDefaultLanguageRequest{}) assert.NoError(tt, getErr) assert.Equal(tt, lang.String(), defaultLang.GetLanguage()) - }) + }, retryDuration, tick, "awaiting successful GetDefaultLanguage failed", + ) } -func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notContainsUILocales []string) { - awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second) - defer awaitCancel() - await(t, awaitCtx, func(tt *assert.CollectT) { - req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil) +func awaitDiscoveryEndpoint(t *testing.T, ctx context.Context, domain string, containsUILocales, notContainsUILocales []string) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, func(tt *assert.CollectT) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil) require.NoError(tt, err) resp, err := http.DefaultClient.Do(req) require.NoError(tt, err) @@ -213,14 +213,14 @@ func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notC if notContainsUILocales != nil { assert.Condition(tt, not(contains(doc.UILocalesSupported, notContainsUILocales))) } - }) + }, retryDuration, tick, "awaiting successful call to Discovery endpoint failed", + ) } -func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) { - awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second) - defer awaitCancel() - await(t, awaitCtx, func(tt *assert.CollectT) { - req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil) +func awaitLoginUILanguage(t *testing.T, ctx context.Context, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) { + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) + require.EventuallyWithT(t, func(tt *assert.CollectT) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil) req.Header.Set("Accept-Language", acceptLanguage.String()) require.NoError(tt, err) resp, err := http.DefaultClient.Do(req) @@ -232,7 +232,8 @@ func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.T }() require.NoError(tt, err) assert.Containsf(tt, string(body), containsText, "login ui language is in "+expectLang.String()) - }) + }, retryDuration, tick, "awaiting successful LoginUI in specific language failed", + ) } // We would love to use assert.Contains here, but it doesn't work with slices of strings diff --git a/internal/api/grpc/admin/integration_test/server_test.go b/internal/api/grpc/admin/integration_test/server_test.go index e29b4d5c78..d762b08bfb 100644 --- a/internal/api/grpc/admin/integration_test/server_test.go +++ b/internal/api/grpc/admin/integration_test/server_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" "github.com/zitadel/zitadel/internal/integration" admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin" @@ -34,23 +33,6 @@ func TestMain(m *testing.M) { }()) } -func await(t *testing.T, ctx context.Context, cb func(*assert.CollectT)) { - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) - require.EventuallyWithT( - t, - func(tt *assert.CollectT) { - defer func() { - // Panics are not recovered and don't mark the test as failed, so we need to do that ourselves - assert.Nil(tt, recover(), "panic in await callback") - }() - cb(tt) - }, - retryDuration, - tick, - "awaiting successful callback failed", - ) -} - var _ assert.TestingT = (*noopAssertionT)(nil) type noopAssertionT struct{} diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go index b62d0ee37f..c70ed227c1 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go @@ -288,7 +288,9 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in if !assert.NoError(ttt, err) { return } - assert.Len(ttt, got.GetResult(), 1) + if !assert.Len(ttt, got.GetResult(), 1) { + return + } gotTargets := got.GetResult()[0].GetExecution().GetTargets() // always first check length, otherwise its failed anyway if assert.Len(ttt, gotTargets, len(targets)) { @@ -296,7 +298,6 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in assert.EqualExportedValues(ttt, targets[i].GetType(), gotTargets[i].GetType()) } } - }, retryDuration, tick, "timeout waiting for expected execution result") return } @@ -316,7 +317,9 @@ func waitForTarget(ctx context.Context, t *testing.T, instance *integration.Inst if !assert.NoError(ttt, err) { return } - assert.Len(ttt, got.GetResult(), 1) + if !assert.Len(ttt, got.GetResult(), 1) { + return + } config := got.GetResult()[0].GetConfig() assert.Equal(ttt, config.GetEndpoint(), endpoint) switch ty { diff --git a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go index e3d3233604..d6f584b35e 100644 --- a/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go +++ b/internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go @@ -216,7 +216,7 @@ func TestServer_GetTarget(t *testing.T) { err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want) require.NoError(t, err) } - retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) + retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 2*time.Minute) require.EventuallyWithT(t, func(ttt *assert.CollectT) { got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req) if tt.wantErr { diff --git a/internal/integration/assert.go b/internal/integration/assert.go index 8e875ee48e..6743c8297e 100644 --- a/internal/integration/assert.go +++ b/internal/integration/assert.go @@ -107,7 +107,7 @@ func AssertListDetails[L ListDetails, D ListDetailsMsg[L]](t assert.TestingT, ex if wantDetails.GetTimestamp() != nil { gotCD := gotDetails.GetTimestamp().AsTime() wantCD := time.Now() - assert.WithinRange(t, gotCD, wantCD.Add(-1*time.Minute), wantCD.Add(time.Minute)) + assert.WithinRange(t, gotCD, 
wantCD.Add(-10*time.Minute), wantCD.Add(time.Minute)) } } From 9c3e5e467b230e28167dd8280bd5973357a86716 Mon Sep 17 00:00:00 2001 From: Silvan Date: Mon, 4 Nov 2024 10:06:14 +0100 Subject: [PATCH 26/30] perf(query): remove transactions for queries (#8614) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Which Problems Are Solved Queries currently execute 3 statements, begin, query, commit # How the Problems Are Solved remove transaction handling from query methods in database package # Additional Changes - Bump versions of `core_grpc_dependencies`-receipt in Makefile # Additional info During load tests we saw a lot of idle transactions of `zitadel_queries` application name which is the connection pool used to query data in zitadel. Executed query: `select query_start - xact_start, pid, application_name, backend_start, xact_start, query_start, state_change, wait_event_type, wait_event,substring(query, 1, 200) query from pg_stat_activity where datname = 'zitadel' and state <> 'idle';` Mostly the last query executed was `begin isolation level read committed read only`. example: ``` ?column? | pid | application_name | backend_start | xact_start | query_start | state_change | wait_event_type | wait_event | query -----------------+-------+----------------------------+-------------------------------+-------------------------------+-------------------------------+-------------------------------+-----------------+--------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 00:00:00 | 33030 | zitadel_queries | 2024-10-16 16:25:53.906036+00 | 2024-10-16 16:30:19.191661+00 | 2024-10-16 16:30:19.191661+00 | 2024-10-16 16:30:19.19169+00 | Client | ClientRead | begin isolation level read committed read only 00:00:00 | 33035 | zitadel_queries | 2024-10-16 16:25:53.909629+00 | 2024-10-16 16:30:19.19179+00 | 2024-10-16 16:30:19.19179+00 | 2024-10-16 16:30:19.191805+00 | Client | ClientRead | begin isolation level read committed read only 00:00:00.00412 | 33028 | zitadel_queries | 2024-10-16 16:25:53.904247+00 | 2024-10-16 16:30:19.187734+00 | 2024-10-16 16:30:19.191854+00 | 2024-10-16 16:30:19.191964+00 | Client | ClientRead | SELECT created_at, event_type, "sequence", "position", payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision FROM eventstore.events2 WHERE instance_id = $1 AND aggregate_type 00:00:00.084662 | 33134 | zitadel_es_pusher | 2024-10-16 16:29:54.979692+00 | 2024-10-16 16:30:19.178578+00 | 2024-10-16 16:30:19.26324+00 | 2024-10-16 16:30:19.263267+00 | Client | ClientRead | RELEASE SAVEPOINT cockroach_restart 00:00:00.084768 | 33139 | zitadel_es_pusher | 2024-10-16 16:29:54.979585+00 | 2024-10-16 16:30:19.180762+00 | 2024-10-16 16:30:19.26553+00 | 2024-10-16 16:30:19.265531+00 | LWLock | WALWriteLock | commit 00:00:00.077377 | 33136 | zitadel_es_pusher | 2024-10-16 16:29:54.978582+00 | 2024-10-16 16:30:19.187883+00 | 2024-10-16 16:30:19.26526+00 | 2024-10-16 16:30:19.265431+00 | Client | ClientRead | WITH existing AS ( + | | | | | | | | | (SELECT instance_id, aggregate_type, aggregate_id, "sequence" FROM eventstore.events2 WHERE instance_id = $1 AND aggregate_type = $2 AND aggregate_id = $3 ORDER BY "sequence" DE 00:00:00.012309 | 33123 | zitadel_es_pusher | 2024-10-16 16:29:54.963484+00 | 2024-10-16 16:30:19.175066+00 | 2024-10-16 16:30:19.187375+00 | 2024-10-16 
16:30:19.187376+00 | IO | WalSync | commit 00:00:00 | 33034 | zitadel_queries | 2024-10-16 16:25:53.90791+00 | 2024-10-16 16:30:19.262921+00 | 2024-10-16 16:30:19.262921+00 | 2024-10-16 16:30:19.263133+00 | Client | ClientRead | begin isolation level read committed read only 00:00:00 | 33039 | zitadel_queries | 2024-10-16 16:25:53.914106+00 | 2024-10-16 16:30:19.191676+00 | 2024-10-16 16:30:19.191676+00 | 2024-10-16 16:30:19.191687+00 | Client | ClientRead | begin isolation level read committed read only 00:00:00.24539 | 33083 | zitadel_projection_spooler | 2024-10-16 16:27:49.895548+00 | 2024-10-16 16:30:19.020058+00 | 2024-10-16 16:30:19.265448+00 | 2024-10-16 16:30:19.26546+00 | Client | ClientRead | SAVEPOINT exec_stmt 00:00:00 | 33125 | zitadel_es_pusher | 2024-10-16 16:29:54.963859+00 | 2024-10-16 16:30:19.191715+00 | 2024-10-16 16:30:19.191715+00 | 2024-10-16 16:30:19.191729+00 | Client | ClientRead | begin 00:00:00.004292 | 33032 | zitadel_queries | 2024-10-16 16:25:53.906624+00 | 2024-10-16 16:30:19.187713+00 | 2024-10-16 16:30:19.192005+00 | 2024-10-16 16:30:19.192062+00 | Client | ClientRead | SELECT created_at, event_type, "sequence", "position", payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision FROM eventstore.events2 WHERE instance_id = $1 AND aggregate_type 00:00:00 | 33031 | zitadel_queries | 2024-10-16 16:25:53.906422+00 | 2024-10-16 16:30:19.191625+00 | 2024-10-16 16:30:19.191625+00 | 2024-10-16 16:30:19.191645+00 | Client | ClientRead | begin isolation level read committed read only ``` The amount of idle transactions is significantly less if the query transactions are removed: example: ``` ?column? | pid | application_name | backend_start | xact_start | query_start | state_change | wait_event_type | wait_event | query -----------------+-------+----------------------------+-------------------------------+-------------------------------+-------------------------------+-------------------------------+-----------------+------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 00:00:00.000094 | 32741 | zitadel_queries | 2024-10-16 16:23:49.73935+00 | 2024-10-16 16:24:59.785589+00 | 2024-10-16 16:24:59.785683+00 | 2024-10-16 16:24:59.785684+00 | | | SELECT created_at, event_type, "sequence", "position", payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision FROM eventstore.events2 WHERE instance_id = $1 AND aggregate_type 00:00:00 | 32762 | zitadel_es_pusher | 2024-10-16 16:24:02.275136+00 | 2024-10-16 16:24:59.784586+00 | 2024-10-16 16:24:59.784586+00 | 2024-10-16 16:24:59.784607+00 | Client | ClientRead | begin 00:00:00.000167 | 32742 | zitadel_queries | 2024-10-16 16:23:49.740489+00 | 2024-10-16 16:24:59.784274+00 | 2024-10-16 16:24:59.784441+00 | 2024-10-16 16:24:59.784442+00 | | | with usr as ( + | | | | | | | | | select u.id, u.creation_date, u.change_date, u.sequence, u.state, u.resource_owner, u.username, n.login_name as preferred_login_name + | | | | | | | | | from projections.users13 u + | | | | | | | | | left join projections.l 00:00:00.256014 | 32759 | zitadel_projection_spooler | 2024-10-16 16:24:01.418429+00 | 2024-10-16 16:24:59.52959+00 | 2024-10-16 16:24:59.785604+00 | 2024-10-16 16:24:59.785649+00 | Client | ClientRead | UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL) 
00:00:00.014199 | 32773 | zitadel_es_pusher | 2024-10-16 16:24:02.320404+00 | 2024-10-16 16:24:59.769509+00 | 2024-10-16 16:24:59.783708+00 | 2024-10-16 16:24:59.783709+00 | IO | WalSync | commit 00:00:00 | 32765 | zitadel_es_pusher | 2024-10-16 16:24:02.28173+00 | 2024-10-16 16:24:59.780413+00 | 2024-10-16 16:24:59.780413+00 | 2024-10-16 16:24:59.780426+00 | Client | ClientRead | begin 00:00:00.012729 | 32777 | zitadel_es_pusher | 2024-10-16 16:24:02.339737+00 | 2024-10-16 16:24:59.767432+00 | 2024-10-16 16:24:59.780161+00 | 2024-10-16 16:24:59.780195+00 | Client | ClientRead | RELEASE SAVEPOINT cockroach_restart ``` --------- Co-authored-by: Tim Möhlmann Co-authored-by: Livio Spring Co-authored-by: Max Peintner Co-authored-by: Elio Bischof Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com> Co-authored-by: Miguel Cabrerizo <30386061+doncicuto@users.noreply.github.com> Co-authored-by: Joakim Lodén Co-authored-by: Yxnt Co-authored-by: Stefan Benz Co-authored-by: Harsha Reddy Co-authored-by: Zach H --- .releaserc.js | 1 + Makefile | 12 ++-- internal/crypto/database/database_test.go | 6 -- internal/database/database.go | 30 +------- internal/database/database_test.go | 7 +- .../eventstore/repository/sql/query_test.go | 5 -- internal/eventstore/v3/push.go | 17 +++-- internal/integration/oidc.go | 8 +++ internal/query/device_auth_test.go | 2 - internal/query/prepare_test.go | 10 --- internal/view/repository/db_mock_test.go | 25 ------- internal/view/repository/query.go | 11 --- internal/view/repository/requests.go | 12 +--- pkg/grpc/protoc/v2/options.pb.go | 70 ++++--------------- 14 files changed, 44 insertions(+), 172 deletions(-) diff --git a/.releaserc.js b/.releaserc.js index b5347a7504..be0029b395 100644 --- a/.releaserc.js +++ b/.releaserc.js @@ -10,6 +10,7 @@ module.exports = { "@semantic-release/github", { draftRelease: true, + successComment: false, assets: [ { path: ".artifacts/zitadel-linux-amd64/zitadel-linux-amd64.tar.gz", diff --git a/Makefile b/Makefile index e728e42b01..6a41683390 100644 --- a/Makefile +++ b/Makefile @@ -63,12 +63,12 @@ endif .PHONY: core_grpc_dependencies core_grpc_dependencies: - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions - go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions - go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions - go install github.com/envoyproxy/protoc-gen-validate@v1.0.4 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions - go install github.com/bufbuild/buf/cmd/buf@v1.34.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.35.1 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.22.0 # 
https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions + go install github.com/envoyproxy/protoc-gen-validate@v1.1.0 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions + go install github.com/bufbuild/buf/cmd/buf@v1.45.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions .PHONY: core_api core_api: core_api_generator core_grpc_dependencies diff --git a/internal/crypto/database/database_test.go b/internal/crypto/database/database_test.go index b8217d5ed4..1e3b9c0f22 100644 --- a/internal/crypto/database/database_test.go +++ b/internal/crypto/database/database_test.go @@ -468,17 +468,13 @@ func dbMock(t *testing.T, expectations ...func(m sqlmock.Sqlmock)) db { func expectQueryErr(query string, err error, args ...driver.Value) func(m sqlmock.Sqlmock) { return func(m sqlmock.Sqlmock) { - m.ExpectBegin() m.ExpectQuery(regexp.QuoteMeta(query)).WithArgs(args...).WillReturnError(err) - m.ExpectRollback() } } func expectQueryScanErr(stmt string, cols []string, rows [][]driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) { return func(m sqlmock.Sqlmock) { - m.ExpectBegin() q := m.ExpectQuery(regexp.QuoteMeta(stmt)).WithArgs(args...) - m.ExpectRollback() result := m.NewRows(cols) count := uint64(len(rows)) for _, row := range rows { @@ -494,9 +490,7 @@ func expectQueryScanErr(stmt string, cols []string, rows [][]driver.Value, args func expectQuery(stmt string, cols []string, rows [][]driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) { return func(m sqlmock.Sqlmock) { - m.ExpectBegin() q := m.ExpectQuery(regexp.QuoteMeta(stmt)).WithArgs(args...) - m.ExpectCommit() result := m.NewRows(cols) count := uint64(len(rows)) for _, row := range rows { diff --git a/internal/database/database.go b/internal/database/database.go index c7d73b3b92..d6ccf2873c 100644 --- a/internal/database/database.go +++ b/internal/database/database.go @@ -40,20 +40,7 @@ func (db *DB) Query(scan func(*sql.Rows) error, query string, args ...any) error } func (db *DB) QueryContext(ctx context.Context, scan func(rows *sql.Rows) error, query string, args ...any) (err error) { - tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true, Isolation: sql.LevelReadCommitted}) - if err != nil { - return err - } - defer func() { - if err != nil { - rollbackErr := tx.Rollback() - logging.OnError(rollbackErr).Info("commit of read only transaction failed") - return - } - err = tx.Commit() - }() - - rows, err := tx.QueryContext(ctx, query, args...) + rows, err := db.DB.QueryContext(ctx, query, args...) if err != nil { return err } @@ -73,20 +60,7 @@ func (db *DB) QueryRow(scan func(*sql.Row) error, query string, args ...any) (er } func (db *DB) QueryRowContext(ctx context.Context, scan func(row *sql.Row) error, query string, args ...any) (err error) { - tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true, Isolation: sql.LevelReadCommitted}) - if err != nil { - return err - } - defer func() { - if err != nil { - rollbackErr := tx.Rollback() - logging.OnError(rollbackErr).Info("commit of read only transaction failed") - return - } - err = tx.Commit() - }() - - row := tx.QueryRowContext(ctx, query, args...) + row := db.DB.QueryRowContext(ctx, query, args...) 
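To illustrate the change in this hunk: the read path now talks to the connection pool directly, so each lookup is a single round trip instead of a begin/query/commit triple. The following is a minimal, self-contained sketch of that pattern, not the exact ZITADEL types; the `DB` wrapper and `scan` callback are simplified stand-ins.

```go
package example

import (
	"context"
	"database/sql"
)

// DB is a thin wrapper around the standard connection pool.
type DB struct {
	*sql.DB
}

// QueryContext runs the statement directly on the pool and hands the rows to
// the scan callback; no explicit read-only transaction is opened around it.
func (db *DB) QueryContext(ctx context.Context, scan func(*sql.Rows) error, query string, args ...any) (err error) {
	rows, err := db.DB.QueryContext(ctx, query, args...)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := rows.Close(); err == nil {
			err = closeErr
		}
	}()
	if err = scan(rows); err != nil {
		return err
	}
	return rows.Err()
}
```

Under PostgreSQL each such statement still runs in its own implicit transaction, so for plain reads the explicit wrapper mainly added latency and the idle `begin ... read only` sessions seen in the load test output above.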
logging.OnError(row.Err()).Error("unexpected query error") err = scan(row) diff --git a/internal/database/database_test.go b/internal/database/database_test.go index 38c76e698a..5a18564e6a 100644 --- a/internal/database/database_test.go +++ b/internal/database/database_test.go @@ -31,7 +31,7 @@ func TestQueryJSONObject(t *testing.T) { { name: "tx error", mock: func(t *testing.T) *mock.SQLMock { - return mock.NewSQLMock(t, mock.ExpectBegin(sql.ErrConnDone)) + return mock.NewSQLMock(t, mock.ExpectQuery("select $1;", mock.WithQueryErr(sql.ErrConnDone))) }, wantErr: zerrors.ThrowInternal(sql.ErrConnDone, "DATAB-Oath6", "Errors.Internal"), }, @@ -39,7 +39,6 @@ func TestQueryJSONObject(t *testing.T) { name: "no rows", mock: func(t *testing.T) *mock.SQLMock { return mock.NewSQLMock(t, - mock.ExpectBegin(nil), mock.ExpectQuery(query, mock.WithQueryArgs(arg), mock.WithQueryResult([]string{"json"}, [][]driver.Value{}), @@ -52,12 +51,10 @@ func TestQueryJSONObject(t *testing.T) { name: "unmarshal error", mock: func(t *testing.T) *mock.SQLMock { return mock.NewSQLMock(t, - mock.ExpectBegin(nil), mock.ExpectQuery(query, mock.WithQueryArgs(arg), mock.WithQueryResult([]string{"json"}, [][]driver.Value{{`~~~`}}), ), - mock.ExpectCommit(nil), ) }, wantErr: zerrors.ThrowInternal(nil, "DATAB-Vohs6", "Errors.Internal"), @@ -66,12 +63,10 @@ func TestQueryJSONObject(t *testing.T) { name: "success", mock: func(t *testing.T) *mock.SQLMock { return mock.NewSQLMock(t, - mock.ExpectBegin(nil), mock.ExpectQuery(query, mock.WithQueryArgs(arg), mock.WithQueryResult([]string{"json"}, [][]driver.Value{{`{"a":1}`}}), ), - mock.ExpectCommit(nil), ) }, want: &dst{A: 1}, diff --git a/internal/eventstore/repository/sql/query_test.go b/internal/eventstore/repository/sql/query_test.go index 5d54b27c21..906c153deb 100644 --- a/internal/eventstore/repository/sql/query_test.go +++ b/internal/eventstore/repository/sql/query_test.go @@ -870,9 +870,7 @@ type dbMock struct { } func (m *dbMock) expectQuery(t *testing.T, expectedQuery string, args []driver.Value, events ...*repository.Event) *dbMock { - m.mock.ExpectBegin() query := m.mock.ExpectQuery(expectedQuery).WithArgs(args...) - m.mock.ExpectCommit() rows := m.mock.NewRows([]string{"sequence"}) for _, event := range events { rows = rows.AddRow(event.Seq) @@ -882,9 +880,7 @@ func (m *dbMock) expectQuery(t *testing.T, expectedQuery string, args []driver.V } func (m *dbMock) expectQueryScanErr(t *testing.T, expectedQuery string, args []driver.Value, events ...*repository.Event) *dbMock { - m.mock.ExpectBegin() query := m.mock.ExpectQuery(expectedQuery).WithArgs(args...) 
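The test updates in this area all follow the same shape: the `ExpectBegin`/`ExpectCommit`/`ExpectRollback` calls disappear and only the query expectation remains. Below is a hedged, stand-alone example of that shape; the statement, table, and columns are invented for illustration.

```go
package example

import (
	"regexp"
	"testing"

	sqlmock "github.com/DATA-DOG/go-sqlmock"
)

func TestQueryWithoutTransaction(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	// Only the query itself is expected; there is no surrounding
	// ExpectBegin/ExpectCommit pair anymore.
	mock.ExpectQuery(regexp.QuoteMeta("SELECT id FROM examples WHERE id = $1")).
		WithArgs("1").
		WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("1"))

	var id string
	if err := db.QueryRow("SELECT id FROM examples WHERE id = $1", "1").Scan(&id); err != nil {
		t.Fatal(err)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Error(err)
	}
}
```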
- m.mock.ExpectRollback() rows := m.mock.NewRows([]string{"sequence"}) for _, event := range events { rows = rows.AddRow(event.Seq) @@ -894,7 +890,6 @@ func (m *dbMock) expectQueryScanErr(t *testing.T, expectedQuery string, args []d } func (m *dbMock) expectQueryErr(t *testing.T, expectedQuery string, args []driver.Value, err error) *dbMock { - m.mock.ExpectBegin() m.mock.ExpectQuery(expectedQuery).WithArgs(args...).WillReturnError(err) return m } diff --git a/internal/eventstore/v3/push.go b/internal/eventstore/v3/push.go index a1c66e28e9..f09c3de515 100644 --- a/internal/eventstore/v3/push.go +++ b/internal/eventstore/v3/push.go @@ -14,10 +14,14 @@ import ( "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/eventstore" + "github.com/zitadel/zitadel/internal/telemetry/tracing" "github.com/zitadel/zitadel/internal/zerrors" ) func (es *Eventstore) Push(ctx context.Context, commands ...eventstore.Command) (events []eventstore.Event, err error) { + ctx, span := tracing.NewSpan(ctx) + defer func() { span.EndWithError(err) }() + tx, err := es.client.BeginTx(ctx, nil) if err != nil { return nil, err @@ -27,18 +31,21 @@ func (es *Eventstore) Push(ctx context.Context, commands ...eventstore.Command) sequences []*latestSequence ) - err = crdb.ExecuteInTx(ctx, &transaction{tx}, func() error { - sequences, err = latestSequences(ctx, tx, commands) + err = crdb.ExecuteInTx(ctx, &transaction{tx}, func() (err error) { + inTxCtx, span := tracing.NewSpan(ctx) + defer func() { span.EndWithError(err) }() + + sequences, err = latestSequences(inTxCtx, tx, commands) if err != nil { return err } - events, err = insertEvents(ctx, tx, sequences, commands) + events, err = insertEvents(inTxCtx, tx, sequences, commands) if err != nil { return err } - if err = handleUniqueConstraints(ctx, tx, commands); err != nil { + if err = handleUniqueConstraints(inTxCtx, tx, commands); err != nil { return err } @@ -51,7 +58,7 @@ func (es *Eventstore) Push(ctx context.Context, commands ...eventstore.Command) } } - return handleFieldCommands(ctx, tx, commands) + return handleFieldCommands(inTxCtx, tx, commands) }) if err != nil { diff --git a/internal/integration/oidc.go b/internal/integration/oidc.go index 3186ccdaa6..a581abe91b 100644 --- a/internal/integration/oidc.go +++ b/internal/integration/oidc.go @@ -411,5 +411,13 @@ func (i *Instance) CreateOIDCJWTProfileClient(ctx context.Context) (machine *man if err != nil { return nil, "", nil, err } + mustAwait(func() error { + _, err := i.Client.Mgmt.GetMachineKeyByIDs(ctx, &management.GetMachineKeyByIDsRequest{ + UserId: machine.GetUserId(), + KeyId: keyResp.GetKeyId(), + }) + return err + }) + return machine, name, keyResp.GetKeyDetails(), nil } diff --git a/internal/query/device_auth_test.go b/internal/query/device_auth_test.go index ff6a91e222..f81a11b411 100644 --- a/internal/query/device_auth_test.go +++ b/internal/query/device_auth_test.go @@ -57,11 +57,9 @@ func TestQueries_DeviceAuthRequestByUserCode(t *testing.T) { } defer client.Close() - mock.ExpectBegin() mock.ExpectQuery(expectedDeviceAuthWhereUserCodeQuery).WillReturnRows( mock.NewRows(deviceAuthSelectColumns).AddRow(expectedDeviceAuthValues...), ) - mock.ExpectCommit() q := Queries{ client: &database.DB{DB: client}, } diff --git a/internal/query/prepare_test.go b/internal/query/prepare_test.go index 2131a767af..0c0dd6d40c 100644 --- a/internal/query/prepare_test.go +++ b/internal/query/prepare_test.go @@ -82,9 +82,7 @@ type sqlExpectation func(sqlmock.Sqlmock) sqlmock.Sqlmock func mockQuery(stmt 
string, cols []string, row []driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) sqlmock.Sqlmock { return func(m sqlmock.Sqlmock) sqlmock.Sqlmock { - m.ExpectBegin() q := m.ExpectQuery(stmt).WithArgs(args...) - m.ExpectCommit() result := m.NewRows(cols) if len(row) > 0 { result.AddRow(row...) @@ -96,9 +94,7 @@ func mockQuery(stmt string, cols []string, row []driver.Value, args ...driver.Va func mockQueryScanErr(stmt string, cols []string, row []driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) sqlmock.Sqlmock { return func(m sqlmock.Sqlmock) sqlmock.Sqlmock { - m.ExpectBegin() q := m.ExpectQuery(stmt).WithArgs(args...) - m.ExpectRollback() result := m.NewRows(cols) if len(row) > 0 { result.AddRow(row...) @@ -110,9 +106,7 @@ func mockQueryScanErr(stmt string, cols []string, row []driver.Value, args ...dr func mockQueries(stmt string, cols []string, rows [][]driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) sqlmock.Sqlmock { return func(m sqlmock.Sqlmock) sqlmock.Sqlmock { - m.ExpectBegin() q := m.ExpectQuery(stmt).WithArgs(args...) - m.ExpectCommit() result := m.NewRows(cols) count := uint64(len(rows)) for _, row := range rows { @@ -129,9 +123,7 @@ func mockQueries(stmt string, cols []string, rows [][]driver.Value, args ...driv func mockQueriesScanErr(stmt string, cols []string, rows [][]driver.Value, args ...driver.Value) func(m sqlmock.Sqlmock) sqlmock.Sqlmock { return func(m sqlmock.Sqlmock) sqlmock.Sqlmock { - m.ExpectBegin() q := m.ExpectQuery(stmt).WithArgs(args...) - m.ExpectRollback() result := m.NewRows(cols) count := uint64(len(rows)) for _, row := range rows { @@ -148,10 +140,8 @@ func mockQueriesScanErr(stmt string, cols []string, rows [][]driver.Value, args func mockQueryErr(stmt string, err error, args ...driver.Value) func(m sqlmock.Sqlmock) sqlmock.Sqlmock { return func(m sqlmock.Sqlmock) sqlmock.Sqlmock { - m.ExpectBegin() q := m.ExpectQuery(stmt).WithArgs(args...) q.WillReturnError(err) - m.ExpectRollback() return m } } diff --git a/internal/view/repository/db_mock_test.go b/internal/view/repository/db_mock_test.go index b78d68d4da..a2a73ccae4 100644 --- a/internal/view/repository/db_mock_test.go +++ b/internal/view/repository/db_mock_test.go @@ -176,58 +176,48 @@ func (db *dbMock) expectRollback(err error) *dbMock { func (db *dbMock) expectGetByID(table, key, value string) *dbMock { query := fmt.Sprintf(expectedGetByID, table, key) - db.mock.ExpectBegin() db.mock.ExpectQuery(query). WithArgs(value). WillReturnRows(db.mock.NewRows([]string{key}). AddRow(key)) - db.mock.ExpectCommit() return db } func (db *dbMock) expectGetByIDErr(table, key, value string, err error) *dbMock { query := fmt.Sprintf(expectedGetByID, table, key) - db.mock.ExpectBegin() db.mock.ExpectQuery(query). WithArgs(value). WillReturnError(err) - db.mock.ExpectCommit() return db } func (db *dbMock) expectGetByQuery(table, key, method, value string) *dbMock { query := fmt.Sprintf(expectedGetByQuery, table, key, method) - db.mock.ExpectBegin() db.mock.ExpectQuery(query). WithArgs(value). WillReturnRows(db.mock.NewRows([]string{key}). AddRow(key)) - db.mock.ExpectCommit() return db } func (db *dbMock) expectGetByQueryCaseSensitive(table, key, method, value string) *dbMock { query := fmt.Sprintf(expectedGetByQueryCaseSensitive, table, key, method) - db.mock.ExpectBegin() db.mock.ExpectQuery(query). WithArgs(value). WillReturnRows(db.mock.NewRows([]string{key}). 
AddRow(key)) - db.mock.ExpectCommit() return db } func (db *dbMock) expectGetByQueryErr(table, key, method, value string, err error) *dbMock { query := fmt.Sprintf(expectedGetByQuery, table, key, method) - db.mock.ExpectBegin() db.mock.ExpectQuery(query). WithArgs(value). WillReturnError(err) - db.mock.ExpectCommit() return db } @@ -324,14 +314,11 @@ func (db *dbMock) expectGetSearchRequestNoParams(table string, resultAmount, tot rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() - db.mock.ExpectQuery(queryCount). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -344,12 +331,10 @@ func (db *dbMock) expectGetSearchRequestWithLimit(table string, limit, resultAmo rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -362,12 +347,10 @@ func (db *dbMock) expectGetSearchRequestWithOffset(table string, offset, resultA rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -380,12 +363,10 @@ func (db *dbMock) expectGetSearchRequestWithSorting(table, sorting string, sorti rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -398,14 +379,12 @@ func (db *dbMock) expectGetSearchRequestWithSearchQuery(table, key, method, valu rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WithArgs(value). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WithArgs(value). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -418,14 +397,12 @@ func (db *dbMock) expectGetSearchRequestWithAllParams(table, key, method, value, rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WithArgs(value). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). WithArgs(value). WillReturnRows(rows) - db.mock.ExpectCommit() return db } @@ -438,11 +415,9 @@ func (db *dbMock) expectGetSearchRequestErr(table string, resultAmount, total in rows.AddRow(fmt.Sprintf("hodor-%d", i)) } - db.mock.ExpectBegin() db.mock.ExpectQuery(queryCount). WillReturnRows(db.mock.NewRows([]string{"count"}).AddRow(total)) db.mock.ExpectQuery(query). 
WillReturnError(err) - db.mock.ExpectCommit() return db } diff --git a/internal/view/repository/query.go b/internal/view/repository/query.go index fc2b6d8ac3..1462f76668 100644 --- a/internal/view/repository/query.go +++ b/internal/view/repository/query.go @@ -1,12 +1,9 @@ package repository import ( - "context" - "database/sql" "fmt" "github.com/jinzhu/gorm" - "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/domain" @@ -51,14 +48,6 @@ func PrepareSearchQuery(table string, request SearchRequest) func(db *gorm.DB, r } } - query = query.BeginTx(context.Background(), &sql.TxOptions{ReadOnly: true}) - defer func() { - if err := query.Commit().Error; err != nil { - logging.OnError(err).Info("commit failed") - } - query.RollbackUnlessCommitted() - }() - query = query.Count(&count) if res == nil { return count, nil diff --git a/internal/view/repository/requests.go b/internal/view/repository/requests.go index 54b4e79c3f..f389dee713 100644 --- a/internal/view/repository/requests.go +++ b/internal/view/repository/requests.go @@ -1,8 +1,6 @@ package repository import ( - "context" - "database/sql" "errors" "fmt" "strings" @@ -24,15 +22,7 @@ func PrepareGetByQuery(table string, queries ...SearchQuery) func(db *gorm.DB, r } } - tx := query.BeginTx(context.Background(), &sql.TxOptions{ReadOnly: true}) - defer func() { - if err := tx.Commit().Error; err != nil { - logging.OnError(err).Info("commit failed") - } - tx.RollbackUnlessCommitted() - }() - - err := tx.Take(res).Error + err := query.Take(res).Error if err == nil { return nil } diff --git a/pkg/grpc/protoc/v2/options.pb.go b/pkg/grpc/protoc/v2/options.pb.go index cdb10700c5..da7e038ee7 100644 --- a/pkg/grpc/protoc/v2/options.pb.go +++ b/pkg/grpc/protoc/v2/options.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
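The gorm-based view repository changes above apply the same principle: the count and result statements are issued directly on the handle rather than inside a read-only transaction. A rough sketch follows, assuming the jinzhu/gorm v1 API already used by this package; the table name and destination are placeholders.

```go
package example

import "github.com/jinzhu/gorm"

// search counts the matching rows and then loads them, each statement going
// straight to the database without an explicit BEGIN/COMMIT around the pair.
func search(db *gorm.DB, table string, res interface{}) (count int, err error) {
	query := db.Table(table)
	if err := query.Count(&count).Error; err != nil {
		return 0, err
	}
	if res == nil {
		return count, nil
	}
	if err := query.Find(res).Error; err != nil {
		return 0, err
	}
	return count, nil
}
```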
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: zitadel/protoc_gen_zitadel/v2/options.proto @@ -32,11 +32,9 @@ type Options struct { func (x *Options) Reset() { *x = Options{} - if protoimpl.UnsafeEnabled { - mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Options) String() string { @@ -47,7 +45,7 @@ func (*Options) ProtoMessage() {} func (x *Options) ProtoReflect() protoreflect.Message { mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -87,11 +85,9 @@ type AuthOption struct { func (x *AuthOption) Reset() { *x = AuthOption{} - if protoimpl.UnsafeEnabled { - mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuthOption) String() string { @@ -102,7 +98,7 @@ func (*AuthOption) ProtoMessage() {} func (x *AuthOption) ProtoReflect() protoreflect.Message { mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -141,11 +137,9 @@ type CustomHTTPResponse struct { func (x *CustomHTTPResponse) Reset() { *x = CustomHTTPResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CustomHTTPResponse) String() string { @@ -156,7 +150,7 @@ func (*CustomHTTPResponse) ProtoMessage() {} func (x *CustomHTTPResponse) ProtoReflect() protoreflect.Message { mi := &file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -273,44 +267,6 @@ func file_zitadel_protoc_gen_zitadel_v2_options_proto_init() { if File_zitadel_protoc_gen_zitadel_v2_options_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Options); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*AuthOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_zitadel_protoc_gen_zitadel_v2_options_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*CustomHTTPResponse); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ From 250f2344c8c2292ca9b861cdd12223d0b4719d43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20M=C3=B6hlmann?= Date: Mon, 4 Nov 2024 11:44:51 +0100 Subject: [PATCH 27/30] feat(cache): redis cache (#8822) # Which Problems Are Solved Add a cache implementation using Redis single mode. This does not add support for Redis Cluster or sentinel. # How the Problems Are Solved Added the `internal/cache/redis` package. All operations occur atomically, including setting of secondary indexes, using LUA scripts where needed. The [`miniredis`](https://github.com/alicebob/miniredis) package is used to run unit tests. # Additional Changes - Move connector code to `internal/cache/connector/...` and remove duplicate code from `query` and `command` packages. - Fix a missed invalidation on the restrictions projection # Additional Context Closes #8130 --- .github/workflows/core-integration-test.yml | 4 + Makefile | 2 +- cmd/defaults.yaml | 138 +++- cmd/mirror/projections.go | 13 +- cmd/setup/03.go | 6 +- cmd/setup/config.go | 4 +- cmd/setup/config_change.go | 5 +- cmd/setup/setup.go | 11 +- cmd/start/config.go | 4 +- cmd/start/start.go | 11 +- go.mod | 5 + go.sum | 14 + internal/cache/cache.go | 44 +- internal/cache/connector/connector.go | 69 ++ internal/cache/connector/gomap/connector.go | 23 + internal/cache/{ => connector}/gomap/gomap.go | 8 +- .../cache/{ => connector}/gomap/gomap_test.go | 24 +- internal/cache/{ => connector}/noop/noop.go | 0 internal/cache/connector/pg/connector.go | 28 + .../pg/create_partition.sql.tmpl | 0 internal/cache/{ => connector}/pg/delete.sql | 0 internal/cache/{ => connector}/pg/get.sql | 0 .../cache/{ => connector}/pg/invalidate.sql | 0 internal/cache/{ => connector}/pg/pg.go | 40 +- internal/cache/{ => connector}/pg/pg_test.go | 89 ++- internal/cache/{ => connector}/pg/prune.sql | 0 internal/cache/{ => connector}/pg/set.sql | 0 .../cache/{ => connector}/pg/truncate.sql | 0 internal/cache/connector/redis/_remove.lua | 10 + internal/cache/connector/redis/_select.lua | 3 + internal/cache/connector/redis/_util.lua | 17 + internal/cache/connector/redis/connector.go | 154 ++++ internal/cache/connector/redis/get.lua | 29 + internal/cache/connector/redis/invalidate.lua | 9 + internal/cache/connector/redis/redis.go | 172 +++++ internal/cache/connector/redis/redis_test.go | 714 ++++++++++++++++++ internal/cache/connector/redis/set.lua | 27 + internal/cache/connector_enumer.go | 98 +++ internal/cache/pruner.go | 14 +- internal/cache/pruner_test.go | 2 +- internal/cache/purpose_enumer.go | 82 ++ internal/command/cache.go | 69 +- internal/command/command.go | 7 +- internal/command/instance_test.go | 2 +- internal/command/milestone_test.go | 6 +- .../integration/config/docker-compose.yaml | 6 + internal/integration/config/zitadel.yaml | 16 +- internal/query/cache.go | 72 +- internal/query/instance.go | 3 +- internal/query/query.go | 6 +- 50 files changed, 1767 insertions(+), 293 deletions(-) create mode 100644 internal/cache/connector/connector.go create mode 100644 internal/cache/connector/gomap/connector.go rename internal/cache/{ => connector}/gomap/gomap.go (95%) rename internal/cache/{ => connector}/gomap/gomap_test.go (94%) rename internal/cache/{ => connector}/noop/noop.go (100%) create mode 100644 internal/cache/connector/pg/connector.go rename internal/cache/{ => connector}/pg/create_partition.sql.tmpl (100%) rename 
internal/cache/{ => connector}/pg/delete.sql (100%) rename internal/cache/{ => connector}/pg/get.sql (100%) rename internal/cache/{ => connector}/pg/invalidate.sql (100%) rename internal/cache/{ => connector}/pg/pg.go (78%) rename internal/cache/{ => connector}/pg/pg_test.go (83%) rename internal/cache/{ => connector}/pg/prune.sql (100%) rename internal/cache/{ => connector}/pg/set.sql (100%) rename internal/cache/{ => connector}/pg/truncate.sql (100%) create mode 100644 internal/cache/connector/redis/_remove.lua create mode 100644 internal/cache/connector/redis/_select.lua create mode 100644 internal/cache/connector/redis/_util.lua create mode 100644 internal/cache/connector/redis/connector.go create mode 100644 internal/cache/connector/redis/get.lua create mode 100644 internal/cache/connector/redis/invalidate.lua create mode 100644 internal/cache/connector/redis/redis.go create mode 100644 internal/cache/connector/redis/redis_test.go create mode 100644 internal/cache/connector/redis/set.lua create mode 100644 internal/cache/connector_enumer.go create mode 100644 internal/cache/purpose_enumer.go diff --git a/.github/workflows/core-integration-test.yml b/.github/workflows/core-integration-test.yml index 2673d4addf..cc9d898f5c 100644 --- a/.github/workflows/core-integration-test.yml +++ b/.github/workflows/core-integration-test.yml @@ -36,6 +36,10 @@ jobs: --health-timeout 5s --health-retries 5 --health-start-period 10s + cache: + image: redis:latest + ports: + - 6379:6379 steps: - uses: actions/checkout@v4 diff --git a/Makefile b/Makefile index 6a41683390..27e76c0614 100644 --- a/Makefile +++ b/Makefile @@ -113,7 +113,7 @@ core_unit_test: .PHONY: core_integration_db_up core_integration_db_up: - docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} + docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} cache .PHONY: core_integration_db_down core_integration_db_down: diff --git a/cmd/defaults.yaml b/cmd/defaults.yaml index f691fd2af2..8015dd8dad 100644 --- a/cmd/defaults.yaml +++ b/cmd/defaults.yaml @@ -185,34 +185,136 @@ Database: # Caches are EXPERIMENTAL. The following config may have breaking changes in the future. # If no config is provided, caching is disabled by default. -# Caches: +Caches: # Connectors are reused by caches. -# Connectors: + Connectors: # Memory connector works with local server memory. # It is the simplest (and probably fastest) cache implementation. # Unsuitable for deployments with multiple containers, # as each container's cache may hold a different state of the same object. -# Memory: -# Enabled: true + Memory: + Enabled: false # AutoPrune removes invalidated or expired object from the cache. -# AutoPrune: -# Interval: 15m -# TimeOut: 30s + AutoPrune: + Interval: 1m + TimeOut: 5s + Postgres: + Enabled: false + AutoPrune: + Interval: 15m + TimeOut: 30s + Redis: + Enabled: false + # The network type, either tcp or unix. + # Default is tcp. + # Network string + # host:port address. + Addr: localhost:6379 + # ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. + ClientName: ZITADEL_cache + # Use the specified Username to authenticate the current connection + # with one of the connections defined in the ACL list when connecting + # to a Redis 6.0 instance, or greater, that is using the Redis ACL system. + Username: zitadel + # Optional password. 
Must match the password specified in the + # requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower), + # or the User Password when connecting to a Redis 6.0 instance, or greater, + # that is using the Redis ACL system. + Password: "" + # Each ZITADEL cache uses an incremental DB namespace. + # This option offsets the first DB so it doesn't conflict with other databases on the same server. + # Note that ZITADEL uses FLUSHDB command to truncate a cache. + # This can have destructive consequences when overlapping DB namespaces are used. + DBOffset: 10 + # Maximum number of retries before giving up. + # Default is 3 retries; -1 (not 0) disables retries. + MaxRetries: 3 + # Minimum backoff between each retry. + # Default is 8 milliseconds; -1 disables backoff. + MinRetryBackoff: 8ms + # Maximum backoff between each retry. + # Default is 512 milliseconds; -1 disables backoff. + MaxRetryBackoff: 512ms + # Dial timeout for establishing new connections. + # Default is 5 seconds. + DialTimeout: 1s + # Timeout for socket reads. If reached, commands will fail + # with a timeout instead of blocking. Supported values: + # - `0` - default timeout (3 seconds). + # - `-1` - no timeout (block indefinitely). + # - `-2` - disables SetReadDeadline calls completely. + ReadTimeout: 100ms + # Timeout for socket writes. If reached, commands will fail + # with a timeout instead of blocking. Supported values: + # - `0` - default timeout (3 seconds). + # - `-1` - no timeout (block indefinitely). + # - `-2` - disables SetWriteDeadline calls completely. + WriteTimeout: 100ms + # Type of connection pool. + # true for FIFO pool, false for LIFO pool. + # Note that FIFO has slightly higher overhead compared to LIFO, + # but it helps closing idle connections faster reducing the pool size. + PoolFIFO: false + # Base number of socket connections. + # Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. + # If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize, + # you can limit it through MaxActiveConns + PoolSize: 20 + # Amount of time client waits for connection if all connections + # are busy before returning an error. + # Default is ReadTimeout + 1 second. + PoolTimeout: 100ms + # Minimum number of idle connections which is useful when establishing + # new connection is slow. + # Default is 0. the idle connections are not closed by default. + MinIdleConns: 5 + # Maximum number of idle connections. + # Default is 0. the idle connections are not closed by default. + MaxIdleConns: 10 + # Maximum number of connections allocated by the pool at a given time. + # When zero, there is no limit on the number of connections in the pool. + MaxActiveConns: 40 + # ConnMaxIdleTime is the maximum amount of time a connection may be idle. + # Should be less than server's timeout. + # Expired connections may be closed lazily before reuse. + # If d <= 0, connections are not closed due to a connection's idle time. + # Default is 30 minutes. -1 disables idle timeout check. + ConnMaxIdleTime: 30m + # ConnMaxLifetime is the maximum amount of time a connection may be reused. + # Expired connections may be closed lazily before reuse. + # If <= 0, connections are not closed due to a connection's age. + # Default is to not close idle connections. + ConnMaxLifetime: -1 + # Enable TLS server authentication using the default system bundle. + EnableTLS: false + # Disable set-lib on connect. Default is false. 
+ DisableIndentity: false + # Add suffix to client name. Default is empty. + IdentitySuffix: "" # Instance caches auth middleware instances, gettable by domain or ID. -# Instance: + Instance: # Connector must be enabled above. # When connector is empty, this cache will be disabled. -# Connector: "memory" -# MaxAge: 1h -# LastUsage: 10m -# - # Log enables cache-specific logging. Default to error log to stdout when omitted. -# Log: -# Level: debug -# AddSource: true -# Formatter: -# Format: text + Connector: "" + MaxAge: 1h + LastUsage: 10m + # Log enables cache-specific logging. Default to error log to stderr when omitted. + Log: + Level: error + AddSource: true + Formatter: + Format: text + # Milestones caches instance milestone state, gettable by instance ID + Milestones: + Connector: "" + MaxAge: 1h + LastUsage: 10m + Log: + Level: error + AddSource: true + Formatter: + Format: text Machine: # Cloud-hosted VMs need to specify their metadata endpoint so that the machine can be uniquely identified. diff --git a/cmd/mirror/projections.go b/cmd/mirror/projections.go index 9b7ec02cb8..cffc4921ca 100644 --- a/cmd/mirror/projections.go +++ b/cmd/mirror/projections.go @@ -25,7 +25,7 @@ import ( auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" "github.com/zitadel/zitadel/internal/authz" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" - "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/config/systemdefaults" crypto_db "github.com/zitadel/zitadel/internal/crypto/database" @@ -72,7 +72,7 @@ type ProjectionsConfig struct { EncryptionKeys *encryption.EncryptionKeyConfig SystemAPIUsers map[string]*internal_authz.SystemAPIUser Eventstore *eventstore.Config - Caches *cache.CachesConfig + Caches *connector.CachesConfig Admin admin_es.Config Auth auth_es.Config @@ -128,13 +128,16 @@ func projections( sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) + cacheConnectors, err := connector.StartConnectors(config.Caches, client) + logging.OnError(err).Fatal("unable to start caches") + queries, err := query.StartQueries( ctx, es, esV4.Querier, client, client, - config.Caches, + cacheConnectors, config.Projections, config.SystemDefaults, keys.IDPConfig, @@ -161,9 +164,9 @@ func projections( DisplayName: config.WebAuthNName, ExternalSecure: config.ExternalSecure, } - commands, err := command.StartCommands( + commands, err := command.StartCommands(ctx, es, - config.Caches, + cacheConnectors, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, staticStorage, diff --git a/cmd/setup/03.go b/cmd/setup/03.go index 4311418388..4d4231ea9c 100644 --- a/cmd/setup/03.go +++ b/cmd/setup/03.go @@ -9,6 +9,7 @@ import ( "golang.org/x/text/language" "github.com/zitadel/zitadel/internal/api/authz" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/crypto" @@ -64,8 +65,9 @@ func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error return err } - cmd, err := command.StartCommands(mig.es, - nil, + cmd, err := command.StartCommands(ctx, + mig.es, + connector.Connectors{}, mig.defaults, mig.zitadelRoles, nil, diff --git a/cmd/setup/config.go b/cmd/setup/config.go index 09044456ea..57681c8bc1 100644 --- a/cmd/setup/config.go +++ 
b/cmd/setup/config.go @@ -15,7 +15,7 @@ import ( internal_authz "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/oidc" "github.com/zitadel/zitadel/internal/api/ui/login" - "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/config/hook" "github.com/zitadel/zitadel/internal/config/systemdefaults" @@ -31,7 +31,7 @@ import ( type Config struct { ForMirror bool Database database.Config - Caches *cache.CachesConfig + Caches *connector.CachesConfig SystemDefaults systemdefaults.SystemDefaults InternalAuthZ internal_authz.Config ExternalDomain string diff --git a/cmd/setup/config_change.go b/cmd/setup/config_change.go index 08f0c3c3d6..f38508af2c 100644 --- a/cmd/setup/config_change.go +++ b/cmd/setup/config_change.go @@ -3,6 +3,7 @@ package setup import ( "context" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/eventstore" @@ -31,9 +32,9 @@ func (mig *externalConfigChange) Check(lastRun map[string]interface{}) bool { } func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error { - cmd, err := command.StartCommands( + cmd, err := command.StartCommands(ctx, mig.es, - nil, + connector.Connectors{}, mig.defaults, nil, nil, diff --git a/cmd/setup/setup.go b/cmd/setup/setup.go index 7ffef5e853..e0784654b1 100644 --- a/cmd/setup/setup.go +++ b/cmd/setup/setup.go @@ -22,6 +22,7 @@ import ( auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" "github.com/zitadel/zitadel/internal/authz" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" cryptoDB "github.com/zitadel/zitadel/internal/crypto/database" "github.com/zitadel/zitadel/internal/database" @@ -346,13 +347,17 @@ func initProjections( } sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) + + cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient) + logging.OnError(err).Fatal("unable to start caches") + queries, err := query.StartQueries( ctx, eventstoreClient, eventstoreV4.Querier, queryDBClient, projectionDBClient, - config.Caches, + cacheConnectors, config.Projections, config.SystemDefaults, keys.IDPConfig, @@ -394,9 +399,9 @@ func initProjections( permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) { return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) } - commands, err := command.StartCommands( + commands, err := command.StartCommands(ctx, eventstoreClient, - config.Caches, + cacheConnectors, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, staticStorage, diff --git a/cmd/start/config.go b/cmd/start/config.go index ea432e6296..26c4b84b50 100644 --- a/cmd/start/config.go +++ b/cmd/start/config.go @@ -18,7 +18,7 @@ import ( "github.com/zitadel/zitadel/internal/api/ui/console" "github.com/zitadel/zitadel/internal/api/ui/login" auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing" - "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" 
"github.com/zitadel/zitadel/internal/config/hook" "github.com/zitadel/zitadel/internal/config/network" @@ -49,7 +49,7 @@ type Config struct { HTTP1HostHeader string WebAuthNName string Database database.Config - Caches *cache.CachesConfig + Caches *connector.CachesConfig Tracing tracing.Config Metrics metrics.Config Profiler profiler.Config diff --git a/cmd/start/start.go b/cmd/start/start.go index 8de1105307..e816b5bb52 100644 --- a/cmd/start/start.go +++ b/cmd/start/start.go @@ -69,6 +69,7 @@ import ( "github.com/zitadel/zitadel/internal/authz" authz_repo "github.com/zitadel/zitadel/internal/authz/repository" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/crypto" cryptoDB "github.com/zitadel/zitadel/internal/crypto/database" @@ -177,6 +178,10 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server })) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) + cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient) + if err != nil { + return fmt.Errorf("unable to start caches: %w", err) + } queries, err := query.StartQueries( ctx, @@ -184,7 +189,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server eventstoreV4.Querier, queryDBClient, projectionDBClient, - config.Caches, + cacheConnectors, config.Projections, config.SystemDefaults, keys.IDPConfig, @@ -222,9 +227,9 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server DisplayName: config.WebAuthNName, ExternalSecure: config.ExternalSecure, } - commands, err := command.StartCommands( + commands, err := command.StartCommands(ctx, eventstoreClient, - config.Caches, + cacheConnectors, config.SystemDefaults, config.InternalAuthZ.RolePermissionMappings, storage, diff --git a/go.mod b/go.mod index 1e4f67eb7d..cf4e755605 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.24.0 github.com/Masterminds/squirrel v1.5.4 github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b + github.com/alicebob/miniredis/v2 v2.33.0 github.com/benbjohnson/clock v1.3.5 github.com/boombuler/barcode v1.0.2 github.com/brianvoe/gofakeit/v6 v6.28.0 @@ -52,6 +53,7 @@ require ( github.com/pashagolub/pgxmock/v4 v4.3.0 github.com/pquerna/otp v1.4.0 github.com/rakyll/statik v0.1.7 + github.com/redis/go-redis/v9 v9.7.0 github.com/rs/cors v1.11.1 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/sony/sonyflake v1.2.0 @@ -94,8 +96,10 @@ require ( cloud.google.com/go/auth v0.6.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect + github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/crewjam/httperr v0.2.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -121,6 +125,7 @@ require ( github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect github.com/zenazn/goji v1.0.1 
// indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index 8645aa8417..015fea1b80 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/amdonov/xmlsig v0.1.0 h1:i0iQ3neKLmUhcfIRgiiR3eRPKgXZj+n5lAfqnfKoeXI= github.com/amdonov/xmlsig v0.1.0/go.mod h1:jTR/jO0E8fSl/cLvMesP+RjxyV4Ux4WL1Ip64ZnQpA0= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= @@ -87,6 +91,10 @@ github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMc github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -127,6 +135,8 @@ github.com/descope/virtualwebauthn v1.0.2/go.mod h1:iJvinjD1iZYqQ09J5lF0+795OdDb github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg= @@ -620,6 +630,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -719,6 +731,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8= github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= diff --git a/internal/cache/cache.go b/internal/cache/cache.go index c6d01b928e..9e92f50988 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -6,8 +6,16 @@ import ( "time" "github.com/zitadel/logging" +) - "github.com/zitadel/zitadel/internal/database/postgres" +// Purpose describes which object types are stored by a cache. +type Purpose int + +//go:generate enumer -type Purpose -transform snake -trimprefix Purpose +const ( + PurposeUnspecified Purpose = iota + PurposeAuthzInstance + PurposeMilestones ) // Cache stores objects with a value of type `V`. @@ -72,18 +80,19 @@ type Entry[I, K comparable] interface { Keys(index I) (key []K) } -type CachesConfig struct { - Connectors struct { - Memory MemoryConnectorConfig - Postgres PostgresConnectorConfig - // Redis redis.Config? - } - Instance *CacheConfig - Milestones *CacheConfig -} +type Connector int -type CacheConfig struct { - Connector string +//go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text +const ( + // Empty line comment ensures empty string for unspecified value + ConnectorUnspecified Connector = iota // + ConnectorMemory + ConnectorPostgres + ConnectorRedis +) + +type Config struct { + Connector Connector // Age since an object was added to the cache, // after which the object is considered invalid. @@ -99,14 +108,3 @@ type CacheConfig struct { // By default only errors are logged to stdout. Log *logging.Config } - -type MemoryConnectorConfig struct { - Enabled bool - AutoPrune AutoPruneConfig -} - -type PostgresConnectorConfig struct { - Enabled bool - AutoPrune AutoPruneConfig - Connection postgres.Config -} diff --git a/internal/cache/connector/connector.go b/internal/cache/connector/connector.go new file mode 100644 index 0000000000..0c4fb9ccc6 --- /dev/null +++ b/internal/cache/connector/connector.go @@ -0,0 +1,69 @@ +// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages. 
+package connector + +import ( + "context" + "fmt" + + "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector/gomap" + "github.com/zitadel/zitadel/internal/cache/connector/noop" + "github.com/zitadel/zitadel/internal/cache/connector/pg" + "github.com/zitadel/zitadel/internal/cache/connector/redis" + "github.com/zitadel/zitadel/internal/database" +) + +type CachesConfig struct { + Connectors struct { + Memory gomap.Config + Postgres pg.Config + Redis redis.Config + } + Instance *cache.Config + Milestones *cache.Config +} + +type Connectors struct { + Config CachesConfig + Memory *gomap.Connector + Postgres *pg.Connector + Redis *redis.Connector +} + +func StartConnectors(conf *CachesConfig, client *database.DB) (Connectors, error) { + if conf == nil { + return Connectors{}, nil + } + return Connectors{ + Config: *conf, + Memory: gomap.NewConnector(conf.Connectors.Memory), + Postgres: pg.NewConnector(conf.Connectors.Postgres, client), + Redis: redis.NewConnector(conf.Connectors.Redis), + }, nil +} + +func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) { + if conf == nil || conf.Connector == cache.ConnectorUnspecified { + return noop.NewCache[I, K, V](), nil + } + if conf.Connector == cache.ConnectorMemory && connectors.Memory != nil { + c := gomap.NewCache[I, K, V](background, indices, *conf) + connectors.Memory.Config.StartAutoPrune(background, c, purpose) + return c, nil + } + if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil { + c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres) + if err != nil { + return nil, fmt.Errorf("start cache: %w", err) + } + connectors.Postgres.Config.AutoPrune.StartAutoPrune(background, c, purpose) + return c, nil + } + if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil { + db := connectors.Redis.Config.DBOffset + int(purpose) + c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices) + return c, nil + } + + return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector) +} diff --git a/internal/cache/connector/gomap/connector.go b/internal/cache/connector/gomap/connector.go new file mode 100644 index 0000000000..7ed09c7a72 --- /dev/null +++ b/internal/cache/connector/gomap/connector.go @@ -0,0 +1,23 @@ +package gomap + +import ( + "github.com/zitadel/zitadel/internal/cache" +) + +type Config struct { + Enabled bool + AutoPrune cache.AutoPruneConfig +} + +type Connector struct { + Config cache.AutoPruneConfig +} + +func NewConnector(config Config) *Connector { + if !config.Enabled { + return nil + } + return &Connector{ + Config: config.AutoPrune, + } +} diff --git a/internal/cache/gomap/gomap.go b/internal/cache/connector/gomap/gomap.go similarity index 95% rename from internal/cache/gomap/gomap.go rename to internal/cache/connector/gomap/gomap.go index 160fe4e315..dff9f04143 100644 --- a/internal/cache/gomap/gomap.go +++ b/internal/cache/connector/gomap/gomap.go @@ -14,14 +14,14 @@ import ( ) type mapCache[I, K comparable, V cache.Entry[I, K]] struct { - config *cache.CacheConfig + config *cache.Config indexMap map[I]*index[K, V] logger *slog.Logger } // NewCache returns an in-memory Cache implementation based on the builtin go map type. // Object values are stored as-is and there is no encoding or decoding involved. 
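To make the generic `cache.Entry` contract used by these connectors concrete, here is a small illustrative implementation. The `authzInstance` type and its two indexes are invented for this sketch and are not ZITADEL's real definitions.

```go
package example

// index enumerates the lookups an object supports, mirroring the way the
// authz instance cache is retrievable by ID or by domain.
type index int

const (
	indexByID index = iota
	indexByDomain
)

// authzInstance is a made-up cached object.
type authzInstance struct {
	ID      string
	Domains []string
}

// Keys satisfies cache.Entry: for each index it returns the keys under
// which the object should be registered, so a single Set covers all lookups.
func (i *authzInstance) Keys(idx index) []string {
	switch idx {
	case indexByID:
		return []string{i.ID}
	case indexByDomain:
		return i.Domains
	}
	return nil
}
```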
-func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.CacheConfig) cache.PrunerCache[I, K, V] { +func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] { m := &mapCache[I, K, V]{ config: &config, indexMap: make(map[I]*index[K, V], len(indices)), @@ -116,7 +116,7 @@ func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error { type index[K comparable, V any] struct { mutex sync.RWMutex - config *cache.CacheConfig + config *cache.Config entries map[K]*entry[V] } @@ -177,7 +177,7 @@ type entry[V any] struct { lastUse atomic.Int64 // UnixMicro time } -func (e *entry[V]) isValid(c *cache.CacheConfig) bool { +func (e *entry[V]) isValid(c *cache.Config) bool { if e.invalid.Load() { return false } diff --git a/internal/cache/gomap/gomap_test.go b/internal/cache/connector/gomap/gomap_test.go similarity index 94% rename from internal/cache/gomap/gomap_test.go rename to internal/cache/connector/gomap/gomap_test.go index 7f41900833..810788b554 100644 --- a/internal/cache/gomap/gomap_test.go +++ b/internal/cache/connector/gomap/gomap_test.go @@ -41,7 +41,7 @@ func (o *testObject) Keys(index testIndex) []string { } func Test_mapCache_Get(t *testing.T) { - c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ + c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{ MaxAge: time.Second, LastUseAge: time.Second / 4, Log: &logging.Config{ @@ -103,7 +103,7 @@ func Test_mapCache_Get(t *testing.T) { } func Test_mapCache_Invalidate(t *testing.T) { - c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ + c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{ MaxAge: time.Second, LastUseAge: time.Second / 4, Log: &logging.Config{ @@ -124,7 +124,7 @@ func Test_mapCache_Invalidate(t *testing.T) { } func Test_mapCache_Delete(t *testing.T) { - c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ + c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{ MaxAge: time.Second, LastUseAge: time.Second / 4, Log: &logging.Config{ @@ -157,7 +157,7 @@ func Test_mapCache_Delete(t *testing.T) { } func Test_mapCache_Prune(t *testing.T) { - c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ + c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{ MaxAge: time.Second, LastUseAge: time.Second / 4, Log: &logging.Config{ @@ -193,7 +193,7 @@ func Test_mapCache_Prune(t *testing.T) { } func Test_mapCache_Truncate(t *testing.T) { - c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ + c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{ MaxAge: time.Second, LastUseAge: time.Second / 4, Log: &logging.Config{ @@ -235,7 +235,7 @@ func Test_entry_isValid(t *testing.T) { tests := []struct { name string fields fields - config *cache.CacheConfig + config *cache.Config want bool }{ { @@ -245,7 +245,7 @@ func Test_entry_isValid(t *testing.T) { invalid: true, lastUse: time.Now(), }, - config: &cache.CacheConfig{ + config: &cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -258,7 +258,7 @@ func Test_entry_isValid(t *testing.T) { invalid: false, lastUse: 
time.Now(), }, - config: &cache.CacheConfig{ + config: &cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -271,7 +271,7 @@ func Test_entry_isValid(t *testing.T) { invalid: false, lastUse: time.Now(), }, - config: &cache.CacheConfig{ + config: &cache.Config{ LastUseAge: time.Second, }, want: true, @@ -283,7 +283,7 @@ func Test_entry_isValid(t *testing.T) { invalid: false, lastUse: time.Now().Add(-(time.Second * 2)), }, - config: &cache.CacheConfig{ + config: &cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -296,7 +296,7 @@ func Test_entry_isValid(t *testing.T) { invalid: false, lastUse: time.Now().Add(-(time.Second * 2)), }, - config: &cache.CacheConfig{ + config: &cache.Config{ MaxAge: time.Minute, }, want: true, @@ -308,7 +308,7 @@ func Test_entry_isValid(t *testing.T) { invalid: false, lastUse: time.Now(), }, - config: &cache.CacheConfig{ + config: &cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, diff --git a/internal/cache/noop/noop.go b/internal/cache/connector/noop/noop.go similarity index 100% rename from internal/cache/noop/noop.go rename to internal/cache/connector/noop/noop.go diff --git a/internal/cache/connector/pg/connector.go b/internal/cache/connector/pg/connector.go new file mode 100644 index 0000000000..9a89cf5f6a --- /dev/null +++ b/internal/cache/connector/pg/connector.go @@ -0,0 +1,28 @@ +package pg + +import ( + "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/database" +) + +type Config struct { + Enabled bool + AutoPrune cache.AutoPruneConfig +} + +type Connector struct { + PGXPool + Dialect string + Config Config +} + +func NewConnector(config Config, client *database.DB) *Connector { + if !config.Enabled { + return nil + } + return &Connector{ + PGXPool: client.Pool, + Dialect: client.Type(), + Config: config, + } +} diff --git a/internal/cache/pg/create_partition.sql.tmpl b/internal/cache/connector/pg/create_partition.sql.tmpl similarity index 100% rename from internal/cache/pg/create_partition.sql.tmpl rename to internal/cache/connector/pg/create_partition.sql.tmpl diff --git a/internal/cache/pg/delete.sql b/internal/cache/connector/pg/delete.sql similarity index 100% rename from internal/cache/pg/delete.sql rename to internal/cache/connector/pg/delete.sql diff --git a/internal/cache/pg/get.sql b/internal/cache/connector/pg/get.sql similarity index 100% rename from internal/cache/pg/get.sql rename to internal/cache/connector/pg/get.sql diff --git a/internal/cache/pg/invalidate.sql b/internal/cache/connector/pg/invalidate.sql similarity index 100% rename from internal/cache/pg/invalidate.sql rename to internal/cache/connector/pg/invalidate.sql diff --git a/internal/cache/pg/pg.go b/internal/cache/connector/pg/pg.go similarity index 78% rename from internal/cache/pg/pg.go rename to internal/cache/connector/pg/pg.go index aee0315327..18215b68ed 100644 --- a/internal/cache/pg/pg.go +++ b/internal/cache/connector/pg/pg.go @@ -40,25 +40,25 @@ type PGXPool interface { } type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct { - name string - config *cache.CacheConfig - indices []I - pool PGXPool - logger *slog.Logger + purpose cache.Purpose + config *cache.Config + indices []I + connector *Connector + logger *slog.Logger } // NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables. 
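+//
+// A minimal usage sketch (illustrative; ctx, pool, conf and the testIndex / testObject
+// type parameters are placeholders taken from the tests in this package):
+//
+//	connector := &Connector{PGXPool: pool, Dialect: "postgres"}
+//	c, err := NewCache[testIndex, string, *testObject](ctx, cache.PurposeAuthzInstance, conf, testIndices, connector)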
-func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name string, config cache.CacheConfig, indices []I, pool PGXPool, dialect string) (cache.PrunerCache[I, K, V], error) { +func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) { c := &pgCache[I, K, V]{ - name: name, - config: &config, - indices: indices, - pool: pool, - logger: config.Log.Slog().With("cache_name", name), + purpose: purpose, + config: &config, + indices: indices, + connector: connector, + logger: config.Log.Slog().With("cache_purpose", purpose), } c.logger.InfoContext(ctx, "pg cache logging enabled") - if dialect == "postgres" { + if connector.Dialect == "postgres" { if err := c.createPartition(ctx); err != nil { return nil, err } @@ -68,10 +68,10 @@ func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error { var query strings.Builder - if err := createPartitionTmpl.Execute(&query, c.name); err != nil { + if err := createPartitionTmpl.Execute(&query, c.purpose.String()); err != nil { return err } - _, err := c.pool.Exec(ctx, query.String()) + _, err := c.connector.Exec(ctx, query.String()) return err } @@ -87,7 +87,7 @@ func (c *pgCache[I, K, V]) set(ctx context.Context, entry V) (err error) { keys := c.indexKeysFromEntry(entry) c.logger.DebugContext(ctx, "pg cache set", "index_key", keys) - _, err = c.pool.Exec(ctx, setQuery, c.name, keys, entry) + _, err = c.connector.Exec(ctx, setQuery, c.purpose.String(), keys, entry) if err != nil { c.logger.ErrorContext(ctx, "pg cache set", "err", err) return err @@ -117,7 +117,7 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er if !slices.Contains(c.indices, index) { return value, cache.NewIndexUnknownErr(index) } - err = c.pool.QueryRow(ctx, getQuery, c.name, index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value) + err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value) return value, err } @@ -125,7 +125,7 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) ( ctx, span := tracing.NewSpan(ctx) defer func() { span.EndWithError(err) }() - _, err = c.pool.Exec(ctx, invalidateQuery, c.name, index, keys) + _, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys) c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys) return err } @@ -134,7 +134,7 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err ctx, span := tracing.NewSpan(ctx) defer func() { span.EndWithError(err) }() - _, err = c.pool.Exec(ctx, deleteQuery, c.name, index, keys) + _, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys) c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys) return err } @@ -143,7 +143,7 @@ func (c *pgCache[I, K, V]) Prune(ctx context.Context) (err error) { ctx, span := tracing.NewSpan(ctx) defer func() { span.EndWithError(err) }() - _, err = c.pool.Exec(ctx, pruneQuery, c.name, c.config.MaxAge, c.config.LastUseAge) + _, err = c.connector.Exec(ctx, pruneQuery, c.purpose.String(), c.config.MaxAge, c.config.LastUseAge) c.logger.DebugContext(ctx, "pg cache prune") return err } @@ -152,7 +152,7 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) { ctx, span := tracing.NewSpan(ctx) 
defer func() { span.EndWithError(err) }() - _, err = c.pool.Exec(ctx, truncateQuery, c.name) + _, err = c.connector.Exec(ctx, truncateQuery, c.purpose.String()) c.logger.DebugContext(ctx, "pg cache truncate") return err } diff --git a/internal/cache/pg/pg_test.go b/internal/cache/connector/pg/pg_test.go similarity index 83% rename from internal/cache/pg/pg_test.go rename to internal/cache/connector/pg/pg_test.go index 9206a220f2..f5980ad845 100644 --- a/internal/cache/pg/pg_test.go +++ b/internal/cache/connector/pg/pg_test.go @@ -67,7 +67,7 @@ func TestNewCache(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - conf := cache.CacheConfig{ + conf := cache.Config{ Log: &logging.Config{ Level: "debug", AddSource: true, @@ -76,8 +76,12 @@ func TestNewCache(t *testing.T) { pool, err := pgxmock.NewPool() require.NoError(t, err) tt.expect(pool) + connector := &Connector{ + PGXPool: pool, + Dialect: "postgres", + } - c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres") + c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector) require.ErrorIs(t, err, tt.wantErr) if tt.wantErr == nil { assert.NotNil(t, c) @@ -111,7 +115,7 @@ func Test_pgCache_Set(t *testing.T) { }, expect: func(ppi pgxmock.PgxCommonIface) { ppi.ExpectExec(queryExpect). - WithArgs("test", + WithArgs(cachePurpose.String(), []indexKey[testIndex, string]{ {IndexID: testIndexID, IndexKey: "id1"}, {IndexID: testIndexName, IndexKey: "foo"}, @@ -135,7 +139,7 @@ func Test_pgCache_Set(t *testing.T) { }, expect: func(ppi pgxmock.PgxCommonIface) { ppi.ExpectExec(queryExpect). - WithArgs("test", + WithArgs(cachePurpose.String(), []indexKey[testIndex, string]{ {IndexID: testIndexID, IndexKey: "id1"}, {IndexID: testIndexName, IndexKey: "foo"}, @@ -151,7 +155,7 @@ func Test_pgCache_Set(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c, pool := prepareCache(t, cache.CacheConfig{}) + c, pool := prepareCache(t, cache.Config{}) defer pool.Close() tt.expect(pool) @@ -173,7 +177,7 @@ func Test_pgCache_Get(t *testing.T) { } tests := []struct { name string - config cache.CacheConfig + config cache.Config args args expect func(pgxmock.PgxCommonIface) want *testObject @@ -181,7 +185,7 @@ func Test_pgCache_Get(t *testing.T) { }{ { name: "invalid index", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -194,7 +198,7 @@ func Test_pgCache_Get(t *testing.T) { }, { name: "no rows", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, @@ -204,14 +208,14 @@ func Test_pgCache_Get(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectQuery(queryExpect). - WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)). + WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)). WillReturnRows(pgxmock.NewRows([]string{"payload"})) }, wantOk: false, }, { name: "error", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, @@ -221,14 +225,14 @@ func Test_pgCache_Get(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectQuery(queryExpect). - WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)). + WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)). 
WillReturnError(pgx.ErrTxClosed) }, wantOk: false, }, { name: "ok", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -238,7 +242,7 @@ func Test_pgCache_Get(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectQuery(queryExpect). - WithArgs("test", testIndexID, "id1", time.Minute, time.Second). + WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second). WillReturnRows( pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{ ID: "id1", @@ -276,14 +280,14 @@ func Test_pgCache_Invalidate(t *testing.T) { } tests := []struct { name string - config cache.CacheConfig + config cache.Config args args expect func(pgxmock.PgxCommonIface) wantErr error }{ { name: "error", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, @@ -293,14 +297,14 @@ func Test_pgCache_Invalidate(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", testIndexID, []string{"id1", "id2"}). + WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}). WillReturnError(pgx.ErrTxClosed) }, wantErr: pgx.ErrTxClosed, }, { name: "ok", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -310,7 +314,7 @@ func Test_pgCache_Invalidate(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", testIndexID, []string{"id1", "id2"}). + WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}). WillReturnResult(pgxmock.NewResult("DELETE", 1)) }, }, @@ -338,14 +342,14 @@ func Test_pgCache_Delete(t *testing.T) { } tests := []struct { name string - config cache.CacheConfig + config cache.Config args args expect func(pgxmock.PgxCommonIface) wantErr error }{ { name: "error", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, @@ -355,14 +359,14 @@ func Test_pgCache_Delete(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", testIndexID, []string{"id1", "id2"}). + WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}). WillReturnError(pgx.ErrTxClosed) }, wantErr: pgx.ErrTxClosed, }, { name: "ok", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, @@ -372,7 +376,7 @@ func Test_pgCache_Delete(t *testing.T) { }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", testIndexID, []string{"id1", "id2"}). + WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}). WillReturnResult(pgxmock.NewResult("DELETE", 1)) }, }, @@ -396,32 +400,32 @@ func Test_pgCache_Prune(t *testing.T) { queryExpect := regexp.QuoteMeta(pruneQuery) tests := []struct { name string - config cache.CacheConfig + config cache.Config expect func(pgxmock.PgxCommonIface) wantErr error }{ { name: "error", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", time.Duration(0), time.Duration(0)). + WithArgs(cachePurpose.String(), time.Duration(0), time.Duration(0)). WillReturnError(pgx.ErrTxClosed) }, wantErr: pgx.ErrTxClosed, }, { name: "ok", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test", time.Minute, time.Second). 
+ WithArgs(cachePurpose.String(), time.Minute, time.Second). WillReturnResult(pgxmock.NewResult("DELETE", 1)) }, }, @@ -445,32 +449,32 @@ func Test_pgCache_Truncate(t *testing.T) { queryExpect := regexp.QuoteMeta(truncateQuery) tests := []struct { name string - config cache.CacheConfig + config cache.Config expect func(pgxmock.PgxCommonIface) wantErr error }{ { name: "error", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: 0, LastUseAge: 0, }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test"). + WithArgs(cachePurpose.String()). WillReturnError(pgx.ErrTxClosed) }, wantErr: pgx.ErrTxClosed, }, { name: "ok", - config: cache.CacheConfig{ + config: cache.Config{ MaxAge: time.Minute, LastUseAge: time.Second, }, expect: func(pci pgxmock.PgxCommonIface) { pci.ExpectExec(queryExpect). - WithArgs("test"). + WithArgs(cachePurpose.String()). WillReturnResult(pgxmock.NewResult("DELETE", 1)) }, }, @@ -491,18 +495,18 @@ func Test_pgCache_Truncate(t *testing.T) { } const ( - cacheName = "test" - expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_test + cachePurpose = cache.PurposeAuthzInstance + expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_authz_instance partition of cache.objects -for values in ('test'); +for values in ('authz_instance'); -create unlogged table if not exists cache.string_keys_test +create unlogged table if not exists cache.string_keys_authz_instance partition of cache.string_keys -for values in ('test'); +for values in ('authz_instance'); ` ) -func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) { +func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) { conf.Log = &logging.Config{ Level: "debug", AddSource: true, @@ -512,8 +516,11 @@ func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testI pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)). 
WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0)) - - c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres") + connector := &Connector{ + PGXPool: pool, + Dialect: "postgres", + } + c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector) require.NoError(t, err) return c, pool } diff --git a/internal/cache/pg/prune.sql b/internal/cache/connector/pg/prune.sql similarity index 100% rename from internal/cache/pg/prune.sql rename to internal/cache/connector/pg/prune.sql diff --git a/internal/cache/pg/set.sql b/internal/cache/connector/pg/set.sql similarity index 100% rename from internal/cache/pg/set.sql rename to internal/cache/connector/pg/set.sql diff --git a/internal/cache/pg/truncate.sql b/internal/cache/connector/pg/truncate.sql similarity index 100% rename from internal/cache/pg/truncate.sql rename to internal/cache/connector/pg/truncate.sql diff --git a/internal/cache/connector/redis/_remove.lua b/internal/cache/connector/redis/_remove.lua new file mode 100644 index 0000000000..cbd7f5a797 --- /dev/null +++ b/internal/cache/connector/redis/_remove.lua @@ -0,0 +1,10 @@ +local function remove(object_id) + local setKey = keySetKey(object_id) + local keys = redis.call("SMEMBERS", setKey) + local n = #keys + for i = 1, n do + redis.call("DEL", keys[i]) + end + redis.call("DEL", setKey) + redis.call("DEL", object_id) +end diff --git a/internal/cache/connector/redis/_select.lua b/internal/cache/connector/redis/_select.lua new file mode 100644 index 0000000000..d87bb3f5c0 --- /dev/null +++ b/internal/cache/connector/redis/_select.lua @@ -0,0 +1,3 @@ +-- SELECT ensures the DB namespace for each script. +-- When used, it consumes the first ARGV entry. +redis.call("SELECT", ARGV[1]) diff --git a/internal/cache/connector/redis/_util.lua b/internal/cache/connector/redis/_util.lua new file mode 100644 index 0000000000..4563c3df6e --- /dev/null +++ b/internal/cache/connector/redis/_util.lua @@ -0,0 +1,17 @@ +-- keySetKey returns the redis key of the set containing all keys to the object. +local function keySetKey (object_id) + return object_id .. "-keys" +end + +local function getTime() + return tonumber(redis.call('TIME')[1]) +end + +-- getCall wrapts redis.call so a nil is returned instead of false. +local function getCall (...) + local result = redis.call(...) + if result == false then + return nil + end + return result +end diff --git a/internal/cache/connector/redis/connector.go b/internal/cache/connector/redis/connector.go new file mode 100644 index 0000000000..2d0498dfa0 --- /dev/null +++ b/internal/cache/connector/redis/connector.go @@ -0,0 +1,154 @@ +package redis + +import ( + "crypto/tls" + "time" + + "github.com/redis/go-redis/v9" +) + +type Config struct { + Enabled bool + + // The network type, either tcp or unix. + // Default is tcp. + Network string + // host:port address. + Addr string + // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. + ClientName string + // Use the specified Username to authenticate the current connection + // with one of the connections defined in the ACL list when connecting + // to a Redis 6.0 instance, or greater, that is using the Redis ACL system. + Username string + // Optional password. 
Must match the password specified in the + // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower), + // or the User Password when connecting to a Redis 6.0 instance, or greater, + // that is using the Redis ACL system. + Password string + // Each ZITADEL cache uses an incremental DB namespace. + // This option offsets the first DB so it doesn't conflict with other databases on the same server. + // Note that ZITADEL uses FLUSHDB command to truncate a cache. + // This can have destructive consequences when overlapping DB namespaces are used. + DBOffset int + + // Maximum number of retries before giving up. + // Default is 3 retries; -1 (not 0) disables retries. + MaxRetries int + // Minimum backoff between each retry. + // Default is 8 milliseconds; -1 disables backoff. + MinRetryBackoff time.Duration + // Maximum backoff between each retry. + // Default is 512 milliseconds; -1 disables backoff. + MaxRetryBackoff time.Duration + + // Dial timeout for establishing new connections. + // Default is 5 seconds. + DialTimeout time.Duration + // Timeout for socket reads. If reached, commands will fail + // with a timeout instead of blocking. Supported values: + // - `0` - default timeout (3 seconds). + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetReadDeadline calls completely. + ReadTimeout time.Duration + // Timeout for socket writes. If reached, commands will fail + // with a timeout instead of blocking. Supported values: + // - `0` - default timeout (3 seconds). + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetWriteDeadline calls completely. + WriteTimeout time.Duration + + // Type of connection pool. + // true for FIFO pool, false for LIFO pool. + // Note that FIFO has slightly higher overhead compared to LIFO, + // but it helps closing idle connections faster reducing the pool size. + PoolFIFO bool + // Base number of socket connections. + // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. + // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize, + // you can limit it through MaxActiveConns + PoolSize int + // Amount of time client waits for connection if all connections + // are busy before returning an error. + // Default is ReadTimeout + 1 second. + PoolTimeout time.Duration + // Minimum number of idle connections which is useful when establishing + // new connection is slow. + // Default is 0. the idle connections are not closed by default. + MinIdleConns int + // Maximum number of idle connections. + // Default is 0. the idle connections are not closed by default. + MaxIdleConns int + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActiveConns int + // ConnMaxIdleTime is the maximum amount of time a connection may be idle. + // Should be less than server's timeout. + // + // Expired connections may be closed lazily before reuse. + // If d <= 0, connections are not closed due to a connection's idle time. + // + // Default is 30 minutes. -1 disables idle timeout check. + ConnMaxIdleTime time.Duration + // ConnMaxLifetime is the maximum amount of time a connection may be reused. + // + // Expired connections may be closed lazily before reuse. + // If <= 0, connections are not closed due to a connection's age. + // + // Default is to not close idle connections. 
+ ConnMaxLifetime time.Duration + + EnableTLS bool + + // Disable set-lib on connect. Default is false. + DisableIndentity bool + + // Add suffix to client name. Default is empty. + IdentitySuffix string +} + +type Connector struct { + *redis.Client + Config Config +} + +func NewConnector(config Config) *Connector { + if !config.Enabled { + return nil + } + return &Connector{ + Client: redis.NewClient(optionsFromConfig(config)), + Config: config, + } +} + +func optionsFromConfig(c Config) *redis.Options { + opts := &redis.Options{ + Network: c.Network, + Addr: c.Addr, + ClientName: c.ClientName, + Protocol: 3, + Username: c.Username, + Password: c.Password, + MaxRetries: c.MaxRetries, + MinRetryBackoff: c.MinRetryBackoff, + MaxRetryBackoff: c.MaxRetryBackoff, + DialTimeout: c.DialTimeout, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + ContextTimeoutEnabled: true, + PoolFIFO: c.PoolFIFO, + PoolTimeout: c.PoolTimeout, + MinIdleConns: c.MinIdleConns, + MaxIdleConns: c.MaxIdleConns, + MaxActiveConns: c.MaxActiveConns, + ConnMaxIdleTime: c.ConnMaxIdleTime, + ConnMaxLifetime: c.ConnMaxLifetime, + DisableIndentity: c.DisableIndentity, + IdentitySuffix: c.IdentitySuffix, + } + if c.EnableTLS { + opts.TLSConfig = new(tls.Config) + } + return opts +} diff --git a/internal/cache/connector/redis/get.lua b/internal/cache/connector/redis/get.lua new file mode 100644 index 0000000000..cfb3e89d8a --- /dev/null +++ b/internal/cache/connector/redis/get.lua @@ -0,0 +1,29 @@ +local result = redis.call("GET", KEYS[1]) +if result == false then + return nil +end +local object_id = tostring(result) + +local object = getCall("HGET", object_id, "object") +if object == nil then + -- object expired, but there are keys that need to be cleaned up + remove(object_id) + return nil +end + +-- max-age must be checked manually +local expiry = getCall("HGET", object_id, "expiry") +if not (expiry == nil) and expiry > 0 then + if getTime() > expiry then + remove(object_id) + return nil + end +end + +local usage_lifetime = getCall("HGET", object_id, "usage_lifetime") +-- reset usage based TTL +if not (usage_lifetime == nil) and tonumber(usage_lifetime) > 0 then + redis.call('EXPIRE', object_id, usage_lifetime) +end + +return object diff --git a/internal/cache/connector/redis/invalidate.lua b/internal/cache/connector/redis/invalidate.lua new file mode 100644 index 0000000000..e2a766ac72 --- /dev/null +++ b/internal/cache/connector/redis/invalidate.lua @@ -0,0 +1,9 @@ +local n = #KEYS +for i = 1, n do + local result = redis.call("GET", KEYS[i]) + if result == false then + return nil + end + local object_id = tostring(result) + remove(object_id) +end diff --git a/internal/cache/connector/redis/redis.go b/internal/cache/connector/redis/redis.go new file mode 100644 index 0000000000..fef15f6d55 --- /dev/null +++ b/internal/cache/connector/redis/redis.go @@ -0,0 +1,172 @@ +package redis + +import ( + "context" + _ "embed" + "encoding/json" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + + "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/telemetry/tracing" +) + +var ( + //go:embed _select.lua + selectComponent string + //go:embed _util.lua + utilComponent string + //go:embed _remove.lua + removeComponent string + //go:embed set.lua + setScript string + //go:embed get.lua + getScript string + //go:embed invalidate.lua + invalidateScript string + + // Don't mind the creative "import" + setParsed = 
redis.NewScript(strings.Join([]string{selectComponent, utilComponent, setScript}, "\n"))
+	getParsed        = redis.NewScript(strings.Join([]string{selectComponent, utilComponent, removeComponent, getScript}, "\n"))
+	invalidateParsed = redis.NewScript(strings.Join([]string{selectComponent, utilComponent, removeComponent, invalidateScript}, "\n"))
+)
+
+type redisCache[I, K comparable, V cache.Entry[I, K]] struct {
+	db        int
+	config    *cache.Config
+	indices   []I
+	connector *Connector
+	logger    *slog.Logger
+}
+
+// NewCache returns a cache that stores and retrieves objects using a single Redis server.
+func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
+	return &redisCache[I, K, V]{
+		config:    &config,
+		db:        db,
+		indices:   indices,
+		connector: client,
+		logger:    config.Log.Slog(),
+	}
+}
+
+func (c *redisCache[I, K, V]) Set(ctx context.Context, value V) {
+	if _, err := c.set(ctx, value); err != nil {
+		c.logger.ErrorContext(ctx, "redis cache set", "err", err)
+	}
+}
+
+func (c *redisCache[I, K, V]) set(ctx context.Context, value V) (objectID string, err error) {
+	ctx, span := tracing.NewSpan(ctx)
+	defer func() { span.EndWithError(err) }()
+
+	// Internal ID used for the object
+	objectID = uuid.NewString()
+	keys := []string{objectID}
+	// flatten the secondary keys
+	for _, index := range c.indices {
+		keys = append(keys, c.redisIndexKeys(index, value.Keys(index)...)...)
+	}
+	var buf strings.Builder
+	err = json.NewEncoder(&buf).Encode(value)
+	if err != nil {
+		return "", err
+	}
+	err = setParsed.Run(ctx, c.connector, keys,
+		c.db,                                   // DB namespace
+		buf.String(),                           // object
+		int64(c.config.LastUseAge/time.Second), // usage_lifetime
+		int64(c.config.MaxAge/time.Second),     // max_age
+	).Err()
+	// redis.Nil is always returned because the script doesn't have a return value.
+	if err != nil && !errors.Is(err, redis.Nil) {
+		return "", err
+	}
+	return objectID, nil
+}
+
+func (c *redisCache[I, K, V]) Get(ctx context.Context, index I, key K) (value V, ok bool) {
+	var (
+		obj any
+		err error
+	)
+	ctx, span := tracing.NewSpan(ctx)
+	defer func() {
+		if errors.Is(err, redis.Nil) {
+			err = nil
+		}
+		span.EndWithError(err)
+	}()
+
+	logger := c.logger.With("index", index, "key", key)
+	obj, err = getParsed.Run(ctx, c.connector, c.redisIndexKeys(index, key), c.db).Result()
+	if err != nil && !errors.Is(err, redis.Nil) {
+		logger.ErrorContext(ctx, "redis cache get", "err", err)
+		return value, false
+	}
+	data, ok := obj.(string)
+	if !ok {
+		logger.With("err", err).InfoContext(ctx, "redis cache miss")
+		return value, false
+	}
+	err = json.NewDecoder(strings.NewReader(data)).Decode(&value)
+	if err != nil {
+		logger.ErrorContext(ctx, "redis cache get", "err", fmt.Errorf("decode: %w", err))
+		return value, false
+	}
+	return value, true
+}
+
+func (c *redisCache[I, K, V]) Invalidate(ctx context.Context, index I, key ...K) (err error) {
+	ctx, span := tracing.NewSpan(ctx)
+	defer func() { span.EndWithError(err) }()
+
+	if len(key) == 0 {
+		return nil
+	}
+	err = invalidateParsed.Run(ctx, c.connector, c.redisIndexKeys(index, key...), c.db).Err()
+	// redis.Nil is always returned because the script doesn't have a return value.
+ if err != nil && !errors.Is(err, redis.Nil) { + return err + } + return nil +} + +func (c *redisCache[I, K, V]) Delete(ctx context.Context, index I, key ...K) (err error) { + ctx, span := tracing.NewSpan(ctx) + defer func() { span.EndWithError(err) }() + + if len(key) == 0 { + return nil + } + pipe := c.connector.Pipeline() + pipe.Select(ctx, c.db) + pipe.Del(ctx, c.redisIndexKeys(index, key...)...) + _, err = pipe.Exec(ctx) + return err +} + +func (c *redisCache[I, K, V]) Truncate(ctx context.Context) (err error) { + ctx, span := tracing.NewSpan(ctx) + defer func() { span.EndWithError(err) }() + + pipe := c.connector.Pipeline() + pipe.Select(ctx, c.db) + pipe.FlushDB(ctx) + _, err = pipe.Exec(ctx) + return err +} + +func (c *redisCache[I, K, V]) redisIndexKeys(index I, keys ...K) []string { + out := make([]string, len(keys)) + for i, k := range keys { + out[i] = fmt.Sprintf("%v:%v", index, k) + } + return out +} diff --git a/internal/cache/connector/redis/redis_test.go b/internal/cache/connector/redis/redis_test.go new file mode 100644 index 0000000000..3f45be1502 --- /dev/null +++ b/internal/cache/connector/redis/redis_test.go @@ -0,0 +1,714 @@ +package redis + +import ( + "context" + "testing" + "time" + + "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/zitadel/logging" + + "github.com/zitadel/zitadel/internal/cache" +) + +type testIndex int + +const ( + testIndexID testIndex = iota + testIndexName +) + +const ( + testDB = 99 +) + +var testIndices = []testIndex{ + testIndexID, + testIndexName, +} + +type testObject struct { + ID string + Name []string +} + +func (o *testObject) Keys(index testIndex) []string { + switch index { + case testIndexID: + return []string{o.ID} + case testIndexName: + return o.Name + default: + return nil + } +} + +func Test_redisCache_set(t *testing.T) { + type args struct { + ctx context.Context + value *testObject + } + tests := []struct { + name string + config cache.Config + args args + assertions func(t *testing.T, s *miniredis.Miniredis, objectID string) + wantErr error + }{ + { + name: "ok", + config: cache.Config{}, + args: args{ + ctx: context.Background(), + value: &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }, + }, + assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) { + s.CheckGet(t, "0:one", objectID) + s.CheckGet(t, "1:foo", objectID) + s.CheckGet(t, "1:bar", objectID) + assert.Empty(t, s.HGet(objectID, "expiry")) + assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object")) + }, + }, + { + name: "with last use TTL", + config: cache.Config{ + LastUseAge: time.Second, + }, + args: args{ + ctx: context.Background(), + value: &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }, + }, + assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) { + s.CheckGet(t, "0:one", objectID) + s.CheckGet(t, "1:foo", objectID) + s.CheckGet(t, "1:bar", objectID) + assert.Empty(t, s.HGet(objectID, "expiry")) + assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object")) + assert.Positive(t, s.TTL(objectID)) + + s.FastForward(2 * time.Second) + v, err := s.Get(objectID) + require.Error(t, err) + assert.Empty(t, v) + }, + }, + { + name: "with last use TTL and max age", + config: cache.Config{ + MaxAge: time.Minute, + LastUseAge: time.Second, + }, + args: args{ + ctx: context.Background(), + value: &testObject{ + ID: "one", + Name: []string{"foo", 
"bar"}, + }, + }, + assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) { + s.CheckGet(t, "0:one", objectID) + s.CheckGet(t, "1:foo", objectID) + s.CheckGet(t, "1:bar", objectID) + assert.NotEmpty(t, s.HGet(objectID, "expiry")) + assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object")) + assert.Positive(t, s.TTL(objectID)) + + s.FastForward(2 * time.Second) + v, err := s.Get(objectID) + require.Error(t, err) + assert.Empty(t, v) + }, + }, + { + name: "with max age TTL", + config: cache.Config{ + MaxAge: time.Minute, + }, + args: args{ + ctx: context.Background(), + value: &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }, + }, + assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) { + s.CheckGet(t, "0:one", objectID) + s.CheckGet(t, "1:foo", objectID) + s.CheckGet(t, "1:bar", objectID) + assert.Empty(t, s.HGet(objectID, "expiry")) + assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object")) + assert.Positive(t, s.TTL(objectID)) + + s.FastForward(2 * time.Minute) + v, err := s.Get(objectID) + require.Error(t, err) + assert.Empty(t, v) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, server := prepareCache(t, tt.config) + rc := c.(*redisCache[testIndex, string, *testObject]) + objectID, err := rc.set(tt.args.ctx, tt.args.value) + require.ErrorIs(t, err, tt.wantErr) + t.Log(rc.connector.HGetAll(context.Background(), objectID)) + tt.assertions(t, server, objectID) + }) + } +} + +func Test_redisCache_Get(t *testing.T) { + type args struct { + ctx context.Context + index testIndex + key string + } + tests := []struct { + name string + config cache.Config + preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) + args args + want *testObject + wantOK bool + }{ + { + name: "connection error", + config: cache.Config{}, + preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + s.RequireAuth("foobar") + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: "foo", + }, + wantOK: false, + }, + { + name: "get by ID", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: "one", + }, + want: &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }, + wantOK: true, + }, + { + name: "get by name", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: "foo", + }, + want: &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }, + wantOK: true, + }, + { + name: "usage timeout", + config: cache.Config{ + LastUseAge: time.Minute, + }, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + _, ok := c.Get(context.Background(), testIndexID, "one") + require.True(t, ok) + s.FastForward(2 * time.Minute) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: "foo", + }, + want: nil, + wantOK: 
false, + }, + { + name: "max age timeout", + config: cache.Config{ + MaxAge: time.Minute, + }, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + _, ok := c.Get(context.Background(), testIndexID, "one") + require.True(t, ok) + s.FastForward(2 * time.Minute) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: "foo", + }, + want: nil, + wantOK: false, + }, + { + name: "not found", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: "spanac", + }, + wantOK: false, + }, + { + name: "json decode error", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + objectID, err := s.Get(c.(*redisCache[testIndex, string, *testObject]).redisIndexKeys(testIndexID, "one")[0]) + require.NoError(t, err) + s.HSet(objectID, "object", "~~~") + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: "one", + }, + wantOK: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, server := prepareCache(t, tt.config) + tt.preparation(t, c, server) + t.Log(server.Keys()) + + got, ok := c.Get(tt.args.ctx, tt.args.index, tt.args.key) + require.Equal(t, tt.wantOK, ok) + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_redisCache_Invalidate(t *testing.T) { + type args struct { + ctx context.Context + index testIndex + key []string + } + tests := []struct { + name string + config cache.Config + preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) + assertions func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) + args args + wantErr bool + }{ + { + name: "connection error", + config: cache.Config{}, + preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + s.RequireAuth("foobar") + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"foo"}, + }, + wantErr: true, + }, + { + name: "no keys, noop", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: []string{}, + }, + wantErr: false, + }, + { + name: "invalidate by ID", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.False(t, ok) + assert.Nil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.False(t, ok) + assert.Nil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: []string{"one"}, + }, + wantErr: false, + }, + { + name: "invalidate by name", + 
config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.False(t, ok) + assert.Nil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.False(t, ok) + assert.Nil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"foo"}, + }, + wantErr: false, + }, + { + name: "invalidate after timeout", + config: cache.Config{ + LastUseAge: time.Minute, + }, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + _, ok := c.Get(context.Background(), testIndexID, "one") + require.True(t, ok) + s.FastForward(2 * time.Minute) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.False(t, ok) + assert.Nil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.False(t, ok) + assert.Nil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"foo"}, + }, + wantErr: false, + }, + { + name: "not found", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.True(t, ok) + assert.NotNil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.True(t, ok) + assert.NotNil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"spanac"}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, server := prepareCache(t, tt.config) + tt.preparation(t, c, server) + t.Log(server.Keys()) + + err := c.Invalidate(tt.args.ctx, tt.args.index, tt.args.key...) 
+ if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + }) + } +} + +func Test_redisCache_Delete(t *testing.T) { + type args struct { + ctx context.Context + index testIndex + key []string + } + tests := []struct { + name string + config cache.Config + preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) + assertions func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) + args args + wantErr bool + }{ + { + name: "connection error", + config: cache.Config{}, + preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + s.RequireAuth("foobar") + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"foo"}, + }, + wantErr: true, + }, + { + name: "no keys, noop", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: []string{}, + }, + wantErr: false, + }, + { + name: "delete ID", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.False(t, ok) + assert.Nil(t, obj) + // Get be name should still work + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.True(t, ok) + assert.NotNil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexID, + key: []string{"one"}, + }, + wantErr: false, + }, + { + name: "delete name", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + // get by ID should still work + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.True(t, ok) + assert.NotNil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.False(t, ok) + assert.Nil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"foo"}, + }, + wantErr: false, + }, + { + name: "not found", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.True(t, ok) + assert.NotNil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "foo") + assert.True(t, ok) + assert.NotNil(t, obj) + }, + args: args{ + ctx: context.Background(), + index: testIndexName, + key: []string{"spanac"}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, server := prepareCache(t, tt.config) + tt.preparation(t, c, server) + t.Log(server.Keys()) + + err := c.Delete(tt.args.ctx, tt.args.index, tt.args.key...) 
+ if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + }) + } +} + +func Test_redisCache_Truncate(t *testing.T) { + type args struct { + ctx context.Context + } + tests := []struct { + name string + config cache.Config + preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) + assertions func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) + args args + wantErr bool + }{ + { + name: "connection error", + config: cache.Config{}, + preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + s.RequireAuth("foobar") + }, + args: args{ + ctx: context.Background(), + }, + wantErr: true, + }, + { + name: "ok", + config: cache.Config{}, + preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) { + c.Set(context.Background(), &testObject{ + ID: "one", + Name: []string{"foo", "bar"}, + }) + c.Set(context.Background(), &testObject{ + ID: "two", + Name: []string{"Hello", "World"}, + }) + }, + assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) { + obj, ok := c.Get(context.Background(), testIndexID, "one") + assert.False(t, ok) + assert.Nil(t, obj) + obj, ok = c.Get(context.Background(), testIndexName, "World") + assert.False(t, ok) + assert.Nil(t, obj) + }, + args: args{ + ctx: context.Background(), + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c, server := prepareCache(t, tt.config) + tt.preparation(t, c, server) + t.Log(server.Keys()) + + err := c.Truncate(tt.args.ctx) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + }) + } +} + +func prepareCache(t *testing.T, conf cache.Config) (cache.Cache[testIndex, string, *testObject], *miniredis.Miniredis) { + conf.Log = &logging.Config{ + Level: "debug", + AddSource: true, + } + server := miniredis.RunT(t) + server.Select(testDB) + client := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: server.Addr(), + }) + t.Cleanup(func() { + client.Close() + server.Close() + }) + connector := NewConnector(Config{ + Enabled: true, + Network: "tcp", + Addr: server.Addr(), + }) + c := NewCache[testIndex, string, *testObject](conf, connector, testDB, testIndices) + return c, server +} diff --git a/internal/cache/connector/redis/set.lua b/internal/cache/connector/redis/set.lua new file mode 100644 index 0000000000..8c586bb47b --- /dev/null +++ b/internal/cache/connector/redis/set.lua @@ -0,0 +1,27 @@ +-- KEYS: [1]: object_id; [>1]: index keys. 
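+-- ARGV: [1]: DB index (consumed by the shared _select.lua prelude); [2]: JSON-encoded object;
+--       [3]: usage based lifetime in seconds; [4]: max age in seconds.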
+local object_id = KEYS[1] +local object = ARGV[2] +local usage_lifetime = tonumber(ARGV[3]) -- usage based lifetime in seconds +local max_age = tonumber(ARGV[4]) -- max age liftime in seconds + +redis.call("HSET", object_id,"object", object) +if usage_lifetime > 0 then + redis.call("HSET", object_id, "usage_lifetime", usage_lifetime) + -- enable usage based TTL + redis.call("EXPIRE", object_id, usage_lifetime) + if max_age > 0 then + -- set max_age to hash map for expired remove on Get + local expiry = getTime() + max_age + redis.call("HSET", object_id, "expiry", expiry) + end +elseif max_age > 0 then + -- enable max_age based TTL + redis.call("EXPIRE", object_id, max_age) +end + +local n = #KEYS +local setKey = keySetKey(object_id) +for i = 2, n do -- offset to the second element to skip object_id + redis.call("SADD", setKey, KEYS[i]) -- set of all keys used for housekeeping + redis.call("SET", KEYS[i], object_id) -- key to object_id mapping +end diff --git a/internal/cache/connector_enumer.go b/internal/cache/connector_enumer.go new file mode 100644 index 0000000000..7ea014db16 --- /dev/null +++ b/internal/cache/connector_enumer.go @@ -0,0 +1,98 @@ +// Code generated by "enumer -type Connector -transform snake -trimprefix Connector -linecomment -text"; DO NOT EDIT. + +package cache + +import ( + "fmt" + "strings" +) + +const _ConnectorName = "memorypostgresredis" + +var _ConnectorIndex = [...]uint8{0, 0, 6, 14, 19} + +const _ConnectorLowerName = "memorypostgresredis" + +func (i Connector) String() string { + if i < 0 || i >= Connector(len(_ConnectorIndex)-1) { + return fmt.Sprintf("Connector(%d)", i) + } + return _ConnectorName[_ConnectorIndex[i]:_ConnectorIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _ConnectorNoOp() { + var x [1]struct{} + _ = x[ConnectorUnspecified-(0)] + _ = x[ConnectorMemory-(1)] + _ = x[ConnectorPostgres-(2)] + _ = x[ConnectorRedis-(3)] +} + +var _ConnectorValues = []Connector{ConnectorUnspecified, ConnectorMemory, ConnectorPostgres, ConnectorRedis} + +var _ConnectorNameToValueMap = map[string]Connector{ + _ConnectorName[0:0]: ConnectorUnspecified, + _ConnectorLowerName[0:0]: ConnectorUnspecified, + _ConnectorName[0:6]: ConnectorMemory, + _ConnectorLowerName[0:6]: ConnectorMemory, + _ConnectorName[6:14]: ConnectorPostgres, + _ConnectorLowerName[6:14]: ConnectorPostgres, + _ConnectorName[14:19]: ConnectorRedis, + _ConnectorLowerName[14:19]: ConnectorRedis, +} + +var _ConnectorNames = []string{ + _ConnectorName[0:0], + _ConnectorName[0:6], + _ConnectorName[6:14], + _ConnectorName[14:19], +} + +// ConnectorString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ConnectorString(s string) (Connector, error) { + if val, ok := _ConnectorNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ConnectorNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Connector values", s) +} + +// ConnectorValues returns all values of the enum +func ConnectorValues() []Connector { + return _ConnectorValues +} + +// ConnectorStrings returns a slice of all String values of the enum +func ConnectorStrings() []string { + strs := make([]string, len(_ConnectorNames)) + copy(strs, _ConnectorNames) + return strs +} + +// IsAConnector returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i Connector) IsAConnector() bool { + for _, v := range _ConnectorValues { + if i == v { + return true + } + } + return false +} + +// MarshalText implements the encoding.TextMarshaler interface for Connector +func (i Connector) MarshalText() ([]byte, error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for Connector +func (i *Connector) UnmarshalText(text []byte) error { + var err error + *i, err = ConnectorString(string(text)) + return err +} diff --git a/internal/cache/pruner.go b/internal/cache/pruner.go index d4b0b41266..959762d410 100644 --- a/internal/cache/pruner.go +++ b/internal/cache/pruner.go @@ -31,22 +31,22 @@ type AutoPruneConfig struct { Timeout time.Duration } -func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, name string) (close func()) { - return c.startAutoPrune(background, pruner, name, clockwork.NewRealClock()) +func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, purpose Purpose) (close func()) { + return c.startAutoPrune(background, pruner, purpose, clockwork.NewRealClock()) } -func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, name string, clock clockwork.Clock) (close func()) { +func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, purpose Purpose, clock clockwork.Clock) (close func()) { if c.Interval <= 0 { return func() {} } background, cancel := context.WithCancel(background) // randomize the first interval timer := clock.NewTimer(time.Duration(rand.Int63n(int64(c.Interval)))) - go c.pruneTimer(background, pruner, name, timer) + go c.pruneTimer(background, pruner, purpose, timer) return cancel } -func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, name string, timer clockwork.Timer) { +func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, purpose Purpose, timer clockwork.Timer) { defer func() { if !timer.Stop() { <-timer.Chan() @@ -58,9 +58,9 @@ func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, case <-background.Done(): return case <-timer.Chan(): - timer.Reset(c.Interval) err := c.doPrune(background, pruner) - logging.OnError(err).WithField("name", name).Error("cache auto prune") + logging.OnError(err).WithField("purpose", purpose).Error("cache auto prune") + timer.Reset(c.Interval) } } } diff --git a/internal/cache/pruner_test.go b/internal/cache/pruner_test.go index ababe81e59..faaedeb88c 100644 --- a/internal/cache/pruner_test.go +++ b/internal/cache/pruner_test.go @@ -30,7 +30,7 @@ func TestAutoPruneConfig_startAutoPrune(t *testing.T) { called: make(chan struct{}), } clock := clockwork.NewFakeClock() - close := c.startAutoPrune(ctx, &pruner, "foo", clock) + close := c.startAutoPrune(ctx, &pruner, PurposeAuthzInstance, clock) defer close() clock.Advance(time.Second) diff --git a/internal/cache/purpose_enumer.go b/internal/cache/purpose_enumer.go new file mode 100644 index 0000000000..bae47476ff --- /dev/null +++ b/internal/cache/purpose_enumer.go @@ -0,0 +1,82 @@ +// Code generated by "enumer -type Purpose -transform snake -trimprefix Purpose"; DO NOT EDIT. 
+ +package cache + +import ( + "fmt" + "strings" +) + +const _PurposeName = "unspecifiedauthz_instancemilestones" + +var _PurposeIndex = [...]uint8{0, 11, 25, 35} + +const _PurposeLowerName = "unspecifiedauthz_instancemilestones" + +func (i Purpose) String() string { + if i < 0 || i >= Purpose(len(_PurposeIndex)-1) { + return fmt.Sprintf("Purpose(%d)", i) + } + return _PurposeName[_PurposeIndex[i]:_PurposeIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _PurposeNoOp() { + var x [1]struct{} + _ = x[PurposeUnspecified-(0)] + _ = x[PurposeAuthzInstance-(1)] + _ = x[PurposeMilestones-(2)] +} + +var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones} + +var _PurposeNameToValueMap = map[string]Purpose{ + _PurposeName[0:11]: PurposeUnspecified, + _PurposeLowerName[0:11]: PurposeUnspecified, + _PurposeName[11:25]: PurposeAuthzInstance, + _PurposeLowerName[11:25]: PurposeAuthzInstance, + _PurposeName[25:35]: PurposeMilestones, + _PurposeLowerName[25:35]: PurposeMilestones, +} + +var _PurposeNames = []string{ + _PurposeName[0:11], + _PurposeName[11:25], + _PurposeName[25:35], +} + +// PurposeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PurposeString(s string) (Purpose, error) { + if val, ok := _PurposeNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _PurposeNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Purpose values", s) +} + +// PurposeValues returns all values of the enum +func PurposeValues() []Purpose { + return _PurposeValues +} + +// PurposeStrings returns a slice of all String values of the enum +func PurposeStrings() []string { + strs := make([]string, len(_PurposeNames)) + copy(strs, _PurposeNames) + return strs +} + +// IsAPurpose returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i Purpose) IsAPurpose() bool { + for _, v := range _PurposeValues { + if i == v { + return true + } + } + return false +} diff --git a/internal/command/cache.go b/internal/command/cache.go index bf976bd2d7..384577738e 100644 --- a/internal/command/cache.go +++ b/internal/command/cache.go @@ -2,81 +2,20 @@ package command import ( "context" - "fmt" - "strings" "github.com/zitadel/zitadel/internal/cache" - "github.com/zitadel/zitadel/internal/cache/gomap" - "github.com/zitadel/zitadel/internal/cache/noop" - "github.com/zitadel/zitadel/internal/cache/pg" - "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/cache/connector" ) type Caches struct { - connectors *cacheConnectors milestones cache.Cache[milestoneIndex, string, *MilestonesReached] } -func startCaches(background context.Context, conf *cache.CachesConfig, client *database.DB) (_ *Caches, err error) { - caches := &Caches{ - milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](), - } - if conf == nil { - return caches, nil - } - caches.connectors, err = startCacheConnectors(background, conf, client) - if err != nil { - return nil, err - } - caches.milestones, err = startCache[milestoneIndex, string, *MilestonesReached](background, []milestoneIndex{milestoneIndexInstanceID}, "milestones", conf.Instance, caches.connectors) +func startCaches(background context.Context, connectors connector.Connectors) (_ *Caches, err error) { + caches := new(Caches) + caches.milestones, err = connector.StartCache[milestoneIndex, string, *MilestonesReached](background, []milestoneIndex{milestoneIndexInstanceID}, cache.PurposeMilestones, connectors.Config.Milestones, connectors) if err != nil { return nil, err } return caches, nil } - -type cacheConnectors struct { - memory *cache.AutoPruneConfig - postgres *pgxPoolCacheConnector -} - -type pgxPoolCacheConnector struct { - *cache.AutoPruneConfig - client *database.DB -} - -func startCacheConnectors(_ context.Context, conf *cache.CachesConfig, client *database.DB) (_ *cacheConnectors, err error) { - connectors := new(cacheConnectors) - if conf.Connectors.Memory.Enabled { - connectors.memory = &conf.Connectors.Memory.AutoPrune - } - if conf.Connectors.Postgres.Enabled { - connectors.postgres = &pgxPoolCacheConnector{ - AutoPruneConfig: &conf.Connectors.Postgres.AutoPrune, - client: client, - } - } - return connectors, nil -} - -func startCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, name string, conf *cache.CacheConfig, connectors *cacheConnectors) (cache.Cache[I, K, V], error) { - if conf == nil || conf.Connector == "" { - return noop.NewCache[I, K, V](), nil - } - if strings.EqualFold(conf.Connector, "memory") && connectors.memory != nil { - c := gomap.NewCache[I, K, V](background, indices, *conf) - connectors.memory.StartAutoPrune(background, c, name) - return c, nil - } - if strings.EqualFold(conf.Connector, "postgres") && connectors.postgres != nil { - client := connectors.postgres.client - c, err := pg.NewCache[I, K, V](background, name, *conf, indices, client.Pool, client.Type()) - if err != nil { - return nil, fmt.Errorf("query start cache: %w", err) - } - connectors.postgres.StartAutoPrune(background, c, name) - return c, nil - } - - return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector) -} diff --git a/internal/command/command.go b/internal/command/command.go index 7c56f05b86..bc3f189a4a 100644 --- a/internal/command/command.go +++ b/internal/command/command.go @@ 
-18,7 +18,7 @@ import ( "github.com/zitadel/zitadel/internal/api/authz" api_http "github.com/zitadel/zitadel/internal/api/http" - "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/command/preparation" sd "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/crypto" @@ -98,8 +98,9 @@ type Commands struct { } func StartCommands( + ctx context.Context, es *eventstore.Eventstore, - cachesConfig *cache.CachesConfig, + cacheConnectors connector.Connectors, defaults sd.SystemDefaults, zitadelRoles []authz.RoleMapping, staticStore static.Storage, @@ -131,7 +132,7 @@ func StartCommands( if err != nil { return nil, fmt.Errorf("password hasher: %w", err) } - caches, err := startCaches(context.TODO(), cachesConfig, es.Client()) + caches, err := startCaches(ctx, cacheConnectors) if err != nil { return nil, fmt.Errorf("caches: %w", err) } diff --git a/internal/command/instance_test.go b/internal/command/instance_test.go index c60b2763b3..301077b268 100644 --- a/internal/command/instance_test.go +++ b/internal/command/instance_test.go @@ -13,7 +13,7 @@ import ( "golang.org/x/text/language" "github.com/zitadel/zitadel/internal/api/authz" - "github.com/zitadel/zitadel/internal/cache/noop" + "github.com/zitadel/zitadel/internal/cache/connector/noop" "github.com/zitadel/zitadel/internal/command/preparation" "github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/domain" diff --git a/internal/command/milestone_test.go b/internal/command/milestone_test.go index 819db9d098..3c4bffc704 100644 --- a/internal/command/milestone_test.go +++ b/internal/command/milestone_test.go @@ -10,8 +10,8 @@ import ( "github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/cache" - "github.com/zitadel/zitadel/internal/cache/gomap" - "github.com/zitadel/zitadel/internal/cache/noop" + "github.com/zitadel/zitadel/internal/cache/connector/gomap" + "github.com/zitadel/zitadel/internal/cache/connector/noop" "github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/repository/milestone" ) @@ -183,7 +183,7 @@ func TestCommands_GetMilestonesReached(t *testing.T) { cache := gomap.NewCache[milestoneIndex, string, *MilestonesReached]( context.Background(), []milestoneIndex{milestoneIndexInstanceID}, - cache.CacheConfig{Connector: "memory"}, + cache.Config{Connector: cache.ConnectorMemory}, ) cache.Set(context.Background(), cached) diff --git a/internal/integration/config/docker-compose.yaml b/internal/integration/config/docker-compose.yaml index 1749b9f0ab..19c68ae405 100644 --- a/internal/integration/config/docker-compose.yaml +++ b/internal/integration/config/docker-compose.yaml @@ -23,3 +23,9 @@ services: start_period: '20s' ports: - 5432:5432 + + cache: + restart: 'always' + image: 'redis:latest' + ports: + - 6379:6379 diff --git a/internal/integration/config/zitadel.yaml b/internal/integration/config/zitadel.yaml index b1482f6e1a..378dc2f09b 100644 --- a/internal/integration/config/zitadel.yaml +++ b/internal/integration/config/zitadel.yaml @@ -10,13 +10,21 @@ Caches: Connectors: Postgres: Enabled: true - AutoPrune: - Interval: 30s - TimeOut: 1s + Redis: + Enabled: true Instance: + Connector: "redis" + MaxAge: 1h + LastUsage: 10m + Log: + Level: info + AddSource: true + Formatter: + Format: text + Milestones: Connector: "postgres" MaxAge: 1h - LastUsage: 30m + LastUsage: 10m Log: Level: info AddSource: true diff --git 
a/internal/query/cache.go b/internal/query/cache.go index 2722377891..55f7bb3db6 100644 --- a/internal/query/cache.go +++ b/internal/query/cache.go @@ -2,90 +2,28 @@ package query import ( "context" - "fmt" - "strings" "github.com/zitadel/logging" "github.com/zitadel/zitadel/internal/cache" - "github.com/zitadel/zitadel/internal/cache/gomap" - "github.com/zitadel/zitadel/internal/cache/noop" - "github.com/zitadel/zitadel/internal/cache/pg" - "github.com/zitadel/zitadel/internal/database" + "github.com/zitadel/zitadel/internal/cache/connector" "github.com/zitadel/zitadel/internal/eventstore" ) type Caches struct { - connectors *cacheConnectors - instance cache.Cache[instanceIndex, string, *authzInstance] + instance cache.Cache[instanceIndex, string, *authzInstance] } -func startCaches(background context.Context, conf *cache.CachesConfig, client *database.DB) (_ *Caches, err error) { - caches := &Caches{ - instance: noop.NewCache[instanceIndex, string, *authzInstance](), - } - if conf == nil { - return caches, nil - } - caches.connectors, err = startCacheConnectors(background, conf, client) - if err != nil { - return nil, err - } - caches.instance, err = startCache[instanceIndex, string, *authzInstance](background, instanceIndexValues(), "authz_instance", conf.Instance, caches.connectors) +func startCaches(background context.Context, connectors connector.Connectors) (_ *Caches, err error) { + caches := new(Caches) + caches.instance, err = connector.StartCache[instanceIndex, string, *authzInstance](background, instanceIndexValues(), cache.PurposeAuthzInstance, connectors.Config.Instance, connectors) if err != nil { return nil, err } caches.registerInstanceInvalidation() - return caches, nil } -type cacheConnectors struct { - memory *cache.AutoPruneConfig - postgres *pgxPoolCacheConnector -} - -type pgxPoolCacheConnector struct { - *cache.AutoPruneConfig - client *database.DB -} - -func startCacheConnectors(_ context.Context, conf *cache.CachesConfig, client *database.DB) (_ *cacheConnectors, err error) { - connectors := new(cacheConnectors) - if conf.Connectors.Memory.Enabled { - connectors.memory = &conf.Connectors.Memory.AutoPrune - } - if conf.Connectors.Postgres.Enabled { - connectors.postgres = &pgxPoolCacheConnector{ - AutoPruneConfig: &conf.Connectors.Postgres.AutoPrune, - client: client, - } - } - return connectors, nil -} - -func startCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, name string, conf *cache.CacheConfig, connectors *cacheConnectors) (cache.Cache[I, K, V], error) { - if conf == nil || conf.Connector == "" { - return noop.NewCache[I, K, V](), nil - } - if strings.EqualFold(conf.Connector, "memory") && connectors.memory != nil { - c := gomap.NewCache[I, K, V](background, indices, *conf) - connectors.memory.StartAutoPrune(background, c, name) - return c, nil - } - if strings.EqualFold(conf.Connector, "postgres") && connectors.postgres != nil { - client := connectors.postgres.client - c, err := pg.NewCache[I, K, V](background, name, *conf, indices, client.Pool, client.Type()) - if err != nil { - return nil, fmt.Errorf("query start cache: %w", err) - } - connectors.postgres.StartAutoPrune(background, c, name) - return c, nil - } - - return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector) -} - type invalidator[I comparable] interface { Invalidate(ctx context.Context, index I, key ...string) error } diff --git a/internal/query/instance.go b/internal/query/instance.go index ef74f0ebdd..549c05a233 100644 --- 
a/internal/query/instance.go +++ b/internal/query/instance.go @@ -587,9 +587,10 @@ func (c *Caches) registerInstanceInvalidation() { projection.InstanceTrustedDomainProjection.RegisterCacheInvalidation(invalidate) projection.SecurityPolicyProjection.RegisterCacheInvalidation(invalidate) - // limits uses own aggregate ID, invalidate using resource owner. + // These projections have their own aggregate ID, invalidate using resource owner. invalidate = cacheInvalidationFunc(c.instance, instanceIndexByID, getResourceOwner) projection.LimitsProjection.RegisterCacheInvalidation(invalidate) + projection.RestrictionsProjection.RegisterCacheInvalidation(invalidate) // System feature update should invalidate all instances, so Truncate the cache. projection.SystemFeatureProjection.RegisterCacheInvalidation(func(ctx context.Context, _ []*eventstore.Aggregate) { diff --git a/internal/query/query.go b/internal/query/query.go index 590326d07b..b39dbe9ca1 100644 --- a/internal/query/query.go +++ b/internal/query/query.go @@ -11,7 +11,7 @@ import ( "golang.org/x/text/language" "github.com/zitadel/zitadel/internal/api/authz" - "github.com/zitadel/zitadel/internal/cache" + "github.com/zitadel/zitadel/internal/cache/connector" sd "github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/database" @@ -49,7 +49,7 @@ func StartQueries( es *eventstore.Eventstore, esV4 es_v4.Querier, querySqlClient, projectionSqlClient *database.DB, - caches *cache.CachesConfig, + cacheConnectors connector.Connectors, projections projection.Config, defaults sd.SystemDefaults, idpConfigEncryption, otpEncryption, keyEncryptionAlgorithm, certEncryptionAlgorithm crypto.EncryptionAlgorithm, @@ -89,7 +89,7 @@ func StartQueries( if startProjections { projection.Start(ctx) } - repo.caches, err = startCaches(ctx, caches, querySqlClient) + repo.caches, err = startCaches(ctx, cacheConnectors) if err != nil { return nil, err } From 4a354a568d60bda0443dc9e2e3a073d9d2a84a71 Mon Sep 17 00:00:00 2001 From: Silvan Date: Sun, 10 Nov 2024 16:11:33 +0100 Subject: [PATCH 28/30] docs: update currently available regions of zitadel.cloud (#7230) Removes the available regions section in legal docs. --------- Co-authored-by: mffap --- .../legal/service-description/cloud-service-description.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/docs/legal/service-description/cloud-service-description.md b/docs/docs/legal/service-description/cloud-service-description.md index 6fadafa85a..10c3e552a9 100644 --- a/docs/docs/legal/service-description/cloud-service-description.md +++ b/docs/docs/legal/service-description/cloud-service-description.md @@ -75,12 +75,6 @@ Data location refers to a region, consisting of one or many countries or territo We can not guarantee that during transit the data will only remain within this region. We take measures, as outlined in our [privacy policy](../policies/privacy-policy), to protect your data in transit and in rest. -The following regions will be available when using our cloud service. This list is for informational purposes and will be updated in due course, please refer to our website for all available regions at this time. 
-
-- **Global**: All available cloud regions offered by our cloud provider
-- **Switzerland**: Exclusively on Swiss region
-- **GDPR safe countries**: Hosting location is within any of the EU member states and [Adequate Countries](https://ec.europa.eu/info/law/law-topic/data-protection/international-dimension-data-protection/adequacy-decisions_en) as recognized by the European Commission under the GDPR
-
 ## Backup
 
 Our backup strategy executes daily full backups and differential backups at a much higher frequency.

From fb6579e4565aa0e74ed3f3f5ea5ee7d8c67d78a2 Mon Sep 17 00:00:00 2001
From: Livio Spring
Date: Mon, 11 Nov 2024 12:28:27 +0100
Subject: [PATCH 29/30] fix(milestones): use previous spelling for milestone
 types (#8886)

# Which Problems Are Solved

https://github.com/zitadel/zitadel/pull/8788 accidentally changed the spelling
of milestone types from PascalCase to snake_case. This breaks systems where
`milestone.pushed` events already exist.

# How the Problems Are Solved

- Use PascalCase again
- Prefix event types with v2. (The previously pushed event type was ignored
  anyway.)
- Create `milestones3` projection

# Additional Changes

None

# Additional Context

relates to #8788
---
 cmd/setup/36.go                              | 16 +++----
 cmd/setup/config.go                          |  2 +-
 cmd/setup/setup.go                           |  2 +-
 internal/query/milestone_test.go             | 12 +++---
 internal/query/projection/milestones.go      |  5 +--
 internal/query/projection/milestones_test.go | 20 ++++-----
 internal/repository/milestone/events.go      |  4 +-
 internal/repository/milestone/type_enumer.go | 44 ++++++++++----------
 8 files changed, 51 insertions(+), 54 deletions(-)

diff --git a/cmd/setup/36.go b/cmd/setup/36.go
index 3ccab4992a..7536c584b7 100644
--- a/cmd/setup/36.go
+++ b/cmd/setup/36.go
@@ -23,7 +23,7 @@ var (
 	getProjectedMilestones string
 )
 
-type FillV2Milestones struct {
+type FillV3Milestones struct {
 	dbClient   *database.DB
 	eventstore *eventstore.Eventstore
 }
@@ -34,7 +34,7 @@ type instanceMilestone struct {
 	Pushed  *time.Time
 }
 
-func (mig *FillV2Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
+func (mig *FillV3Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
 	im, err := mig.getProjectedMilestones(ctx)
 	if err != nil {
 		return err
@@ -42,7 +42,7 @@ func (mig *FillV2Milestones) Execute(ctx context.Context, _ eventstore.Event) er
 	}
 	return mig.pushEventsByInstance(ctx, im)
 }
 
-func (mig *FillV2Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
+func (mig *FillV3Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
 	type row struct {
 		InstanceID string
 		Type       milestone.Type
@@ -73,7 +73,7 @@ func (mig *FillV2Milestones) getProjectedMilestones(ctx context.Context) (map[st
 
 // pushEventsByInstance creates the v2 milestone events by instance.
 // This prevents trying to push 6*N(instance) events in one push.
-func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
+func (mig *FillV3Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
 	// keep a deterministic order by instance ID.
order := make([]string, 0, len(milestoneMap)) for k := range milestoneMap { @@ -81,8 +81,8 @@ func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestone } slices.Sort(order) - for _, instanceID := range order { - logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("filter existing milestone events") + for i, instanceID := range order { + logging.WithFields("instance_id", instanceID, "migration", mig.String(), "progress", fmt.Sprintf("%d/%d", i+1, len(order))).Info("filter existing milestone events") // because each Push runs in a separate TX, we need to make sure that events // from a partially executed migration are pushed again. @@ -113,6 +113,6 @@ func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestone return nil } -func (mig *FillV2Milestones) String() string { - return "36_fill_v2_milestones" +func (mig *FillV3Milestones) String() string { + return "36_fill_v3_milestones" } diff --git a/cmd/setup/config.go b/cmd/setup/config.go index 57681c8bc1..f688a2a3a4 100644 --- a/cmd/setup/config.go +++ b/cmd/setup/config.go @@ -122,7 +122,7 @@ type Steps struct { s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid s34AddCacheSchema *AddCacheSchema s35AddPositionToIndexEsWm *AddPositionToIndexEsWm - s36FillV2Milestones *FillV2Milestones + s36FillV2Milestones *FillV3Milestones s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart } diff --git a/cmd/setup/setup.go b/cmd/setup/setup.go index e0784654b1..aeed1523b1 100644 --- a/cmd/setup/setup.go +++ b/cmd/setup/setup.go @@ -166,7 +166,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string) steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient} steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient} steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient} - steps.s36FillV2Milestones = &FillV2Milestones{dbClient: queryDBClient, eventstore: eventstoreClient} + steps.s36FillV2Milestones = &FillV3Milestones{dbClient: queryDBClient, eventstore: eventstoreClient} steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient} steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient} diff --git a/internal/query/milestone_test.go b/internal/query/milestone_test.go index c960724299..ee99474ec2 100644 --- a/internal/query/milestone_test.go +++ b/internal/query/milestone_test.go @@ -11,14 +11,14 @@ import ( var ( expectedMilestoneQuery = regexp.QuoteMeta(` - SELECT projections.milestones2.instance_id, + SELECT projections.milestones3.instance_id, projections.instance_domains.domain, - projections.milestones2.reached_date, - projections.milestones2.last_pushed_date, - projections.milestones2.type, + projections.milestones3.reached_date, + projections.milestones3.last_pushed_date, + projections.milestones3.type, COUNT(*) OVER () - FROM projections.milestones2 AS OF SYSTEM TIME '-1 ms' - LEFT JOIN projections.instance_domains ON projections.milestones2.instance_id = projections.instance_domains.instance_id + FROM projections.milestones3 AS OF SYSTEM TIME '-1 ms' + LEFT JOIN projections.instance_domains ON projections.milestones3.instance_id = projections.instance_domains.instance_id `) milestoneCols = []string{ diff 
--git a/internal/query/projection/milestones.go b/internal/query/projection/milestones.go index c264aa48fe..038fd8a1d1 100644 --- a/internal/query/projection/milestones.go +++ b/internal/query/projection/milestones.go @@ -10,7 +10,7 @@ import ( ) const ( - MilestonesProjectionTable = "projections.milestones2" + MilestonesProjectionTable = "projections.milestones3" MilestoneColumnInstanceID = "instance_id" MilestoneColumnType = "type" @@ -77,9 +77,6 @@ func (p *milestoneProjection) reducePushed(event eventstore.Event) (*handler.Sta if err != nil { return nil, err } - if e.Agg.Version != milestone.AggregateVersion { - return handler.NewNoOpStatement(event), nil // Skip v1 events. - } if e.MilestoneType != milestone.InstanceDeleted { return handler.NewUpdateStatement( event, diff --git a/internal/query/projection/milestones_test.go b/internal/query/projection/milestones_test.go index 4216e01636..7d874e2b47 100644 --- a/internal/query/projection/milestones_test.go +++ b/internal/query/projection/milestones_test.go @@ -31,7 +31,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(timedTestEvent( milestone.ReachedEventType, milestone.AggregateType, - []byte(`{"type": "instance_created"}`), + []byte(`{"type": "InstanceCreated"}`), now, withVersion(milestone.AggregateVersion), ), milestone.ReachedEventMapper), @@ -43,7 +43,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "INSERT INTO projections.milestones2 (instance_id, type, reached_date) VALUES ($1, $2, $3)", + expectedStmt: "INSERT INTO projections.milestones3 (instance_id, type, reached_date) VALUES ($1, $2, $3)", expectedArgs: []interface{}{ "instance-id", milestone.InstanceCreated, @@ -60,7 +60,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(timedTestEvent( milestone.ReachedEventType, milestone.AggregateType, - []byte(`{"type": "instance_created", "reachedDate":"2006-01-02T15:04:05Z"}`), + []byte(`{"type": "InstanceCreated", "reachedDate":"2006-01-02T15:04:05Z"}`), now, withVersion(milestone.AggregateVersion), ), milestone.ReachedEventMapper), @@ -72,7 +72,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "INSERT INTO projections.milestones2 (instance_id, type, reached_date) VALUES ($1, $2, $3)", + expectedStmt: "INSERT INTO projections.milestones3 (instance_id, type, reached_date) VALUES ($1, $2, $3)", expectedArgs: []interface{}{ "instance-id", milestone.InstanceCreated, @@ -89,7 +89,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(timedTestEvent( milestone.PushedEventType, milestone.AggregateType, - []byte(`{"type": "project_created"}`), + []byte(`{"type": "ProjectCreated"}`), now, withVersion(milestone.AggregateVersion), ), milestone.PushedEventMapper), @@ -101,7 +101,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "UPDATE projections.milestones2 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", + expectedStmt: "UPDATE projections.milestones3 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", expectedArgs: []interface{}{ now, "instance-id", @@ -118,7 +118,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(timedTestEvent( milestone.PushedEventType, milestone.AggregateType, - []byte(`{"type": "project_created", "pushedDate":"2006-01-02T15:04:05Z"}`), + []byte(`{"type": 
"ProjectCreated", "pushedDate":"2006-01-02T15:04:05Z"}`), now, withVersion(milestone.AggregateVersion), ), milestone.PushedEventMapper), @@ -130,7 +130,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "UPDATE projections.milestones2 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", + expectedStmt: "UPDATE projections.milestones3 SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)", expectedArgs: []interface{}{ date, "instance-id", @@ -147,7 +147,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { event: getEvent(testEvent( milestone.PushedEventType, milestone.AggregateType, - []byte(`{"type": "instance_deleted"}`), + []byte(`{"type": "InstanceDeleted"}`), withVersion(milestone.AggregateVersion), ), milestone.PushedEventMapper), }, @@ -158,7 +158,7 @@ func TestMilestonesProjection_reduces(t *testing.T) { executer: &testExecuter{ executions: []execution{ { - expectedStmt: "DELETE FROM projections.milestones2 WHERE (instance_id = $1)", + expectedStmt: "DELETE FROM projections.milestones3 WHERE (instance_id = $1)", expectedArgs: []interface{}{ "instance-id", }, diff --git a/internal/repository/milestone/events.go b/internal/repository/milestone/events.go index 86e372e48c..95dfa24265 100644 --- a/internal/repository/milestone/events.go +++ b/internal/repository/milestone/events.go @@ -7,7 +7,7 @@ import ( "github.com/zitadel/zitadel/internal/eventstore" ) -//go:generate enumer -type Type -json -linecomment -transform=snake +//go:generate enumer -type Type -json -linecomment type Type int const ( @@ -20,7 +20,7 @@ const ( ) const ( - eventTypePrefix = "milestone." + eventTypePrefix = "milestone.v2." ReachedEventType = eventTypePrefix + "reached" PushedEventType = eventTypePrefix + "pushed" ) diff --git a/internal/repository/milestone/type_enumer.go b/internal/repository/milestone/type_enumer.go index 3b32fc6218..c2763a9a78 100644 --- a/internal/repository/milestone/type_enumer.go +++ b/internal/repository/milestone/type_enumer.go @@ -1,4 +1,4 @@ -// Code generated by "enumer -type Type -json -linecomment -transform=snake"; DO NOT EDIT. +// Code generated by "enumer -type Type -json -linecomment"; DO NOT EDIT. 
package milestone @@ -8,11 +8,11 @@ import ( "strings" ) -const _TypeName = "instance_createdauthentication_succeeded_on_instanceproject_createdapplication_createdauthentication_succeeded_on_applicationinstance_deleted" +const _TypeName = "InstanceCreatedAuthenticationSucceededOnInstanceProjectCreatedApplicationCreatedAuthenticationSucceededOnApplicationInstanceDeleted" -var _TypeIndex = [...]uint8{0, 16, 52, 67, 86, 125, 141} +var _TypeIndex = [...]uint8{0, 15, 48, 62, 80, 116, 131} -const _TypeLowerName = "instance_createdauthentication_succeeded_on_instanceproject_createdapplication_createdauthentication_succeeded_on_applicationinstance_deleted" +const _TypeLowerName = "instancecreatedauthenticationsucceededoninstanceprojectcreatedapplicationcreatedauthenticationsucceededonapplicationinstancedeleted" func (i Type) String() string { i -= 1 @@ -37,27 +37,27 @@ func _TypeNoOp() { var _TypeValues = []Type{InstanceCreated, AuthenticationSucceededOnInstance, ProjectCreated, ApplicationCreated, AuthenticationSucceededOnApplication, InstanceDeleted} var _TypeNameToValueMap = map[string]Type{ - _TypeName[0:16]: InstanceCreated, - _TypeLowerName[0:16]: InstanceCreated, - _TypeName[16:52]: AuthenticationSucceededOnInstance, - _TypeLowerName[16:52]: AuthenticationSucceededOnInstance, - _TypeName[52:67]: ProjectCreated, - _TypeLowerName[52:67]: ProjectCreated, - _TypeName[67:86]: ApplicationCreated, - _TypeLowerName[67:86]: ApplicationCreated, - _TypeName[86:125]: AuthenticationSucceededOnApplication, - _TypeLowerName[86:125]: AuthenticationSucceededOnApplication, - _TypeName[125:141]: InstanceDeleted, - _TypeLowerName[125:141]: InstanceDeleted, + _TypeName[0:15]: InstanceCreated, + _TypeLowerName[0:15]: InstanceCreated, + _TypeName[15:48]: AuthenticationSucceededOnInstance, + _TypeLowerName[15:48]: AuthenticationSucceededOnInstance, + _TypeName[48:62]: ProjectCreated, + _TypeLowerName[48:62]: ProjectCreated, + _TypeName[62:80]: ApplicationCreated, + _TypeLowerName[62:80]: ApplicationCreated, + _TypeName[80:116]: AuthenticationSucceededOnApplication, + _TypeLowerName[80:116]: AuthenticationSucceededOnApplication, + _TypeName[116:131]: InstanceDeleted, + _TypeLowerName[116:131]: InstanceDeleted, } var _TypeNames = []string{ - _TypeName[0:16], - _TypeName[16:52], - _TypeName[52:67], - _TypeName[67:86], - _TypeName[86:125], - _TypeName[125:141], + _TypeName[0:15], + _TypeName[15:48], + _TypeName[48:62], + _TypeName[62:80], + _TypeName[80:116], + _TypeName[116:131], } // TypeString retrieves an enum value from the enum constants string name. From 04a166f2d2392da59baf93e22c2f68223e986e7b Mon Sep 17 00:00:00 2001 From: Jonathon Taylor <26156282+j0nathontayl0r@users.noreply.github.com> Date: Mon, 11 Nov 2024 22:03:15 +1000 Subject: [PATCH 30/30] fix(translations): typo in VerifyEmail body (#8872) # Which Problems Are Solved Fixes small typo in email body during user creation & verification. The change also includes the removal of some unnecessary white space in the same yaml file. # How the Problems Are Solved Replaces din't with didn't. 
![image](https://github.com/user-attachments/assets/48abf38b-4deb-42b7-a85b-91009e19f27f) Co-authored-by: jtaylor@dingo.com Co-authored-by: Silvan --- cmd/defaults.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/defaults.yaml b/cmd/defaults.yaml index 8015dd8dad..18f814a6d2 100644 --- a/cmd/defaults.yaml +++ b/cmd/defaults.yaml @@ -387,7 +387,7 @@ Projections: org_domain_verified_fields: TransactionDuration: 0s BulkLimit: 2000 - + # The Notifications projection is used for sending emails and SMS to users Notifications: # As notification projections don't result in database statements, retries don't have an effect @@ -469,7 +469,7 @@ OIDC: AuthMethodPrivateKeyJWT: true # ZITADEL_OIDC_AUTHMETHODPRIVATEKEYJWT GrantTypeRefreshToken: true # ZITADEL_OIDC_GRANTTYPEREFRESHTOKEN RequestObjectSupported: true # ZITADEL_OIDC_REQUESTOBJECTSUPPORTED - + # Deprecated: The signing algorithm is determined by the generated keys. # Use the web keys resource to generate keys with different algorithms. SigningKeyAlgorithm: RS256 # ZITADEL_OIDC_SIGNINGKEYALGORITHM @@ -482,7 +482,7 @@ OIDC: DefaultRefreshTokenIdleExpiration: 720h # ZITADEL_OIDC_DEFAULTREFRESHTOKENIDLEEXPIRATION # 2160h are 90 days, three months DefaultRefreshTokenExpiration: 2160h # ZITADEL_OIDC_DEFAULTREFRESHTOKENEXPIRATION - + # HTTP Cache-Control max-age header value to set on the jwks endpoint. # Only used when the web keys feature is enabled. 0 sets a no-store value. JWKSCacheControlMaxAge: 5m # ZITADEL_OIDC_JWKSCACHECONTROLMAXAGE @@ -913,7 +913,7 @@ DefaultInstance: MaxOTPAttempts: 0 # ZITADEL_DEFAULTINSTANCE_LOCKOUTPOLICY_MAXOTPATTEMPTS ShouldShowLockoutFailure: true # ZITADEL_DEFAULTINSTANCE_LOCKOUTPOLICY_SHOULDSHOWLOCKOUTFAILURE EmailTemplate: 
<!doctype html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
  <title>

  </title>
  <!--[if !mso]><!-->
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <!--<![endif]-->
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <style type="text/css">
    #outlook a { padding:0; }
    body { margin:0;padding:0;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%; }
    table, td { border-collapse:collapse;mso-table-lspace:0pt;mso-table-rspace:0pt; }
    img { border:0;height:auto;line-height:100%; outline:none;text-decoration:none;-ms-interpolation-mode:bicubic; }
    p { display:block;margin:13px 0; }
  </style>
  <!--[if mso]>
  <xml>
    <o:OfficeDocumentSettings>
      <o:AllowPNG/>
      <o:PixelsPerInch>96</o:PixelsPerInch>
    </o:OfficeDocumentSettings>
  </xml>
  <![endif]-->
  <!--[if lte mso 11]>
  <style type="text/css">
    .mj-outlook-group-fix { width:100% !important; }
  </style>
  <![endif]-->


  <style type="text/css">
    @media only screen and (min-width:480px) {
      .mj-column-per-100 { width:100% !important; max-width: 100%; }
      .mj-column-per-60 { width:60% !important; max-width: 60%; }
    }
  </style>


  <style type="text/css">



    @media only screen and (max-width:480px) {
      table.mj-full-width-mobile { width: 100% !important; }
      td.mj-full-width-mobile { width: auto !important; }
    }

  </style>
  <style type="text/css">.shadow a {
    box-shadow: 0px 3px 1px -2px rgba(0, 0, 0, 0.2), 0px 2px 2px 0px rgba(0, 0, 0, 0.14), 0px 1px 5px 0px rgba(0, 0, 0, 0.12);
  }</style>

  {{if .FontURL}}
  <style>
    @font-face {
      font-family: '{{.FontFaceFamily}}';
      font-style: normal;
      font-display: swap;
      src: url({{.FontURL}});
    }
  </style>
  {{end}}

</head>
<body style="word-spacing:normal;">


<div
        style=""
>

  <table
          align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="background:{{.BackgroundColor}};background-color:{{.BackgroundColor}};width:100%;border-radius:16px;"
  >
    <tbody>
    <tr>
      <td>


        <!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:800px;" width="800" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->


        <div  style="margin:0px auto;border-radius:16px;max-width:800px;">

          <table
                  align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="width:100%;border-radius:16px;"
          >
            <tbody>
            <tr>
              <td
                      style="direction:ltr;font-size:0px;padding:20px 0;padding-left:0;text-align:center;"
              >
                <!--[if mso | IE]><table role="presentation" border="0" cellpadding="0" cellspacing="0"><tr><td class="" width="800px" ><![endif]-->

                <table
                        align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="width:100%;"
                >
                  <tbody>
                  <tr>
                    <td>


                      <!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:800px;" width="800" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->


                      <div  style="margin:0px auto;max-width:800px;">

                        <table
                                align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="width:100%;"
                        >
                          <tbody>
                          <tr>
                            <td
                                    style="direction:ltr;font-size:0px;padding:0;text-align:center;"
                            >
                              <!--[if mso | IE]><table role="presentation" border="0" cellpadding="0" cellspacing="0"><tr><td class="" style="width:800px;" ><![endif]-->

                              <div
                                      class="mj-column-per-100 mj-outlook-group-fix" style="font-size:0;line-height:0;text-align:left;display:inline-block;width:100%;direction:ltr;"
                              >
                                <!--[if mso | IE]><table border="0" cellpadding="0" cellspacing="0" role="presentation" ><tr><td style="vertical-align:top;width:800px;" ><![endif]-->

                                <div
                                        class="mj-column-per-100 mj-outlook-group-fix" style="font-size:0px;text-align:left;direction:ltr;display:inline-block;vertical-align:top;width:100%;"
                                >

                                  <table
                                          border="0" cellpadding="0" cellspacing="0" role="presentation" width="100%"
                                  >
                                    <tbody>
                                    <tr>
                                      <td  style="vertical-align:top;padding:0;">
                                        {{if .LogoURL}}
                                        <table
                                                border="0" cellpadding="0" cellspacing="0" role="presentation" style="" width="100%"
                                        >
                                          <tbody>

                                          <tr>
                                            <td
                                                    align="center" style="font-size:0px;padding:50px 0 30px 0;word-break:break-word;"
                                            >

                                              <table
                                                      border="0" cellpadding="0" cellspacing="0" role="presentation" style="border-collapse:collapse;border-spacing:0px;"
                                              >
                                                <tbody>
                                                <tr>
                                                  <td  style="width:180px;">

                                                    <img
                                                            height="auto" src="{{.LogoURL}}" style="border:0;border-radius:8px;display:block;outline:none;text-decoration:none;height:auto;width:100%;font-size:13px;" width="180"
                                                    />

                                                  </td>
                                                </tr>
                                                </tbody>
                                              </table>

                                            </td>
                                          </tr>

                                          </tbody>
                                        </table>
                                        {{end}}
                                      </td>
                                    </tr>
                                    </tbody>
                                  </table>

                                </div>

                                <!--[if mso | IE]></td></tr></table><![endif]-->
                              </div>

                              <!--[if mso | IE]></td></tr></table><![endif]-->
                            </td>
                          </tr>
                          </tbody>
                        </table>

                      </div>


                      <!--[if mso | IE]></td></tr></table><![endif]-->


                    </td>
                  </tr>
                  </tbody>
                </table>

                <!--[if mso | IE]></td></tr><tr><td class="" width="800px" ><![endif]-->

                <table
                        align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="width:100%;"
                >
                  <tbody>
                  <tr>
                    <td>


                      <!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:800px;" width="800" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->


                      <div  style="margin:0px auto;max-width:800px;">

                        <table
                                align="center" border="0" cellpadding="0" cellspacing="0" role="presentation" style="width:100%;"
                        >
                          <tbody>
                          <tr>
                            <td
                                    style="direction:ltr;font-size:0px;padding:0;text-align:center;"
                            >
                              <!--[if mso | IE]><table role="presentation" border="0" cellpadding="0" cellspacing="0"><tr><td class="" style="vertical-align:top;width:480px;" ><![endif]-->

                              <div
                                      class="mj-column-per-60 mj-outlook-group-fix" style="font-size:0px;text-align:left;direction:ltr;display:inline-block;vertical-align:top;width:100%;"
                              >

                                <table
                                        border="0" cellpadding="0" cellspacing="0" role="presentation" width="100%"
                                >
                                  <tbody>
                                  <tr>
                                    <td  style="vertical-align:top;padding:0;">

                                      <table
                                              border="0" cellpadding="0" cellspacing="0" role="presentation" style="" width="100%"
                                      >
                                        <tbody>

                                        <tr>
                                          <td
                                                  align="center" style="font-size:0px;padding:10px 25px;word-break:break-word;"
                                          >

                                            <div
                                                    style="font-family:{{.FontFamily}};font-size:24px;font-weight:500;line-height:1;text-align:center;color:{{.FontColor}};"
                                            >{{.Greeting}}</div>

                                          </td>
                                        </tr>

                                        <tr>
                                          <td
                                                  align="center" style="font-size:0px;padding:10px 25px;word-break:break-word;"
                                          >

                                            <div
                                                    style="font-family:{{.FontFamily}};font-size:16px;font-weight:light;line-height:1.5;text-align:center;color:{{.FontColor}};"
                                            >{{.Text}}</div>

                                          </td>
                                        </tr>


                                        <tr>
                                          <td
                                                  align="center" vertical-align="middle" class="shadow" style="font-size:0px;padding:10px 25px;word-break:break-word;"
                                          >

                                            <table
                                                    border="0" cellpadding="0" cellspacing="0" role="presentation" style="border-collapse:separate;line-height:100%;"
                                            >
                                              <tr>
                                                <td
                                                        align="center" bgcolor="{{.PrimaryColor}}" role="presentation" style="border:none;border-radius:6px;cursor:auto;mso-padding-alt:10px 25px;background:{{.PrimaryColor}};" valign="middle"
                                                >
                                                  <a
                                                          href="{{.URL}}" rel="noopener noreferrer notrack" style="display:inline-block;background:{{.PrimaryColor}};color:#ffffff;font-family:{{.FontFamily}};font-size:14px;font-weight:500;line-height:120%;margin:0;text-decoration:none;text-transform:none;padding:10px 25px;mso-padding-alt:0px;border-radius:6px;" target="_blank"
                                                  >
                                                    {{.ButtonText}}
                                                  </a>
                                                </td>
                                              </tr>
                                            </table>

                                          </td>
                                        </tr>
                                        {{if .IncludeFooter}}
                                        <tr>
                                          <td
                                                  align="center" style="font-size:0px;padding:10px 25px;padding-top:20px;padding-right:20px;padding-bottom:20px;padding-left:20px;word-break:break-word;"
                                          >

                                            <p
                                                    style="border-top:solid 2px #dbdbdb;font-size:1px;margin:0px auto;width:100%;"
                                            >
                                            </p>

                                            <!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" style="border-top:solid 2px #dbdbdb;font-size:1px;margin:0px auto;width:440px;" role="presentation" width="440px" ><tr><td style="height:0;line-height:0;"> &nbsp;
                                      </td></tr></table><![endif]-->


                                          </td>
                                        </tr>

                                        <tr>
                                          <td
                                                  align="center" style="font-size:0px;padding:16px;word-break:break-word;"
                                          >

                                            <div
                                                    style="font-family:{{.FontFamily}};font-size:13px;line-height:1;text-align:center;color:{{.FontColor}};"
                                            >{{.FooterText}}</div>

                                          </td>
                                        </tr>
                                        {{end}}
                                        </tbody>
                                      </table>

                                    </td>
                                  </tr>
                                  </tbody>
                                </table>

                              </div>

                              <!--[if mso | IE]></td></tr></table><![endif]-->
                            </td>
                          </tr>
                          </tbody>
                        </table>

                      </div>


                      <!--[if mso | IE]></td></tr></table><![endif]-->


                    </td>
                  </tr>
                  </tbody>
                </table>

                <!--[if mso | IE]></td></tr></table><![endif]-->
              </td>
            </tr>
            </tbody>
          </table>

        </div>


        <!--[if mso | IE]></td></tr></table><![endif]-->


      </td>
    </tr>
    </tbody>
  </table>

</div>

</body>
</html>
 # ZITADEL_DEFAULTINSTANCE_EMAILTEMPLATE - + # WebKeys configures the OIDC token signing keys that are generated when a new instance is created. # WebKeys are still in alpha, so the config is disabled here. This will prevent generation of keys for now. # WebKeys: @@ -925,7 +925,7 @@ DefaultInstance: # Type: "ecdsa" # Config: # Curve: "P256" # ZITADEL_DEFAULTINSTANCE_WEBKEYS_CONFIG_CURVE - + # Sets the default values for lifetime and expiration for OIDC in each newly created instance # This default can be overwritten for each instance during runtime # Overwrites the system defaults @@ -1024,7 +1024,7 @@ DefaultInstance: PreHeader: Verify email Subject: Verify email Greeting: Hello {{.DisplayName}}, - Text: A new email has been added. Please use the button below to verify your email. (Code {{.Code}}) If you din't add a new email, please ignore this email. + Text: A new email has been added. Please use the button below to verify your email. (Code {{.Code}}) If you didn't add a new email, please ignore this email. ButtonText: Verify email - MessageTextType: VerifyPhone Language: en