2022-02-14 17:22:30 +01:00
|
|
|
package login
|
2020-06-05 07:50:04 +02:00
|
|
|
|
|
|
|
import (
|
2024-07-17 06:43:07 +02:00
|
|
|
"context"
|
feat(notification): use event worker pool (#8962)
# Which Problems Are Solved
The current handling of notification follows the same pattern as all
other projections:
Created events are handled sequentially (based on "position") by a
handler. During the process, a lot of information is aggregated (user,
texts, templates, ...).
This leads to back pressure on the projection since the handling of
events might take longer than the time before a new event (to be
handled) is created.
# How the Problems Are Solved
- The current user notification handler creates separate notification
events based on the user / session events.
- These events contain all the present and required information
including the userID.
- These notification events get processed by notification workers, which
gather the necessary information (recipient address, texts, templates)
to send out these notifications.
- If a notification fails, a retry event is created based on the current
notification request including the current state of the user (this
prevents race conditions, where a user is changed in the meantime and
the notification already gets the new state).
- The retry event will be handled after a backoff delay. This delay
increases with every attempt.
- If the configured amount of attempts is reached or the message expired
(based on config), a cancel event is created, letting the workers know,
the notification must no longer be handled.
- In case of successful send, a sent event is created for the
notification aggregate and the existing "sent" events for the user /
session object is stored.
- The following is added to the defaults.yaml to allow configuration of
the notification workers:
```yaml
Notifications:
# The amount of workers processing the notification request events.
# If set to 0, no notification request events will be handled. This can be useful when running in
# multi binary / pod setup and allowing only certain executables to process the events.
Workers: 1 # ZITADEL_NOTIFIACATIONS_WORKERS
# The amount of events a single worker will process in a run.
BulkLimit: 10 # ZITADEL_NOTIFIACATIONS_BULKLIMIT
# Time interval between scheduled notifications for request events
RequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_REQUEUEEVERY
# The amount of workers processing the notification retry events.
# If set to 0, no notification retry events will be handled. This can be useful when running in
# multi binary / pod setup and allowing only certain executables to process the events.
RetryWorkers: 1 # ZITADEL_NOTIFIACATIONS_RETRYWORKERS
# Time interval between scheduled notifications for retry events
RetryRequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_RETRYREQUEUEEVERY
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe
# from HandleActiveInstances duration in the past until the projection's current time
# If set to 0 (default), every instance is always considered active
HandleActiveInstances: 0s # ZITADEL_NOTIFIACATIONS_HANDLEACTIVEINSTANCES
# The maximum duration a transaction remains open
# before it spots left folding additional events
# and updates the table.
TransactionDuration: 1m # ZITADEL_NOTIFIACATIONS_TRANSACTIONDURATION
# Automatically cancel the notification after the amount of failed attempts
MaxAttempts: 3 # ZITADEL_NOTIFIACATIONS_MAXATTEMPTS
# Automatically cancel the notification if it cannot be handled within a specific time
MaxTtl: 5m # ZITADEL_NOTIFIACATIONS_MAXTTL
# Failed attempts are retried after a configured delay (with exponential backoff).
# Set a minimum and maximum delay and a factor for the backoff
MinRetryDelay: 1s # ZITADEL_NOTIFIACATIONS_MINRETRYDELAY
MaxRetryDelay: 20s # ZITADEL_NOTIFIACATIONS_MAXRETRYDELAY
# Any factor below 1 will be set to 1
RetryDelayFactor: 1.5 # ZITADEL_NOTIFIACATIONS_RETRYDELAYFACTOR
```
# Additional Changes
None
# Additional Context
- closes #8931
2024-11-27 16:01:17 +01:00
|
|
|
"fmt"
|
2020-06-05 07:50:04 +02:00
|
|
|
"net/http"
|
2024-07-22 14:46:27 +02:00
|
|
|
"slices"
|
2022-02-14 17:22:30 +01:00
|
|
|
|
2024-07-17 06:43:07 +02:00
|
|
|
"github.com/zitadel/logging"
|
|
|
|
|
|
|
|
http_mw "github.com/zitadel/zitadel/internal/api/http/middleware"
|
2022-04-27 01:01:45 +02:00
|
|
|
"github.com/zitadel/zitadel/internal/domain"
|
2024-07-17 06:43:07 +02:00
|
|
|
"github.com/zitadel/zitadel/internal/zerrors"
|
2020-06-05 07:50:04 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Query parameter names used in the mail verification link / form.
	queryCode   = "code"
	queryUserID = "userID"

	// Keys of the templates rendered by this handler (looked up in renderer.Templates).
	tmplMailVerification = "mail_verification"
	tmplMailVerified     = "mail_verified"
)
|
|
|
|
|
|
|
|
// mailVerificationFormData holds the values posted to the mail verification
// form. Password, PasswordConfirm and PasswordInit are only relevant when the
// user has no first factor yet and must set an initial password alongside the
// verification.
type mailVerificationFormData struct {
	Code            string `schema:"code"`
	UserID          string `schema:"userID"`
	Resend          bool   `schema:"resend"`          // true requests a new verification code instead of checking one
	PasswordInit    bool   `schema:"passwordInit"`    // true when the form also collects an initial password
	Password        string `schema:"password"`
	PasswordConfirm string `schema:"passwordconfirm"` // must equal Password when PasswordInit is set
}
|
|
|
|
|
|
|
|
// mailVerificationData is the view model for the mail verification form and
// the "mail verified" confirmation page.
type mailVerificationData struct {
	baseData
	profileData
	UserID       string
	Code         string
	PasswordInit bool // true when the user must also set an initial password
	// Password complexity policy values, populated by renderMailVerification
	// only when PasswordInit is set; the Has* fields carry the corresponding
	// regex constants when the policy requires that character class.
	MinLength    uint64
	HasUppercase string
	HasLowercase string
	HasNumber    string
	HasSymbol    string
}
|
|
|
|
|
feat(notification): use event worker pool (#8962)
# Which Problems Are Solved
The current handling of notification follows the same pattern as all
other projections:
Created events are handled sequentially (based on "position") by a
handler. During the process, a lot of information is aggregated (user,
texts, templates, ...).
This leads to back pressure on the projection since the handling of
events might take longer than the time before a new event (to be
handled) is created.
# How the Problems Are Solved
- The current user notification handler creates separate notification
events based on the user / session events.
- These events contain all the present and required information
including the userID.
- These notification events get processed by notification workers, which
gather the necessary information (recipient address, texts, templates)
to send out these notifications.
- If a notification fails, a retry event is created based on the current
notification request including the current state of the user (this
prevents race conditions, where a user is changed in the meantime and
the notification already gets the new state).
- The retry event will be handled after a backoff delay. This delay
increases with every attempt.
- If the configured amount of attempts is reached or the message expired
(based on config), a cancel event is created, letting the workers know,
the notification must no longer be handled.
- In case of successful send, a sent event is created for the
notification aggregate and the existing "sent" events for the user /
session object is stored.
- The following is added to the defaults.yaml to allow configuration of
the notification workers:
```yaml
Notifications:
# The amount of workers processing the notification request events.
# If set to 0, no notification request events will be handled. This can be useful when running in
# multi binary / pod setup and allowing only certain executables to process the events.
Workers: 1 # ZITADEL_NOTIFIACATIONS_WORKERS
# The amount of events a single worker will process in a run.
BulkLimit: 10 # ZITADEL_NOTIFIACATIONS_BULKLIMIT
# Time interval between scheduled notifications for request events
RequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_REQUEUEEVERY
# The amount of workers processing the notification retry events.
# If set to 0, no notification retry events will be handled. This can be useful when running in
# multi binary / pod setup and allowing only certain executables to process the events.
RetryWorkers: 1 # ZITADEL_NOTIFIACATIONS_RETRYWORKERS
# Time interval between scheduled notifications for retry events
RetryRequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_RETRYREQUEUEEVERY
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe
# from HandleActiveInstances duration in the past until the projection's current time
# If set to 0 (default), every instance is always considered active
HandleActiveInstances: 0s # ZITADEL_NOTIFIACATIONS_HANDLEACTIVEINSTANCES
# The maximum duration a transaction remains open
# before it spots left folding additional events
# and updates the table.
TransactionDuration: 1m # ZITADEL_NOTIFIACATIONS_TRANSACTIONDURATION
# Automatically cancel the notification after the amount of failed attempts
MaxAttempts: 3 # ZITADEL_NOTIFIACATIONS_MAXATTEMPTS
# Automatically cancel the notification if it cannot be handled within a specific time
MaxTtl: 5m # ZITADEL_NOTIFIACATIONS_MAXTTL
# Failed attempts are retried after a configured delay (with exponential backoff).
# Set a minimum and maximum delay and a factor for the backoff
MinRetryDelay: 1s # ZITADEL_NOTIFIACATIONS_MINRETRYDELAY
MaxRetryDelay: 20s # ZITADEL_NOTIFIACATIONS_MAXRETRYDELAY
# Any factor below 1 will be set to 1
RetryDelayFactor: 1.5 # ZITADEL_NOTIFIACATIONS_RETRYDELAYFACTOR
```
# Additional Changes
None
# Additional Context
- closes #8931
2024-11-27 16:01:17 +01:00
|
|
|
func MailVerificationLinkTemplate(origin, userID, orgID, authRequestID string) string {
|
|
|
|
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s",
|
|
|
|
externalLink(origin), EndpointMailVerification,
|
|
|
|
queryUserID, userID,
|
|
|
|
queryCode, "{{.Code}}",
|
|
|
|
queryOrgID, orgID,
|
|
|
|
QueryAuthRequestID, authRequestID)
|
2022-04-25 11:16:36 +02:00
|
|
|
}
|
|
|
|
|
2020-06-05 07:50:04 +02:00
|
|
|
func (l *Login) handleMailVerification(w http.ResponseWriter, r *http.Request) {
|
2024-04-24 17:50:58 +02:00
|
|
|
authReq := l.checkOptionalAuthRequestOfEmailLinks(r)
|
2020-06-05 07:50:04 +02:00
|
|
|
userID := r.FormValue(queryUserID)
|
|
|
|
code := r.FormValue(queryCode)
|
2024-07-17 06:43:07 +02:00
|
|
|
if userID == "" && authReq == nil {
|
|
|
|
l.renderError(w, r, authReq, nil)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if userID == "" {
|
|
|
|
userID = authReq.UserID
|
|
|
|
}
|
|
|
|
passwordInit := l.checkUserNoFirstFactor(r.Context(), userID)
|
|
|
|
if code != "" && !passwordInit {
|
|
|
|
l.checkMailCode(w, r, authReq, userID, code, "")
|
2020-06-05 07:50:04 +02:00
|
|
|
return
|
|
|
|
}
|
2024-07-17 06:43:07 +02:00
|
|
|
l.renderMailVerification(w, r, authReq, userID, code, passwordInit, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (l *Login) checkUserNoFirstFactor(ctx context.Context, userID string) bool {
|
2024-10-10 18:50:53 +02:00
|
|
|
authMethods, err := l.query.ListUserAuthMethodTypes(setUserContext(ctx, userID, ""), userID, false, false, "")
|
2024-07-17 06:43:07 +02:00
|
|
|
if err != nil {
|
|
|
|
logging.WithFields("userID", userID).OnError(err).Warn("unable to load user's auth methods for mail verification")
|
|
|
|
return false
|
|
|
|
}
|
2024-07-22 14:46:27 +02:00
|
|
|
return !slices.ContainsFunc(authMethods.AuthMethodTypes, func(m domain.UserAuthMethodType) bool {
|
|
|
|
return m == domain.UserAuthMethodTypeIDP ||
|
|
|
|
m == domain.UserAuthMethodTypePassword ||
|
|
|
|
m == domain.UserAuthMethodTypePasswordless
|
|
|
|
})
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func (l *Login) handleMailVerificationCheck(w http.ResponseWriter, r *http.Request) {
|
|
|
|
data := new(mailVerificationFormData)
|
|
|
|
authReq, err := l.getAuthRequestAndParseData(r, data)
|
|
|
|
if err != nil {
|
|
|
|
l.renderError(w, r, authReq, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !data.Resend {
|
2024-07-17 06:43:07 +02:00
|
|
|
if data.PasswordInit && data.Password != data.PasswordConfirm {
|
|
|
|
err := zerrors.ThrowInvalidArgument(nil, "VIEW-fsdfd", "Errors.User.Password.ConfirmationWrong")
|
|
|
|
l.renderMailVerification(w, r, authReq, data.UserID, data.Code, data.PasswordInit, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
l.checkMailCode(w, r, authReq, data.UserID, data.Code, data.Password)
|
2020-06-05 07:50:04 +02:00
|
|
|
return
|
|
|
|
}
|
2024-06-07 09:30:04 +02:00
|
|
|
var userOrg, authReqID string
|
2020-06-05 07:50:04 +02:00
|
|
|
if authReq != nil {
|
|
|
|
userOrg = authReq.UserOrgID
|
2024-06-07 09:30:04 +02:00
|
|
|
authReqID = authReq.ID
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
2022-03-14 07:55:09 +01:00
|
|
|
emailCodeGenerator, err := l.query.InitEncryptionGenerator(r.Context(), domain.SecretGeneratorTypeVerifyEmailCode, l.userCodeAlg)
|
2022-02-16 16:49:17 +01:00
|
|
|
if err != nil {
|
2024-07-17 06:43:07 +02:00
|
|
|
l.renderMailVerification(w, r, authReq, data.UserID, "", data.PasswordInit, err)
|
2022-02-16 16:49:17 +01:00
|
|
|
return
|
|
|
|
}
|
2024-06-07 09:30:04 +02:00
|
|
|
_, err = l.command.CreateHumanEmailVerificationCode(setContext(r.Context(), userOrg), data.UserID, userOrg, emailCodeGenerator, authReqID)
|
2024-07-17 06:43:07 +02:00
|
|
|
l.renderMailVerification(w, r, authReq, data.UserID, "", data.PasswordInit, err)
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-07-17 06:43:07 +02:00
|
|
|
func (l *Login) checkMailCode(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, userID, code, password string) {
|
2021-02-08 11:30:30 +01:00
|
|
|
userOrg := ""
|
2020-06-05 07:50:04 +02:00
|
|
|
if authReq != nil {
|
|
|
|
userID = authReq.UserID
|
|
|
|
userOrg = authReq.UserOrgID
|
|
|
|
}
|
2022-03-14 07:55:09 +01:00
|
|
|
emailCodeGenerator, err := l.query.InitEncryptionGenerator(r.Context(), domain.SecretGeneratorTypeVerifyEmailCode, l.userCodeAlg)
|
2022-02-16 16:49:17 +01:00
|
|
|
if err != nil {
|
2024-07-17 06:43:07 +02:00
|
|
|
l.renderMailVerification(w, r, authReq, userID, "", password != "", err)
|
2022-02-16 16:49:17 +01:00
|
|
|
return
|
|
|
|
}
|
2024-07-17 06:43:07 +02:00
|
|
|
userAgentID, _ := http_mw.UserAgentIDFromCtx(r.Context())
|
|
|
|
_, err = l.command.VerifyHumanEmail(setContext(r.Context(), userOrg), userID, code, userOrg, password, userAgentID, emailCodeGenerator)
|
2020-06-05 07:50:04 +02:00
|
|
|
if err != nil {
|
2024-07-17 06:43:07 +02:00
|
|
|
l.renderMailVerification(w, r, authReq, userID, "", password != "", err)
|
2020-06-05 07:50:04 +02:00
|
|
|
return
|
|
|
|
}
|
2022-09-20 09:22:47 +02:00
|
|
|
l.renderMailVerified(w, r, authReq, userOrg)
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-07-17 06:43:07 +02:00
|
|
|
func (l *Login) renderMailVerification(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, userID, code string, passwordInit bool, err error) {
|
2021-06-18 10:31:53 +02:00
|
|
|
var errID, errMessage string
|
2020-06-05 07:50:04 +02:00
|
|
|
if err != nil {
|
2021-06-18 10:31:53 +02:00
|
|
|
errID, errMessage = l.getErrorMessage(r, err)
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
2024-07-17 06:43:07 +02:00
|
|
|
if userID == "" && authReq != nil {
|
2020-06-05 07:50:04 +02:00
|
|
|
userID = authReq.UserID
|
|
|
|
}
|
2022-11-07 09:55:12 +01:00
|
|
|
|
|
|
|
translator := l.getTranslator(r.Context(), authReq)
|
2020-06-05 07:50:04 +02:00
|
|
|
data := mailVerificationData{
|
2024-07-17 06:43:07 +02:00
|
|
|
baseData: l.getBaseData(r, authReq, translator, "EmailVerification.Title", "EmailVerification.Description", errID, errMessage),
|
|
|
|
UserID: userID,
|
|
|
|
profileData: l.getProfileData(authReq),
|
|
|
|
Code: code,
|
|
|
|
PasswordInit: passwordInit,
|
|
|
|
}
|
|
|
|
if passwordInit {
|
|
|
|
policy := l.getPasswordComplexityPolicyByUserID(r, userID)
|
|
|
|
if policy != nil {
|
|
|
|
data.MinLength = policy.MinLength
|
|
|
|
if policy.HasUppercase {
|
|
|
|
data.HasUppercase = UpperCaseRegex
|
|
|
|
}
|
|
|
|
if policy.HasLowercase {
|
|
|
|
data.HasLowercase = LowerCaseRegex
|
|
|
|
}
|
|
|
|
if policy.HasSymbol {
|
|
|
|
data.HasSymbol = SymbolRegex
|
|
|
|
}
|
|
|
|
if policy.HasNumber {
|
|
|
|
data.HasNumber = NumberRegex
|
|
|
|
}
|
|
|
|
}
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
2022-08-26 10:53:11 +02:00
|
|
|
if authReq == nil {
|
2023-11-21 14:11:38 +02:00
|
|
|
user, err := l.query.GetUserByID(r.Context(), false, userID)
|
2022-08-26 10:53:11 +02:00
|
|
|
if err == nil {
|
|
|
|
l.customTexts(r.Context(), translator, user.ResourceOwner)
|
|
|
|
}
|
|
|
|
}
|
2021-07-05 15:10:49 +02:00
|
|
|
l.renderer.RenderTemplate(w, r, translator, l.renderer.Templates[tmplMailVerification], data, nil)
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
|
|
|
|
2022-09-20 09:22:47 +02:00
|
|
|
func (l *Login) renderMailVerified(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, orgID string) {
|
2022-11-07 09:55:12 +01:00
|
|
|
translator := l.getTranslator(r.Context(), authReq)
|
2020-06-05 07:50:04 +02:00
|
|
|
data := mailVerificationData{
|
2023-12-05 12:12:01 +01:00
|
|
|
baseData: l.getBaseData(r, authReq, translator, "EmailVerificationDone.Title", "EmailVerificationDone.Description", "", ""),
|
2020-07-20 10:00:29 +02:00
|
|
|
profileData: l.getProfileData(authReq),
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|
2022-09-20 09:22:47 +02:00
|
|
|
if authReq == nil {
|
|
|
|
l.customTexts(r.Context(), translator, orgID)
|
|
|
|
}
|
2021-07-05 15:10:49 +02:00
|
|
|
l.renderer.RenderTemplate(w, r, translator, l.renderer.Templates[tmplMailVerified], data, nil)
|
2020-06-05 07:50:04 +02:00
|
|
|
}
|