package login

import (
	"fmt"
	"net/http"

	http_mw "github.com/zitadel/zitadel/internal/api/http/middleware"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/zerrors"
)

const (
	queryInviteUserCode      = "code"
	queryInviteUserUserID    = "userID"
	queryInviteUserLoginName = "loginname"

	tmplInviteUser = "inviteuser"
)

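// inviteUserFormData captures the values posted from the invite user form,
// decoded via the schema tags.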
type inviteUserFormData struct {
	Code            string `schema:"code"`
	LoginName       string `schema:"loginname"`
	Password        string `schema:"password"`
	PasswordConfirm string `schema:"passwordconfirm"`
	UserID          string `schema:"userID"`
	OrgID           string `schema:"orgID"`
	Resend          bool   `schema:"resend"`
}

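// inviteUserData is the data rendered into the invite user template,
// including the password complexity rules to be checked in the frontend.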
type inviteUserData struct {
	baseData
	profileData
	Code         string
	LoginName    string
	UserID       string
	MinLength    uint64
	HasUppercase string
	HasLowercase string
	HasNumber    string
	HasSymbol    string
}

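// InviteUserLinkTemplate returns the invite link for the given user, org and
// auth request. LoginName and Code are emitted as Go template placeholders
// ({{.LoginName}}, {{.Code}}) so that the notification rendering can fill them
// in per recipient. Illustrative shape of the result (the endpoint and the
// org / auth request parameter names come from constants defined elsewhere):
//
//	{origin}{EndpointInviteUser}?userID=<id>&loginname={{.LoginName}}&code={{.Code}}&orgID=<org>&authRequestID=<req>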
func InviteUserLinkTemplate(origin, userID, orgID, authRequestID string) string {
	return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s&%s=%s",
		externalLink(origin), EndpointInviteUser,
		queryInviteUserUserID, userID,
		queryInviteUserLoginName, "{{.LoginName}}",
		queryInviteUserCode, "{{.Code}}",
		queryOrgID, orgID,
		QueryAuthRequestID, authRequestID)
}

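// handleInviteUser renders the invite user page. It is typically opened via
// the link from the invitation email, so the auth request is optional and all
// parameters are read from the query.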
func (l *Login) handleInviteUser(w http.ResponseWriter, r *http.Request) {
	authReq := l.checkOptionalAuthRequestOfEmailLinks(r)
	userID := r.FormValue(queryInviteUserUserID)
	orgID := r.FormValue(queryOrgID)
	code := r.FormValue(queryInviteUserCode)
	loginName := r.FormValue(queryInviteUserLoginName)
	l.renderInviteUser(w, r, authReq, userID, orgID, loginName, code, nil)
}

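// handleInviteUserCheck handles the submission of the invite user form:
// it either resends the invite or verifies the entered code and password.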
func (l *Login) handleInviteUserCheck(w http.ResponseWriter, r *http.Request) {
	data := new(inviteUserFormData)
	authReq, err := l.getAuthRequestAndParseData(r, data)
	if err != nil {
		l.renderError(w, r, nil, err)
		return
	}
	if data.Resend {
		l.resendUserInvite(w, r, authReq, data.UserID, data.OrgID, data.LoginName)
		return
	}
	l.checkUserInviteCode(w, r, authReq, data)
}

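// checkUserInviteCode verifies the invite code and sets the user's initial
// password. On success, the flow either continues with the next step of the
// auth request or falls back to the default redirect.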
func (l *Login) checkUserInviteCode(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, data *inviteUserFormData) {
	if data.Password != data.PasswordConfirm {
		err := zerrors.ThrowInvalidArgument(nil, "VIEW-KJS3h", "Errors.User.Password.ConfirmationWrong")
		l.renderInviteUser(w, r, authReq, data.UserID, data.OrgID, data.LoginName, data.Code, err)
		return
	}
	userOrgID := ""
	if authReq != nil {
		userOrgID = authReq.UserOrgID
	}
	userAgentID, _ := http_mw.UserAgentIDFromCtx(r.Context())
	_, err := l.command.VerifyInviteCodeSetPassword(setUserContext(r.Context(), data.UserID, userOrgID), data.UserID, data.Code, data.Password, userAgentID)
	if err != nil {
		// on failure, the code is cleared from the re-rendered form
		l.renderInviteUser(w, r, authReq, data.UserID, data.OrgID, data.LoginName, "", err)
		return
	}
	if authReq == nil {
		l.defaultRedirect(w, r)
		return
	}
	l.renderNextStep(w, r, authReq)
}

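// resendUserInvite creates a new invite code for the user and re-renders the
// invite page, including a possible error from the command side.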
func (l *Login) resendUserInvite(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, userID, orgID, loginName string) {
	var userOrgID, authRequestID string
	if authReq != nil {
		userOrgID = authReq.UserOrgID
		authRequestID = authReq.ID
	}
	_, err := l.command.ResendInviteCode(setUserContext(r.Context(), userID, userOrgID), userID, userOrgID, authRequestID)
	l.renderInviteUser(w, r, authReq, userID, orgID, loginName, "", err)
}

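// renderInviteUser renders the invite user page with the given data and an
// optional error. If an auth request is present, its user and org take
// precedence over the passed userID and orgID.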
func (l *Login) renderInviteUser(w http.ResponseWriter, r *http.Request, authReq *domain.AuthRequest, userID, orgID, loginName, code string, err error) {
	var errID, errMessage string
	if err != nil {
		errID, errMessage = l.getErrorMessage(r, err)
	}
	if authReq != nil {
		userID = authReq.UserID
		orgID = authReq.UserOrgID
	}

	translator := l.getTranslator(r.Context(), authReq)
	data := inviteUserData{
		baseData:    l.getBaseData(r, authReq, translator, "InviteUser.Title", "InviteUser.Description", errID, errMessage),
		profileData: l.getProfileData(authReq),
		UserID:      userID,
		Code:        code,
	}
	// if the user clicked the link in the email, make sure the loginName is rendered
	if authReq == nil {
		data.LoginName = loginName
		data.UserName = loginName
	}
	policy := l.getPasswordComplexityPolicyByUserID(r, userID)
	if policy != nil {
		data.MinLength = policy.MinLength
		if policy.HasUppercase {
			data.HasUppercase = UpperCaseRegex
		}
		if policy.HasLowercase {
			data.HasLowercase = LowerCaseRegex
		}
		if policy.HasSymbol {
			data.HasSymbol = SymbolRegex
		}
		if policy.HasNumber {
			data.HasNumber = NumberRegex
		}
	}
	if authReq == nil && err == nil {
		l.customTexts(r.Context(), translator, orgID)
	}
	l.renderer.RenderTemplate(w, r, translator, l.renderer.Templates[tmplInviteUser], data, nil)
}