mirror of
https://github.com/zitadel/zitadel.git
synced 2024-12-16 12:58:00 +00:00
8537805ea5
# Which Problems Are Solved The current handling of notification follows the same pattern as all other projections: Created events are handled sequentially (based on "position") by a handler. During the process, a lot of information is aggregated (user, texts, templates, ...). This leads to back pressure on the projection since the handling of events might take longer than the time before a new event (to be handled) is created. # How the Problems Are Solved - The current user notification handler creates separate notification events based on the user / session events. - These events contain all the present and required information including the userID. - These notification events get processed by notification workers, which gather the necessary information (recipient address, texts, templates) to send out these notifications. - If a notification fails, a retry event is created based on the current notification request including the current state of the user (this prevents race conditions, where a user is changed in the meantime and the notification already gets the new state). - The retry event will be handled after a backoff delay. This delay increases with every attempt. - If the configured amount of attempts is reached or the message expired (based on config), a cancel event is created, letting the workers know, the notification must no longer be handled. - In case of successful send, a sent event is created for the notification aggregate and the existing "sent" events for the user / session object is stored. - The following is added to the defaults.yaml to allow configuration of the notification workers: ```yaml Notifications: # The amount of workers processing the notification request events. # If set to 0, no notification request events will be handled. This can be useful when running in # multi binary / pod setup and allowing only certain executables to process the events. 
Workers: 1 # ZITADEL_NOTIFIACATIONS_WORKERS # The amount of events a single worker will process in a run. BulkLimit: 10 # ZITADEL_NOTIFIACATIONS_BULKLIMIT # Time interval between scheduled notifications for request events RequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_REQUEUEEVERY # The amount of workers processing the notification retry events. # If set to 0, no notification retry events will be handled. This can be useful when running in # multi binary / pod setup and allowing only certain executables to process the events. RetryWorkers: 1 # ZITADEL_NOTIFIACATIONS_RETRYWORKERS # Time interval between scheduled notifications for retry events RetryRequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_RETRYREQUEUEEVERY # Only instances are projected, for which at least a projection-relevant event exists within the timeframe # from HandleActiveInstances duration in the past until the projection's current time # If set to 0 (default), every instance is always considered active HandleActiveInstances: 0s # ZITADEL_NOTIFIACATIONS_HANDLEACTIVEINSTANCES # The maximum duration a transaction remains open # before it stops left folding additional events # and updates the table. TransactionDuration: 1m # ZITADEL_NOTIFIACATIONS_TRANSACTIONDURATION # Automatically cancel the notification after the amount of failed attempts MaxAttempts: 3 # ZITADEL_NOTIFIACATIONS_MAXATTEMPTS # Automatically cancel the notification if it cannot be handled within a specific time MaxTtl: 5m # ZITADEL_NOTIFIACATIONS_MAXTTL # Failed attempts are retried after a configured delay (with exponential backoff). # Set a minimum and maximum delay and a factor for the backoff MinRetryDelay: 1s # ZITADEL_NOTIFIACATIONS_MINRETRYDELAY MaxRetryDelay: 20s # ZITADEL_NOTIFIACATIONS_MAXRETRYDELAY # Any factor below 1 will be set to 1 RetryDelayFactor: 1.5 # ZITADEL_NOTIFIACATIONS_RETRYDELAYFACTOR ``` # Additional Changes None # Additional Context - closes #8931
163 lines
5.0 KiB
Go
163 lines
5.0 KiB
Go
package command
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"time"
|
|
|
|
"github.com/zitadel/zitadel/internal/crypto"
|
|
"github.com/zitadel/zitadel/internal/domain"
|
|
"github.com/zitadel/zitadel/internal/eventstore"
|
|
"github.com/zitadel/zitadel/internal/query"
|
|
"github.com/zitadel/zitadel/internal/repository/notification"
|
|
)
|
|
|
|
// NotificationRequest holds all information required to request a
// notification for a user. It is captured at the time the triggering event
// occurs and later written to the eventstore as a notification.RequestedEvent
// (see Commands.RequestNotification). Optional fields are populated through
// the With* builder methods below.
type NotificationRequest struct {
	// UserID and UserResourceOwner identify the user the notification is for.
	UserID            string
	UserResourceOwner string
	// TriggerOrigin is the origin from which the notification was triggered
	// (presumably the requesting host/origin — confirm against callers).
	TriggerOrigin string
	// URLTemplate is the template for the link included in the notification
	// (set via WithURLTemplate).
	URLTemplate string
	// Code is the encrypted (verification) code to be sent and CodeExpiry its
	// lifetime (both set via WithCode).
	Code       *crypto.CryptoValue
	CodeExpiry time.Duration
	// EventType is the eventstore event type that triggered this notification.
	EventType eventstore.EventType
	// NotificationType determines the channel (e.g. email, SMS).
	NotificationType domain.NotificationType
	// MessageType selects the message text/template to use.
	MessageType string
	// UnverifiedNotificationChannel allows sending to a not (yet) verified
	// channel (set via WithUnverifiedChannel).
	UnverifiedNotificationChannel bool
	// Args are additional arguments passed on to the notification
	// (set via WithArgs).
	Args *domain.NotificationArguments
	// AggregateID and AggregateResourceOwner reference the aggregate the
	// notification relates to (set via WithAggregate).
	AggregateID            string
	AggregateResourceOwner string
	// IsOTP marks the notification as containing a one-time password
	// (set via WithOTP).
	IsOTP bool
	// RequiresPreviousDomain indicates the previous domain is needed to build
	// the notification (set via WithPreviousDomain).
	RequiresPreviousDomain bool
}
|
|
|
|
// NotificationRetryRequest extends a NotificationRequest with the information
// required to retry a previously failed delivery attempt
// (see Commands.NotificationRetryRequested).
type NotificationRetryRequest struct {
	NotificationRequest
	// BackOff is the delay to wait before the retry is handled.
	BackOff time.Duration
	// NotifyUser is the user's state at the time of the original request; it
	// is carried along so a retry does not pick up concurrent user changes.
	NotifyUser *query.NotifyUser
}
|
|
|
|
func NewNotificationRequest(
|
|
userID, resourceOwner, triggerOrigin string,
|
|
eventType eventstore.EventType,
|
|
notificationType domain.NotificationType,
|
|
messageType string,
|
|
) *NotificationRequest {
|
|
return &NotificationRequest{
|
|
UserID: userID,
|
|
UserResourceOwner: resourceOwner,
|
|
TriggerOrigin: triggerOrigin,
|
|
EventType: eventType,
|
|
NotificationType: notificationType,
|
|
MessageType: messageType,
|
|
}
|
|
}
|
|
|
|
func (r *NotificationRequest) WithCode(code *crypto.CryptoValue, expiry time.Duration) *NotificationRequest {
|
|
r.Code = code
|
|
r.CodeExpiry = expiry
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithURLTemplate(urlTemplate string) *NotificationRequest {
|
|
r.URLTemplate = urlTemplate
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithUnverifiedChannel() *NotificationRequest {
|
|
r.UnverifiedNotificationChannel = true
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithArgs(args *domain.NotificationArguments) *NotificationRequest {
|
|
r.Args = args
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithAggregate(id, resourceOwner string) *NotificationRequest {
|
|
r.AggregateID = id
|
|
r.AggregateResourceOwner = resourceOwner
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithOTP() *NotificationRequest {
|
|
r.IsOTP = true
|
|
return r
|
|
}
|
|
|
|
func (r *NotificationRequest) WithPreviousDomain() *NotificationRequest {
|
|
r.RequiresPreviousDomain = true
|
|
return r
|
|
}
|
|
|
|
// RequestNotification writes a new notification.RequestEvent with the notification.Aggregate to the eventstore
|
|
func (c *Commands) RequestNotification(
|
|
ctx context.Context,
|
|
resourceOwner string,
|
|
request *NotificationRequest,
|
|
) error {
|
|
id, err := c.idGenerator.Next()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = c.eventstore.Push(ctx, notification.NewRequestedEvent(ctx, ¬ification.NewAggregate(id, resourceOwner).Aggregate,
|
|
request.UserID,
|
|
request.UserResourceOwner,
|
|
request.AggregateID,
|
|
request.AggregateResourceOwner,
|
|
request.TriggerOrigin,
|
|
request.URLTemplate,
|
|
request.Code,
|
|
request.CodeExpiry,
|
|
request.EventType,
|
|
request.NotificationType,
|
|
request.MessageType,
|
|
request.UnverifiedNotificationChannel,
|
|
request.IsOTP,
|
|
request.RequiresPreviousDomain,
|
|
request.Args))
|
|
return err
|
|
}
|
|
|
|
// NotificationCanceled writes a new notification.CanceledEvent with the notification.Aggregate to the eventstore
|
|
func (c *Commands) NotificationCanceled(ctx context.Context, tx *sql.Tx, id, resourceOwner string, requestError error) error {
|
|
var errorMessage string
|
|
if requestError != nil {
|
|
errorMessage = requestError.Error()
|
|
}
|
|
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewCanceledEvent(ctx, ¬ification.NewAggregate(id, resourceOwner).Aggregate, errorMessage))
|
|
return err
|
|
}
|
|
|
|
// NotificationSent writes a new notification.SentEvent with the notification.Aggregate to the eventstore
|
|
func (c *Commands) NotificationSent(ctx context.Context, tx *sql.Tx, id, resourceOwner string) error {
|
|
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewSentEvent(ctx, ¬ification.NewAggregate(id, resourceOwner).Aggregate))
|
|
return err
|
|
}
|
|
|
|
// NotificationRetryRequested writes a new notification.RetryRequestEvent with the notification.Aggregate to the eventstore
|
|
func (c *Commands) NotificationRetryRequested(ctx context.Context, tx *sql.Tx, id, resourceOwner string, request *NotificationRetryRequest, requestError error) error {
|
|
var errorMessage string
|
|
if requestError != nil {
|
|
errorMessage = requestError.Error()
|
|
}
|
|
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewRetryRequestedEvent(ctx, ¬ification.NewAggregate(id, resourceOwner).Aggregate,
|
|
request.UserID,
|
|
request.UserResourceOwner,
|
|
request.AggregateID,
|
|
request.AggregateResourceOwner,
|
|
request.TriggerOrigin,
|
|
request.URLTemplate,
|
|
request.Code,
|
|
request.CodeExpiry,
|
|
request.EventType,
|
|
request.NotificationType,
|
|
request.MessageType,
|
|
request.UnverifiedNotificationChannel,
|
|
request.IsOTP,
|
|
request.Args,
|
|
request.NotifyUser,
|
|
request.BackOff,
|
|
errorMessage))
|
|
return err
|
|
}
|