zitadel/internal/repository/notification/notification.go
Livio Spring 8537805ea5
feat(notification): use event worker pool (#8962)
# Which Problems Are Solved

The current handling of notifications follows the same pattern as all
other projections:
Created events are handled sequentially (based on "position") by a
handler. During the process, a lot of information is aggregated (user,
texts, templates, ...).
This leads to back pressure on the projection, since handling an event
might take longer than the time until the next event (to be handled) is
created.

# How the Problems Are Solved

- The current user notification handler creates separate notification
events based on the user / session events.
- These events contain all the currently available and required
information, including the userID.
- These notification events are processed by notification workers, which
gather the necessary information (recipient address, texts, templates)
to send out the notifications.
- If a notification fails, a retry event is created based on the current
notification request, including the current state of the user (this
prevents race conditions where the user is changed in the meantime and
the notification would otherwise pick up the new state).
- The retry event is handled after a backoff delay, which increases with
every attempt (see the sketch after the configuration below).
- If the configured number of attempts is reached or the message has
expired (based on the config), a cancel event is created, letting the
workers know that the notification must no longer be handled.
- In case of a successful send, a sent event is created for the
notification aggregate and the existing "sent" events for the user /
session objects are stored.
- The following is added to the defaults.yaml to allow configuration of
the notification workers:
```yaml
Notifications:
  # The number of workers processing the notification request events.
  # If set to 0, no notification request events will be handled. This can be useful when running in
  # a multi-binary / multi-pod setup and allowing only certain executables to process the events.
  Workers: 1 # ZITADEL_NOTIFICATIONS_WORKERS
  # The number of events a single worker will process in one run.
  BulkLimit: 10 # ZITADEL_NOTIFICATIONS_BULKLIMIT
  # Time interval between scheduled runs for notification request events
  RequeueEvery: 2s # ZITADEL_NOTIFICATIONS_REQUEUEEVERY
  # The number of workers processing the notification retry events.
  # If set to 0, no notification retry events will be handled. This can be useful when running in
  # a multi-binary / multi-pod setup and allowing only certain executables to process the events.
  RetryWorkers: 1 # ZITADEL_NOTIFICATIONS_RETRYWORKERS
  # Time interval between scheduled runs for notification retry events
  RetryRequeueEvery: 2s # ZITADEL_NOTIFICATIONS_RETRYREQUEUEEVERY
  # Only instances are projected for which at least one projection-relevant event exists within the timeframe
  # from HandleActiveInstances duration in the past until the projection's current time.
  # If set to 0 (default), every instance is always considered active.
  HandleActiveInstances: 0s # ZITADEL_NOTIFICATIONS_HANDLEACTIVEINSTANCES
  # The maximum duration a transaction remains open
  # before it stops left-folding additional events
  # and updates the table.
  TransactionDuration: 1m # ZITADEL_NOTIFICATIONS_TRANSACTIONDURATION
  # Automatically cancel the notification after this number of failed attempts
  MaxAttempts: 3 # ZITADEL_NOTIFICATIONS_MAXATTEMPTS
  # Automatically cancel the notification if it cannot be handled within this time
  MaxTtl: 5m # ZITADEL_NOTIFICATIONS_MAXTTL
  # Failed attempts are retried after a configured delay (with exponential backoff).
  # Set a minimum and maximum delay and a factor for the backoff.
  MinRetryDelay: 1s  # ZITADEL_NOTIFICATIONS_MINRETRYDELAY
  MaxRetryDelay: 20s # ZITADEL_NOTIFICATIONS_MAXRETRYDELAY
  # Any factor below 1 will be set to 1.
  RetryDelayFactor: 1.5 # ZITADEL_NOTIFICATIONS_RETRYDELAYFACTOR
```
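
The interplay of `MaxAttempts`, `MaxTtl`, `MinRetryDelay`, `MaxRetryDelay` and `RetryDelayFactor` is easiest to see in code. The following is a minimal, self-contained sketch of how the backoff and the cancel decision could be computed; `backoffConfig`, `retryDelay` and `shouldCancel` are hypothetical names for illustration, not ZITADEL's actual implementation:

```go
package main

import (
	"fmt"
	"time"
)

// backoffConfig mirrors the relevant values from the defaults.yaml above.
type backoffConfig struct {
	MinRetryDelay    time.Duration
	MaxRetryDelay    time.Duration
	RetryDelayFactor float64
	MaxAttempts      int
	MaxTTL           time.Duration
}

// retryDelay computes the delay before the next attempt:
// MinRetryDelay * RetryDelayFactor^attempt, capped at MaxRetryDelay.
// Any factor below 1 is raised to 1, matching the config comment.
func (c backoffConfig) retryDelay(attempt int) time.Duration {
	factor := c.RetryDelayFactor
	if factor < 1 {
		factor = 1
	}
	delay := c.MinRetryDelay
	for i := 0; i < attempt; i++ {
		delay = time.Duration(float64(delay) * factor)
		if delay >= c.MaxRetryDelay {
			return c.MaxRetryDelay
		}
	}
	return delay
}

// shouldCancel reports whether the notification must be canceled instead of
// retried: either all attempts are used up or the request exceeded its TTL.
func (c backoffConfig) shouldCancel(attempt int, requestedAt time.Time) bool {
	return attempt >= c.MaxAttempts || time.Since(requestedAt) > c.MaxTTL
}

func main() {
	cfg := backoffConfig{
		MinRetryDelay:    time.Second,
		MaxRetryDelay:    20 * time.Second,
		RetryDelayFactor: 1.5,
		MaxAttempts:      3,
		MaxTTL:           5 * time.Minute,
	}
	for attempt := 0; attempt < cfg.MaxAttempts; attempt++ {
		fmt.Printf("attempt %d: retry after %s\n", attempt+1, cfg.retryDelay(attempt))
	}
}
```

With the defaults above, successive retry delays grow as 1s, 1.5s, 2.25s, ... and are capped at 20s.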


# Additional Changes

None

# Additional Context

- closes #8931
2024-11-27 15:01:17 +00:00


package notification

import (
	"context"
	"time"

	"github.com/zitadel/zitadel/internal/crypto"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/query"
)

const (
	notificationEventPrefix = "notification."
	RequestedType           = notificationEventPrefix + "requested"
	RetryRequestedType      = notificationEventPrefix + "retry.requested"
	SentType                = notificationEventPrefix + "sent"
	CanceledType            = notificationEventPrefix + "canceled"
)
// Request carries everything a notification worker needs to gather the
// remaining information and send out the notification.
type Request struct {
	UserID                        string                        `json:"userID"`
	UserResourceOwner             string                        `json:"userResourceOwner"`
	AggregateID                   string                        `json:"notificationAggregateID"`
	AggregateResourceOwner        string                        `json:"notificationAggregateResourceOwner"`
	TriggeredAtOrigin             string                        `json:"triggeredAtOrigin"`
	EventType                     eventstore.EventType          `json:"eventType"`
	MessageType                   string                        `json:"messageType"`
	NotificationType              domain.NotificationType       `json:"notificationType"`
	URLTemplate                   string                        `json:"urlTemplate,omitempty"`
	CodeExpiry                    time.Duration                 `json:"codeExpiry,omitempty"`
	Code                          *crypto.CryptoValue           `json:"code,omitempty"`
	UnverifiedNotificationChannel bool                          `json:"unverifiedNotificationChannel,omitempty"`
	IsOTP                         bool                          `json:"isOTP,omitempty"`
	RequiresPreviousDomain        bool                          `json:"RequiresPreviousDomain,omitempty"`
	Args                          *domain.NotificationArguments `json:"args,omitempty"`
}

// NotificationAggregateID returns the ID of the aggregate the notification
// was created for and falls back to the user ID if none is set.
func (e *Request) NotificationAggregateID() string {
	if e.AggregateID == "" {
		return e.UserID
	}
	return e.AggregateID
}

// NotificationAggregateResourceOwner returns the resource owner of the
// aggregate the notification was created for and falls back to the user's
// resource owner if none is set.
func (e *Request) NotificationAggregateResourceOwner() string {
	if e.AggregateResourceOwner == "" {
		return e.UserResourceOwner
	}
	return e.AggregateResourceOwner
}
// RequestedEvent is pushed whenever a notification needs to be sent
// and is picked up by the notification workers.
type RequestedEvent struct {
	eventstore.BaseEvent `json:"-"`
	Request              `json:"request"`
}

func (e *RequestedEvent) TriggerOrigin() string {
	return e.TriggeredAtOrigin
}

func (e *RequestedEvent) Payload() interface{} {
	return e
}

func (e *RequestedEvent) UniqueConstraints() []*eventstore.UniqueConstraint {
	return nil
}

func (e *RequestedEvent) SetBaseEvent(event *eventstore.BaseEvent) {
	e.BaseEvent = *event
}

func NewRequestedEvent(ctx context.Context,
	aggregate *eventstore.Aggregate,
	userID,
	userResourceOwner,
	aggregateID,
	aggregateResourceOwner,
	triggerOrigin,
	urlTemplate string,
	code *crypto.CryptoValue,
	codeExpiry time.Duration,
	eventType eventstore.EventType,
	notificationType domain.NotificationType,
	messageType string,
	unverifiedNotificationChannel,
	isOTP,
	requiresPreviousDomain bool,
	args *domain.NotificationArguments,
) *RequestedEvent {
	return &RequestedEvent{
		BaseEvent: *eventstore.NewBaseEventForPush(
			ctx,
			aggregate,
			RequestedType,
		),
		Request: Request{
			UserID:                        userID,
			UserResourceOwner:             userResourceOwner,
			AggregateID:                   aggregateID,
			AggregateResourceOwner:        aggregateResourceOwner,
			TriggeredAtOrigin:             triggerOrigin,
			EventType:                     eventType,
			MessageType:                   messageType,
			NotificationType:              notificationType,
			URLTemplate:                   urlTemplate,
			CodeExpiry:                    codeExpiry,
			Code:                          code,
			UnverifiedNotificationChannel: unverifiedNotificationChannel,
			IsOTP:                         isOTP,
			RequiresPreviousDomain:        requiresPreviousDomain,
			Args:                          args,
		},
	}
}
// SentEvent is pushed for the notification aggregate
// once the notification was successfully sent.
type SentEvent struct {
	eventstore.BaseEvent `json:"-"`
}

func (e *SentEvent) Payload() interface{} {
	return e
}

func (e *SentEvent) UniqueConstraints() []*eventstore.UniqueConstraint {
	return nil
}

func (e *SentEvent) SetBaseEvent(event *eventstore.BaseEvent) {
	e.BaseEvent = *event
}

func NewSentEvent(ctx context.Context,
	aggregate *eventstore.Aggregate,
) *SentEvent {
	return &SentEvent{
		BaseEvent: *eventstore.NewBaseEventForPush(
			ctx,
			aggregate,
			SentType,
		),
	}
}
// CanceledEvent is pushed when the notification must no longer be handled,
// e.g. after the maximum number of attempts is reached or the message expired.
type CanceledEvent struct {
	eventstore.BaseEvent `json:"-"`
	Error                string `json:"error"`
}

func (e *CanceledEvent) Payload() interface{} {
	return e
}

func (e *CanceledEvent) UniqueConstraints() []*eventstore.UniqueConstraint {
	return nil
}

func (e *CanceledEvent) SetBaseEvent(event *eventstore.BaseEvent) {
	e.BaseEvent = *event
}

func NewCanceledEvent(ctx context.Context, aggregate *eventstore.Aggregate, errorMessage string) *CanceledEvent {
	return &CanceledEvent{
		BaseEvent: *eventstore.NewBaseEventForPush(
			ctx,
			aggregate,
			CanceledType,
		),
		Error: errorMessage,
	}
}
// RetryRequestedEvent is pushed when sending a notification failed and a
// retry is scheduled. It snapshots the user state (NotifyUser) at the time
// of the failure to prevent race conditions with concurrent user changes.
type RetryRequestedEvent struct {
	eventstore.BaseEvent `json:"-"`
	Request              `json:"request"`
	Error                string            `json:"error"`
	NotifyUser           *query.NotifyUser `json:"notifyUser"`
	BackOff              time.Duration     `json:"backOff"`
}

func (e *RetryRequestedEvent) Payload() interface{} {
	return e
}

func (e *RetryRequestedEvent) UniqueConstraints() []*eventstore.UniqueConstraint {
	return nil
}

func (e *RetryRequestedEvent) SetBaseEvent(event *eventstore.BaseEvent) {
	e.BaseEvent = *event
}

func NewRetryRequestedEvent(
	ctx context.Context,
	aggregate *eventstore.Aggregate,
	userID,
	userResourceOwner,
	aggregateID,
	aggregateResourceOwner,
	triggerOrigin,
	urlTemplate string,
	code *crypto.CryptoValue,
	codeExpiry time.Duration,
	eventType eventstore.EventType,
	notificationType domain.NotificationType,
	messageType string,
	unverifiedNotificationChannel,
	isOTP bool,
	args *domain.NotificationArguments,
	notifyUser *query.NotifyUser,
	backoff time.Duration,
	errorMessage string,
) *RetryRequestedEvent {
	return &RetryRequestedEvent{
		BaseEvent: *eventstore.NewBaseEventForPush(
			ctx,
			aggregate,
			RetryRequestedType,
		),
		Request: Request{
			UserID:                        userID,
			UserResourceOwner:             userResourceOwner,
			AggregateID:                   aggregateID,
			AggregateResourceOwner:        aggregateResourceOwner,
			TriggeredAtOrigin:             triggerOrigin,
			EventType:                     eventType,
			MessageType:                   messageType,
			NotificationType:              notificationType,
			URLTemplate:                   urlTemplate,
			CodeExpiry:                    codeExpiry,
			Code:                          code,
			UnverifiedNotificationChannel: unverifiedNotificationChannel,
			IsOTP:                         isOTP,
			Args:                          args,
		},
		NotifyUser: notifyUser,
		BackOff:    backoff,
		Error:      errorMessage,
	}
}
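
To illustrate how these events fit together, here is a hedged sketch of a worker reacting to a single request event. `sendNotification` and the surrounding plumbing (loading the aggregate and the `query.NotifyUser`, computing the backoff from config, pushing the returned command) are assumptions for illustration and not part of this package:

```go
package worker

import (
	"context"
	"time"

	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/query"
	"github.com/zitadel/zitadel/internal/repository/notification"
)

// sendNotification stands in for the actual delivery (email, SMS, ...);
// hypothetical, the real logic is omitted here.
func sendNotification(ctx context.Context, req notification.Request) error {
	return nil
}

// handleRequest returns the follow-up command for a single request event:
// a SentEvent on success, or a RetryRequestedEvent carrying the current
// user state and a backoff delay on failure.
func handleRequest(
	ctx context.Context,
	agg *eventstore.Aggregate,
	e *notification.RequestedEvent,
	user *query.NotifyUser,
	backoff time.Duration,
) eventstore.Command {
	if err := sendNotification(ctx, e.Request); err != nil {
		// Snapshot the user state into the retry event so a concurrent
		// user change cannot alter the notification's content.
		return notification.NewRetryRequestedEvent(ctx, agg,
			e.UserID, e.UserResourceOwner,
			e.AggregateID, e.AggregateResourceOwner,
			e.TriggeredAtOrigin, e.URLTemplate,
			e.Code, e.CodeExpiry,
			e.EventType, e.NotificationType, e.MessageType,
			e.UnverifiedNotificationChannel, e.IsOTP,
			e.Args, user, backoff, err.Error(),
		)
	}
	return notification.NewSentEvent(ctx, agg)
}
```

Passing the `query.NotifyUser` snapshot into `NewRetryRequestedEvent` is what protects retries from concurrent user changes, as described in the PR description above.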