// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/zitadel/zitadel/internal/notification/handlers (interfaces: Commands)
//
// Generated by this command:
//
//	mockgen -package mock -destination ./mock/commands.mock.go github.com/zitadel/zitadel/internal/notification/handlers Commands
//

// Package mock is a generated GoMock package.
package mock
import (
	context "context"
	sql "database/sql"
	reflect "reflect"

	command "github.com/zitadel/zitadel/internal/command"
	senders "github.com/zitadel/zitadel/internal/notification/senders"
	milestone "github.com/zitadel/zitadel/internal/repository/milestone"
	quota "github.com/zitadel/zitadel/internal/repository/quota"
	gomock "go.uber.org/mock/gomock"
)
|
|
|
|
|
// MockCommands is a mock of Commands interface.
type MockCommands struct {
	ctrl     *gomock.Controller
	recorder *MockCommandsMockRecorder
	isgomock struct{}
}
|
|
|
|
|
// MockCommandsMockRecorder is the mock recorder for MockCommands.
type MockCommandsMockRecorder struct {
	mock *MockCommands
}
|
|
|
|
|
// NewMockCommands creates a new mock instance.
func NewMockCommands(ctrl *gomock.Controller) *MockCommands {
	mock := &MockCommands{ctrl: ctrl}
	mock.recorder = &MockCommandsMockRecorder{mock}
	return mock
}
|
|
|
|
|
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCommands) EXPECT() *MockCommandsMockRecorder {
	return m.recorder
}
|
|
|
|
|
// HumanEmailVerificationCodeSent mocks base method.
func (m *MockCommands) HumanEmailVerificationCodeSent(ctx context.Context, orgID, userID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanEmailVerificationCodeSent", ctx, orgID, userID)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanEmailVerificationCodeSent indicates an expected call of HumanEmailVerificationCodeSent.
func (mr *MockCommandsMockRecorder) HumanEmailVerificationCodeSent(ctx, orgID, userID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanEmailVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanEmailVerificationCodeSent), ctx, orgID, userID)
}
|
|
|
|
|
// HumanInitCodeSent mocks base method.
func (m *MockCommands) HumanInitCodeSent(ctx context.Context, orgID, userID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanInitCodeSent", ctx, orgID, userID)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanInitCodeSent indicates an expected call of HumanInitCodeSent.
func (mr *MockCommandsMockRecorder) HumanInitCodeSent(ctx, orgID, userID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanInitCodeSent), ctx, orgID, userID)
}
|
|
|
|
|
// HumanOTPEmailCodeSent mocks base method.
func (m *MockCommands) HumanOTPEmailCodeSent(ctx context.Context, userID, resourceOwner string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanOTPEmailCodeSent", ctx, userID, resourceOwner)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanOTPEmailCodeSent indicates an expected call of HumanOTPEmailCodeSent.
func (mr *MockCommandsMockRecorder) HumanOTPEmailCodeSent(ctx, userID, resourceOwner any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPEmailCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPEmailCodeSent), ctx, userID, resourceOwner)
}
|
|
|
|
|
// HumanOTPSMSCodeSent mocks base method.
func (m *MockCommands) HumanOTPSMSCodeSent(ctx context.Context, userID, resourceOwner string, generatorInfo *senders.CodeGeneratorInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanOTPSMSCodeSent", ctx, userID, resourceOwner, generatorInfo)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanOTPSMSCodeSent indicates an expected call of HumanOTPSMSCodeSent.
func (mr *MockCommandsMockRecorder) HumanOTPSMSCodeSent(ctx, userID, resourceOwner, generatorInfo any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanOTPSMSCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanOTPSMSCodeSent), ctx, userID, resourceOwner, generatorInfo)
}
|
|
|
|
|
// HumanPasswordlessInitCodeSent mocks base method.
func (m *MockCommands) HumanPasswordlessInitCodeSent(ctx context.Context, userID, resourceOwner, codeID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanPasswordlessInitCodeSent", ctx, userID, resourceOwner, codeID)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanPasswordlessInitCodeSent indicates an expected call of HumanPasswordlessInitCodeSent.
func (mr *MockCommandsMockRecorder) HumanPasswordlessInitCodeSent(ctx, userID, resourceOwner, codeID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPasswordlessInitCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPasswordlessInitCodeSent), ctx, userID, resourceOwner, codeID)
}
|
|
|
|
|
// HumanPhoneVerificationCodeSent mocks base method.
func (m *MockCommands) HumanPhoneVerificationCodeSent(ctx context.Context, orgID, userID string, generatorInfo *senders.CodeGeneratorInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HumanPhoneVerificationCodeSent", ctx, orgID, userID, generatorInfo)
	ret0, _ := ret[0].(error)
	return ret0
}

// HumanPhoneVerificationCodeSent indicates an expected call of HumanPhoneVerificationCodeSent.
func (mr *MockCommandsMockRecorder) HumanPhoneVerificationCodeSent(ctx, orgID, userID, generatorInfo any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HumanPhoneVerificationCodeSent", reflect.TypeOf((*MockCommands)(nil).HumanPhoneVerificationCodeSent), ctx, orgID, userID, generatorInfo)
}
|
|
|
|
|
// InviteCodeSent mocks base method.
func (m *MockCommands) InviteCodeSent(ctx context.Context, orgID, userID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "InviteCodeSent", ctx, orgID, userID)
	ret0, _ := ret[0].(error)
	return ret0
}

// InviteCodeSent indicates an expected call of InviteCodeSent.
func (mr *MockCommandsMockRecorder) InviteCodeSent(ctx, orgID, userID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InviteCodeSent", reflect.TypeOf((*MockCommands)(nil).InviteCodeSent), ctx, orgID, userID)
}
|
|
|
|
|
// MilestonePushed mocks base method.
func (m *MockCommands) MilestonePushed(ctx context.Context, instanceID string, msType milestone.Type, endpoints []string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MilestonePushed", ctx, instanceID, msType, endpoints)
	ret0, _ := ret[0].(error)
	return ret0
}

// MilestonePushed indicates an expected call of MilestonePushed.
func (mr *MockCommandsMockRecorder) MilestonePushed(ctx, instanceID, msType, endpoints any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MilestonePushed", reflect.TypeOf((*MockCommands)(nil).MilestonePushed), ctx, instanceID, msType, endpoints)
}
|
|
|
|
|
// NotificationCanceled mocks base method.
func (m *MockCommands) NotificationCanceled(ctx context.Context, tx *sql.Tx, id, resourceOwner string, err error) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NotificationCanceled", ctx, tx, id, resourceOwner, err)
	ret0, _ := ret[0].(error)
	return ret0
}

// NotificationCanceled indicates an expected call of NotificationCanceled.
func (mr *MockCommandsMockRecorder) NotificationCanceled(ctx, tx, id, resourceOwner, err any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationCanceled", reflect.TypeOf((*MockCommands)(nil).NotificationCanceled), ctx, tx, id, resourceOwner, err)
}
|
|
|
|
|
|
|
|
// NotificationRetryRequested mocks base method.
func (m *MockCommands) NotificationRetryRequested(ctx context.Context, tx *sql.Tx, id, resourceOwner string, request *command.NotificationRetryRequest, err error) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NotificationRetryRequested", ctx, tx, id, resourceOwner, request, err)
	ret0, _ := ret[0].(error)
	return ret0
}

// NotificationRetryRequested indicates an expected call of NotificationRetryRequested.
func (mr *MockCommandsMockRecorder) NotificationRetryRequested(ctx, tx, id, resourceOwner, request, err any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationRetryRequested", reflect.TypeOf((*MockCommands)(nil).NotificationRetryRequested), ctx, tx, id, resourceOwner, request, err)
}
|
|
|
|
|
|
|
|
// NotificationSent mocks base method.
func (m *MockCommands) NotificationSent(ctx context.Context, tx *sql.Tx, id, instanceID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NotificationSent", ctx, tx, id, instanceID)
	ret0, _ := ret[0].(error)
	return ret0
}

// NotificationSent indicates an expected call of NotificationSent.
func (mr *MockCommandsMockRecorder) NotificationSent(ctx, tx, id, instanceID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotificationSent", reflect.TypeOf((*MockCommands)(nil).NotificationSent), ctx, tx, id, instanceID)
}
|
|
|
|
|
// OTPEmailSent mocks base method.
func (m *MockCommands) OTPEmailSent(ctx context.Context, sessionID, resourceOwner string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "OTPEmailSent", ctx, sessionID, resourceOwner)
	ret0, _ := ret[0].(error)
	return ret0
}

// OTPEmailSent indicates an expected call of OTPEmailSent.
func (mr *MockCommandsMockRecorder) OTPEmailSent(ctx, sessionID, resourceOwner any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPEmailSent", reflect.TypeOf((*MockCommands)(nil).OTPEmailSent), ctx, sessionID, resourceOwner)
}
|
|
|
|
|
// OTPSMSSent mocks base method.
func (m *MockCommands) OTPSMSSent(ctx context.Context, sessionID, resourceOwner string, generatorInfo *senders.CodeGeneratorInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "OTPSMSSent", ctx, sessionID, resourceOwner, generatorInfo)
	ret0, _ := ret[0].(error)
	return ret0
}

// OTPSMSSent indicates an expected call of OTPSMSSent.
func (mr *MockCommandsMockRecorder) OTPSMSSent(ctx, sessionID, resourceOwner, generatorInfo any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OTPSMSSent", reflect.TypeOf((*MockCommands)(nil).OTPSMSSent), ctx, sessionID, resourceOwner, generatorInfo)
}
|
|
|
|
|
// PasswordChangeSent mocks base method.
func (m *MockCommands) PasswordChangeSent(ctx context.Context, orgID, userID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PasswordChangeSent", ctx, orgID, userID)
	ret0, _ := ret[0].(error)
	return ret0
}

// PasswordChangeSent indicates an expected call of PasswordChangeSent.
func (mr *MockCommandsMockRecorder) PasswordChangeSent(ctx, orgID, userID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordChangeSent", reflect.TypeOf((*MockCommands)(nil).PasswordChangeSent), ctx, orgID, userID)
}
|
|
|
|
|
// PasswordCodeSent mocks base method.
func (m *MockCommands) PasswordCodeSent(ctx context.Context, orgID, userID string, generatorInfo *senders.CodeGeneratorInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PasswordCodeSent", ctx, orgID, userID, generatorInfo)
	ret0, _ := ret[0].(error)
	return ret0
}

// PasswordCodeSent indicates an expected call of PasswordCodeSent.
func (mr *MockCommandsMockRecorder) PasswordCodeSent(ctx, orgID, userID, generatorInfo any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PasswordCodeSent", reflect.TypeOf((*MockCommands)(nil).PasswordCodeSent), ctx, orgID, userID, generatorInfo)
}
|
|
|
|
|
// RequestNotification mocks base method.
func (m *MockCommands) RequestNotification(ctx context.Context, instanceID string, request *command.NotificationRequest) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RequestNotification", ctx, instanceID, request)
	ret0, _ := ret[0].(error)
	return ret0
}

// RequestNotification indicates an expected call of RequestNotification.
func (mr *MockCommandsMockRecorder) RequestNotification(ctx, instanceID, request any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestNotification", reflect.TypeOf((*MockCommands)(nil).RequestNotification), ctx, instanceID, request)
}
|
|
|
|
|
// UsageNotificationSent mocks base method.
func (m *MockCommands) UsageNotificationSent(ctx context.Context, dueEvent *quota.NotificationDueEvent) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UsageNotificationSent", ctx, dueEvent)
	ret0, _ := ret[0].(error)
	return ret0
}

// UsageNotificationSent indicates an expected call of UsageNotificationSent.
func (mr *MockCommandsMockRecorder) UsageNotificationSent(ctx, dueEvent any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UsageNotificationSent", reflect.TypeOf((*MockCommands)(nil).UsageNotificationSent), ctx, dueEvent)
}
|
|
|
|
|
// UserDomainClaimedSent mocks base method.
func (m *MockCommands) UserDomainClaimedSent(ctx context.Context, orgID, userID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UserDomainClaimedSent", ctx, orgID, userID)
	ret0, _ := ret[0].(error)
	return ret0
}

// UserDomainClaimedSent indicates an expected call of UserDomainClaimedSent.
func (mr *MockCommandsMockRecorder) UserDomainClaimedSent(ctx, orgID, userID any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserDomainClaimedSent", reflect.TypeOf((*MockCommands)(nil).UserDomainClaimedSent), ctx, orgID, userID)
}
|