fix: use fixed active instances duration (#5567)

* fix: use fixed active instances duration

* fix active instances tests

* fix syntax error

* run pipeline

---------

Co-authored-by: Silvan <silvan.reusser@gmail.com>
Elio Bischof 2023-03-30 13:01:27 +02:00 committed by GitHub
parent 9b30d6ad83
commit 887e2f474d
7 changed files with 107 additions and 101 deletions

@@ -159,10 +159,10 @@ Projections:
   ConcurrentInstances: 1
   # Limit of returned events per query
   BulkLimit: 200
-  # If HandleInactiveInstances this is false, only instances are projected,
-  # for which at least a projection relevant event exists withing the timeframe
-  # from twice the RequeueEvery time in the past until the projections current time
-  HandleInactiveInstances: false
+  # Only instance are projected, for which at least a projection relevant event exists withing the timeframe
+  # from HandleActiveInstances duration in the past until the projections current time
+  # Defaults to twice the RequeueEvery duration
+  HandleActiveInstances: 120s
   # In the Customizations section, all settings from above can be overwritten for each specific projection
   Customizations:
     Projects:
@@ -174,7 +174,8 @@ Projections:
     # The NotificationsQuotas projection is used for calling quota webhooks
     NotificationsQuotas:
       # Delivery guarantee requirements are probably higher for quota webhooks
-      HandleInactiveInstances: true
+      # Defaults to 45 days
+      HandleActiveInstances: 1080h
       # As quota notification projections don't result in database statements, retries don't have an effect
       MaxFailureCount: 0
       # Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the db too much.
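The comments above describe the new behaviour: an instance is only picked up by a projection if at least one projection-relevant event exists within the HandleActiveInstances window, and the window falls back to twice RequeueEvery when it is not set. Below is a minimal Go sketch of that cutoff calculation; the function name and values are illustrative only and are not taken from the Zitadel codebase.

package main

import (
	"fmt"
	"time"
)

// activeInstanceCutoff returns the earliest creation date a projection-relevant
// event may have for its instance to still count as "active". A non-positive
// handleActiveInstances falls back to twice requeueEvery, mirroring the
// documented default above. Illustrative only, not Zitadel code.
func activeInstanceCutoff(now time.Time, handleActiveInstances, requeueEvery time.Duration) time.Time {
	if handleActiveInstances <= 0 {
		handleActiveInstances = 2 * requeueEvery
	}
	return now.Add(-handleActiveInstances)
}

func main() {
	now := time.Date(2023, 3, 30, 12, 0, 0, 0, time.UTC)
	// HandleActiveInstances: 120s keeps only instances with an event in the last two minutes.
	fmt.Println(activeInstanceCutoff(now, 120*time.Second, time.Minute))
	// Unset (zero) falls back to twice RequeueEvery.
	fmt.Println(activeInstanceCutoff(now, 0, time.Minute))
}

With the default of 120s, an instance whose newest relevant event is older than two minutes is skipped on scheduled runs until a new event arrives.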

@@ -84,14 +84,14 @@ Projections:
   RetryFailedAfter: 1s
   # Retried execution number of database statements resulting from projected events
   MaxFailureCount: 5
-  # Number of concurrent projection routines
+  # Number of concurrent projection routines. Values of 0 and below are overwritten to 1
   ConcurrentInstances: 1
   # Limit of returned events per query
   BulkLimit: 200
-  # If HandleInactiveInstances this is false, only instances are projected,
-  # for which at least a projection relevant event exists withing the timeframe
-  # from twice the RequeueEvery time in the past until the projections current time
-  HandleInactiveInstances: false
+  # Only instance are projected, for which at least a projection relevant event exists withing the timeframe
+  # from HandleActiveInstances duration in the past until the projections current time
+  # Defaults to twice the RequeueEvery duration
+  HandleActiveInstances: 120s
   # In the Customizations section, all settings from above can be overwritten for each specific projection
   Customizations:
     Projects:
@@ -103,7 +103,8 @@ Projections:
     # The NotificationsQuotas projection is used for calling quota webhooks
     NotificationsQuotas:
      # Delivery guarantee requirements are probably higher for quota webhooks
-      HandleInactiveInstances: true
+      # Defaults to 45 days
+      HandleActiveInstances: 1080h
       # As quota notification projections don't result in database statements, retries don't have an effect
       MaxFailureCount: 0
       # Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the db too much.

@@ -20,12 +20,12 @@ const (
 type ProjectionHandlerConfig struct {
 	HandlerConfig
 	ProjectionName        string
 	RequeueEvery          time.Duration
 	RetryFailedAfter      time.Duration
 	Retries               uint
 	ConcurrentInstances   uint
-	HandleInactiveInstances bool
+	HandleActiveInstances time.Duration
 }

 // Update updates the projection with the given statements
@@ -49,19 +49,19 @@ type NowFunc func() time.Time
 type ProjectionHandler struct {
 	Handler
 	ProjectionName      string
 	reduce              Reduce
 	update              Update
 	searchQuery         SearchQuery
 	triggerProjection   *time.Timer
 	lock                Lock
 	unlock              Unlock
 	requeueAfter        time.Duration
 	retryFailedAfter    time.Duration
 	retries             int
 	concurrentInstances int
-	handleInactiveInstances bool
+	handleActiveInstances time.Duration
 	nowFunc             NowFunc
 }

 func NewProjectionHandler(
@@ -79,20 +79,20 @@ func NewProjectionHandler(
 		concurrentInstances = 1
 	}
 	h := &ProjectionHandler{
 		Handler:             NewHandler(config.HandlerConfig),
 		ProjectionName:      config.ProjectionName,
 		reduce:              reduce,
 		update:              update,
 		searchQuery:         query,
 		lock:                lock,
 		unlock:              unlock,
 		requeueAfter:        config.RequeueEvery,
 		triggerProjection:   time.NewTimer(0), // first trigger is instant on startup
 		retryFailedAfter:    config.RetryFailedAfter,
 		retries:             int(config.Retries),
 		concurrentInstances: concurrentInstances,
-		handleInactiveInstances: config.HandleInactiveInstances,
+		handleActiveInstances: config.HandleActiveInstances,
 		nowFunc:             time.Now,
 	}
 	go func() {
@@ -229,11 +229,11 @@ func (h *ProjectionHandler) schedule(ctx context.Context) {
 			}
 			go h.cancelOnErr(lockCtx, errs, cancelLock)
 		}
-		if succeededOnce && !h.handleInactiveInstances {
+		if succeededOnce {
 			// since we have at least one successful run, we can restrict it to events not older than
-			// twice the requeue time (just to be sure not to miss an event)
+			// h.handleActiveInstances (just to be sure not to miss an event)
 			// This ensures that only instances with recent events on the handler are projected
-			query = query.CreationDateAfter(h.nowFunc().Add(-2 * h.requeueAfter))
+			query = query.CreationDateAfter(h.nowFunc().Add(-1 * h.handleActiveInstances))
 		}
 		ids, err := h.Eventstore.InstanceIDs(ctx, query.Builder())
 		if err != nil {
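This hunk carries the core of the fix: the query window is no longer gated by handleInactiveInstances and derived from the requeue interval, but taken directly from the configured handleActiveInstances duration. The stand-alone comparison below is illustrative only; it also shows why the test expectation further down moves from now().Add(-2 * time.Hour) to now().Add(-2 * time.Minute), since the tests use a requeueAfter of one hour and a handleActiveInstances of two minutes.

package main

import (
	"fmt"
	"time"
)

// cutoff contrasts the old and the new instance-query window. The helper and
// its values are invented for this example; they are not part of the handler.
func cutoff(now time.Time, fixedWindow bool, requeueAfter, handleActiveInstances time.Duration) time.Time {
	if fixedWindow {
		// new behaviour: fixed, configurable duration
		return now.Add(-1 * handleActiveInstances)
	}
	// old behaviour: twice the requeue interval
	return now.Add(-2 * requeueAfter)
}

func main() {
	now := time.Date(2023, 3, 30, 12, 0, 0, 0, time.UTC)
	fmt.Println(cutoff(now, false, time.Hour, 2*time.Minute)) // old: now - 2h
	fmt.Println(cutoff(now, true, time.Hour, 2*time.Minute))  // new: now - 2m
}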

@@ -668,13 +668,13 @@ func TestProjection_schedule(t *testing.T) {
 		ctx context.Context
 	}
 	type fields struct {
 		reduce     Reduce
 		update     Update
 		eventstore func(t *testing.T) *eventstore.Eventstore
 		lock       *lockMock
 		unlock     *unlockMock
 		query      SearchQuery
-		handleInactiveInstances bool
+		handleActiveInstances time.Duration
 	}
 	type want struct {
 		locksCount int
@@ -711,7 +711,7 @@ func TestProjection_schedule(t *testing.T) {
 					),
 				)
 			},
-			handleInactiveInstances: false,
+			handleActiveInstances: 2 * time.Minute,
 		},
 		want{
 			locksCount: 0,
@@ -739,7 +739,7 @@ func TestProjection_schedule(t *testing.T) {
 					),
 				)
 			},
-			handleInactiveInstances: false,
+			handleActiveInstances: 2 * time.Minute,
 		},
 		want{
 			locksCount: 0,
@@ -771,7 +771,7 @@ func TestProjection_schedule(t *testing.T) {
 				firstErr: ErrLock,
 				canceled: make(chan bool, 1),
 			},
-			handleInactiveInstances: false,
+			handleActiveInstances: 2 * time.Minute,
 		},
 		want{
 			locksCount: 1,
@@ -803,9 +803,9 @@ func TestProjection_schedule(t *testing.T) {
 				firstErr: nil,
 				errWait:  100 * time.Millisecond,
 			},
 			unlock: &unlockMock{},
 			query:  testQuery(nil, 0, ErrQuery),
-			handleInactiveInstances: false,
+			handleActiveInstances: 2 * time.Minute,
 		},
 		want{
 			locksCount: 1,
@@ -837,7 +837,7 @@ func TestProjection_schedule(t *testing.T) {
 					}, {
 						Field:     repository.FieldCreationDate,
 						Operation: repository.OperationGreater,
-						Value:     now().Add(-2 * time.Hour),
+						Value:     now().Add(-2 * time.Minute),
 					}},
 						"206626268110651755",
 					).
@@ -855,10 +855,10 @@ func TestProjection_schedule(t *testing.T) {
 					firstErr: nil,
 					errWait:  100 * time.Millisecond,
 				},
 				unlock: &unlockMock{},
-				handleInactiveInstances: false,
+				handleActiveInstances: 2 * time.Minute,
 				reduce: testReduce(newTestStatement("aggregate1", 1, 0)),
 				update: testUpdate(t, 1, 1, nil),
 				query: testQuery(
 					eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
 						AddQuery().
@@ -894,6 +894,10 @@ func TestProjection_schedule(t *testing.T) {
 						Field:     repository.FieldInstanceID,
 						Operation: repository.OperationNotIn,
 						Value:     database.StringArray{""},
+					}, {
+						Field:     repository.FieldCreationDate,
+						Operation: repository.OperationGreater,
+						Value:     now().Add(-45 * time.Hour),
 					}}, "206626268110651755").
 					ExpectFilterEvents(&repository.Event{
 						AggregateType: "quota",
@@ -909,10 +913,10 @@ func TestProjection_schedule(t *testing.T) {
 					firstErr: nil,
 					errWait:  100 * time.Millisecond,
 				},
 				unlock: &unlockMock{},
-				handleInactiveInstances: true,
+				handleActiveInstances: 45 * time.Hour,
 				reduce: testReduce(newTestStatement("aggregate1", 1, 0)),
 				update: testUpdate(t, 1, 1, nil),
 				query: testQuery(
 					eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
 						AddQuery().
@@ -936,17 +940,17 @@ func TestProjection_schedule(t *testing.T) {
 					EventQueue: make(chan eventstore.Event, 10),
 					Eventstore: tt.fields.eventstore(t),
 				},
 				reduce:              tt.fields.reduce,
 				update:              tt.fields.update,
 				searchQuery:         tt.fields.query,
 				lock:                tt.fields.lock.lock(),
 				unlock:              tt.fields.unlock.unlock(),
 				triggerProjection:   time.NewTimer(0), // immediately run an iteration
 				requeueAfter:        time.Hour,        // run only one iteration
 				concurrentInstances: 1,
-				handleInactiveInstances: tt.fields.handleInactiveInstances,
+				handleActiveInstances: tt.fields.handleActiveInstances,
 				retries:             0,
 				nowFunc:             now,
 			}
 			ctx, cancel := context.WithCancel(tt.args.ctx)
 			go func() {

@@ -252,7 +252,7 @@ func given(t *testing.T, args args, want want) (context.Context, *clock.Mock, *e
 	svc := logstore.New(
 		quotaqueriermock.NewNoopQuerier(&args.config, periodStart),
-		logstore.UsageReporterFunc(func(context.Context, []*quota.NotifiedEvent) error { return nil }),
+		logstore.UsageReporterFunc(func(context.Context, []*quota.NotificationDueEvent) error { return nil }),
 		mainEmitter,
 		secondaryEmitter)

@@ -5,20 +5,20 @@ import (
 )

 type Config struct {
 	RequeueEvery          time.Duration
 	RetryFailedAfter      time.Duration
 	MaxFailureCount       uint
 	ConcurrentInstances   uint
 	BulkLimit             uint64
 	Customizations        map[string]CustomConfig
-	HandleInactiveInstances bool
+	HandleActiveInstances time.Duration
 }

 type CustomConfig struct {
 	RequeueEvery          *time.Duration
 	RetryFailedAfter      *time.Duration
 	MaxFailureCount       *uint
 	ConcurrentInstances   *uint
 	BulkLimit             *uint64
-	HandleInactiveInstances *bool
+	HandleActiveInstances *time.Duration
 }

@@ -81,11 +81,11 @@ func Create(ctx context.Context, sqlClient *database.DB, es *eventstore.Eventsto
 			HandlerConfig: handler.HandlerConfig{
 				Eventstore: es,
 			},
 			RequeueEvery:        config.RequeueEvery,
 			RetryFailedAfter:    config.RetryFailedAfter,
 			Retries:             config.MaxFailureCount,
 			ConcurrentInstances: config.ConcurrentInstances,
-			HandleInactiveInstances: config.HandleInactiveInstances,
+			HandleActiveInstances: config.HandleActiveInstances,
 		},
 		Client:        sqlClient,
 		SequenceTable: CurrentSeqTable,
@ -175,8 +175,8 @@ func applyCustomConfig(config crdb.StatementHandlerConfig, customConfig CustomCo
if customConfig.RetryFailedAfter != nil { if customConfig.RetryFailedAfter != nil {
config.RetryFailedAfter = *customConfig.RetryFailedAfter config.RetryFailedAfter = *customConfig.RetryFailedAfter
} }
if customConfig.HandleInactiveInstances != nil { if customConfig.HandleActiveInstances != nil {
config.HandleInactiveInstances = *customConfig.HandleInactiveInstances config.HandleActiveInstances = *customConfig.HandleActiveInstances
} }
return config return config
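applyCustomConfig keeps the existing pointer-override pattern: a nil field in CustomConfig inherits the global default, a non-nil field overrides it for that projection. The stand-alone sketch below illustrates the pattern with simplified stand-in types, not the actual crdb.StatementHandlerConfig.

package main

import (
	"fmt"
	"time"
)

// globalConfig holds the globally configured value; customConfig carries an
// optional per-projection override as a pointer (nil means "inherit").
// Both types are simplified stand-ins for this example.
type globalConfig struct{ HandleActiveInstances time.Duration }

type customConfig struct{ HandleActiveInstances *time.Duration }

// apply mirrors the override logic above: only non-nil custom values replace
// the defaults.
func apply(c globalConfig, custom customConfig) globalConfig {
	if custom.HandleActiveInstances != nil {
		c.HandleActiveInstances = *custom.HandleActiveInstances
	}
	return c
}

func main() {
	base := globalConfig{HandleActiveInstances: 120 * time.Second}
	quotaWindow := 1080 * time.Hour // the NotificationsQuotas override from the defaults above
	fmt.Println(apply(base, customConfig{}))                                    // {2m0s}: default kept
	fmt.Println(apply(base, customConfig{HandleActiveInstances: &quotaWindow})) // {1080h0m0s}: overridden
}

This is how the NotificationsQuotas projection ends up with a 1080h (45 day) window while the other projections keep the 120s default.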