fix: reduce load on view tables (#4716)
* fix: reduce load on view tables
* create prerelease
* linting: pass context to view handlers
* fix error handling of refresh token handler
* fix: improve processing of successful instanceIDs on views
* fix revert intended change in .golangci.yaml
* fix: set timeout for processInstances in spooler
* fix: reduce update to active tokens on profile change
* change token expiration query to db now()
* remove branch from .releaserc.js
@@ -17,10 +17,10 @@ const (
 type Handler interface {
 	ViewModel() string
-	EventQuery(instanceIDs ...string) (*models.SearchQuery, error)
+	EventQuery(instanceIDs []string) (*models.SearchQuery, error)
 	Reduce(*models.Event) error
 	OnError(event *models.Event, err error) error
-	OnSuccess() error
+	OnSuccess(instanceIDs []string) error
 	MinimumCycleDuration() time.Duration
 	LockDuration() time.Duration
 	QueryLimit() uint64
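The switch from variadic `instanceIDs ...string` to a plain `[]string`, together with the new `OnSuccess(instanceIDs []string)`, ripples through every view handler. A minimal sketch of how an implementation might look after the change; the handler type, view name, and helper methods below are assumptions for illustration, not zitadel's actual code:

// Illustrative handler; type, view name, and helpers are assumptions.
type userSessionHandler struct {
	view *view.View // hypothetical view accessor
}

func (h *userSessionHandler) EventQuery(instanceIDs []string) (*models.SearchQuery, error) {
	// Read the latest processed sequences for exactly these instances,
	// instead of loading state for every instance in the system.
	sequences, err := h.view.GetLatestUserSessionSequences(instanceIDs)
	if err != nil {
		return nil, err
	}
	return newSearchQuery(sequences, instanceIDs), nil // hypothetical builder
}

func (h *userSessionHandler) OnSuccess(instanceIDs []string) error {
	// Update bookkeeping only for the instances that were processed.
	return h.view.UpdateUserSessionSpoolerRunTimestamp(instanceIDs)
}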
@@ -32,7 +32,7 @@ type Handler interface {
 	Subscription() *v1.Subscription
 }

-func ReduceEvent(handler Handler, event *models.Event) {
+func ReduceEvent(ctx context.Context, handler Handler, event *models.Event) {
 	defer func() {
 		err := recover()

@@ -42,7 +42,7 @@ func ReduceEvent(handler Handler, event *models.Event) {
 			"cause", err,
 			"stack", string(debug.Stack()),
 			"sequence", event.Sequence,
-			"instnace", event.InstanceID,
+			"instance", event.InstanceID,
 		).Error("reduce panicked")
 	}
 }()

@@ -60,7 +60,7 @@ func ReduceEvent(handler Handler, event *models.Event) {
 		SearchQuery().
 		SetLimit(eventLimit)

-	unprocessedEvents, err := handler.Eventstore().FilterEvents(context.Background(), searchQuery)
+	unprocessedEvents, err := handler.Eventstore().FilterEvents(ctx, searchQuery)
 	if err != nil {
 		logging.WithFields("sequence", event.Sequence).Warn("filter failed")
 		return
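`ReduceEvent` now threads the caller's `context.Context` down to `FilterEvents` instead of creating a fresh `context.Background()`, so cancelling the caller also aborts any in-flight eventstore query. A sketch of a caller under that assumption (the consumer loop itself is not part of this diff):

// Hypothetical consumer loop: when ctx is cancelled, the FilterEvents
// call inside ReduceEvent now observes the cancellation too.
func consume(ctx context.Context, h query.Handler, events <-chan *models.Event) {
	for {
		select {
		case <-ctx.Done():
			return
		case event := <-events:
			query.ReduceEvent(ctx, h, event)
		}
	}
}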
@@ -5,6 +5,7 @@ import (

 	"github.com/zitadel/logging"

+	"github.com/zitadel/zitadel/internal/eventstore"
 	v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
 	"github.com/zitadel/zitadel/internal/eventstore/v1/query"
 	"github.com/zitadel/zitadel/internal/id"
@@ -12,6 +13,7 @@ import (

 type Config struct {
 	Eventstore        v1.Eventstore
+	EventstoreV2      *eventstore.Eventstore
 	Locker            Locker
 	ViewHandlers      []query.Handler
 	ConcurrentWorkers int
@@ -31,6 +33,7 @@ func (c *Config) New() *Spooler {
 		handlers:   c.ViewHandlers,
 		lockID:     lockID,
 		eventstore: c.Eventstore,
+		esV2:       c.EventstoreV2,
 		locker:     c.Locker,
 		queue:      make(chan *spooledHandler, len(c.ViewHandlers)),
 		workers:    c.ConcurrentWorkers,
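A wiring sketch for the extended `Config`. Apart from `EventstoreV2`, which this diff adds for the scheduler-succeeded marker events, all variable values and the `spooler` package alias here are placeholders:

cfg := &spooler.Config{
	Eventstore:        esV1, // v1 store, still used for the view queries
	EventstoreV2:      esV2, // v2 store, used for the "succeeded once" marker
	Locker:            locker,
	ViewHandlers:      handlers,
	ConcurrentWorkers: 4,
}
cfg.New().Start()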
@@ -9,6 +9,8 @@ import (

 	"github.com/zitadel/logging"

+	"github.com/zitadel/zitadel/internal/eventstore"
+	"github.com/zitadel/zitadel/internal/eventstore/handler"
 	v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
 	"github.com/zitadel/zitadel/internal/eventstore/v1/models"
 	"github.com/zitadel/zitadel/internal/eventstore/v1/query"
@@ -16,13 +18,19 @@ import (
 	"github.com/zitadel/zitadel/internal/view/repository"
 )

-const systemID = "system"
+const (
+	systemID           = "system"
+	schedulerSucceeded = eventstore.EventType("system.projections.scheduler.succeeded")
+	aggregateType      = eventstore.AggregateType("system")
+	aggregateID        = "SYSTEM"
+)

 type Spooler struct {
 	handlers            []query.Handler
 	locker              Locker
 	lockID              string
 	eventstore          v1.Eventstore
+	esV2                *eventstore.Eventstore
 	workers             int
 	queue               chan *spooledHandler
 	concurrentInstances int
@@ -37,7 +45,9 @@ type spooledHandler struct {
 	locker              Locker
 	queuedAt            time.Time
 	eventstore          v1.Eventstore
+	esV2                *eventstore.Eventstore
 	concurrentInstances int
+	succeededOnce       bool
 }

 func (s *Spooler) Start() {
@@ -57,7 +67,7 @@ func (s *Spooler) Start() {
 	}
 	go func() {
 		for _, handler := range s.handlers {
-			s.queue <- &spooledHandler{Handler: handler, locker: s.locker, queuedAt: time.Now(), eventstore: s.eventstore, concurrentInstances: s.concurrentInstances}
+			s.queue <- &spooledHandler{Handler: handler, locker: s.locker, queuedAt: time.Now(), eventstore: s.eventstore, esV2: s.esV2, concurrentInstances: s.concurrentInstances}
 		}
 	}()
 }
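The queue is a channel buffered to the number of handlers, so this enqueue loop never blocks on startup; a fixed set of workers drains it. The worker side is not shown in this diff, so the following is only the generic shape of that pattern, not zitadel's worker code:

// Generic worker-pool shape: a queue buffered to the number of tasks,
// drained by `workers` goroutines.
func startWorkers(handlers []*spooledHandler, workers int) chan<- *spooledHandler {
	queue := make(chan *spooledHandler, len(handlers))
	for i := 0; i < workers; i++ {
		go func(id int) {
			for task := range queue {
				task.load(fmt.Sprintf("worker-%d", id))
			}
		}(i)
	}
	return queue
}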
@@ -68,6 +78,32 @@ func requeueTask(task *spooledHandler, queue chan<- *spooledHandler) {
 	queue <- task
 }

+func (s *spooledHandler) hasSucceededOnce(ctx context.Context) (bool, error) {
+	events, err := s.esV2.Filter(ctx, eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
+		AddQuery().
+		AggregateTypes(aggregateType).
+		AggregateIDs(aggregateID).
+		EventTypes(schedulerSucceeded).
+		EventData(map[string]interface{}{
+			"name": s.ViewModel(),
+		}).
+		Builder(),
+	)
+	return len(events) > 0 && err == nil, err
+}
+
+func (s *spooledHandler) setSucceededOnce(ctx context.Context) error {
+	_, err := s.esV2.Push(ctx, &handler.ProjectionSucceededEvent{
+		BaseEvent: *eventstore.NewBaseEventForPush(ctx,
+			eventstore.NewAggregate(ctx, aggregateID, aggregateType, "v1"),
+			schedulerSucceeded,
+		),
+		Name: s.ViewModel(),
+	})
+	s.succeededOnce = err == nil
+	return err
+}
+
 func (s *spooledHandler) load(workerID string) {
 	errs := make(chan error)
 	defer func() {
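These two new methods implement a small marker-event pattern: after the first fully successful run, `setSucceededOnce` pushes a `system.projections.scheduler.succeeded` event for this view into the v2 eventstore, and later spooler starts detect it via the `EventData` filter on the `name` payload field. For that filter to match, the pushed event presumably serializes its `Name` field as `name`; a sketch of such an event type, assuming the real definition in `internal/eventstore/handler` looks similar:

// Assumed shape of the marker event; only the json tag matters for the
// EventData filter above.
type ProjectionSucceededEvent struct {
	eventstore.BaseEvent `json:"-"`
	Name                 string `json:"name"`
}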
@@ -86,8 +122,24 @@ func (s *spooledHandler) load(workerID string) {
 	hasLocked := s.lock(ctx, errs, workerID)

 	if <-hasLocked {
+		if !s.succeededOnce {
+			var err error
+			s.succeededOnce, err = s.hasSucceededOnce(ctx)
+			if err != nil {
+				logging.WithFields("view", s.ViewModel()).OnError(err).Warn("initial lock failed for first schedule")
+				errs <- err
+				return
+			}
+		}
+
+		instanceIDQuery := models.NewSearchQuery().SetColumn(models.Columns_InstanceIDs).AddQuery().ExcludedInstanceIDsFilter("")
 		for {
-			ids, err := s.eventstore.InstanceIDs(ctx, models.NewSearchQuery().SetColumn(models.Columns_InstanceIDs).AddQuery().ExcludedInstanceIDsFilter("").SearchQuery())
+			if s.succeededOnce {
+				// since we have at least one successful run, we can restrict it to events not older than
+				// twice the requeue time (just to be sure not to miss an event)
+				instanceIDQuery = instanceIDQuery.CreationDateNewerFilter(time.Now().Add(-2 * s.MinimumCycleDuration()))
+			}
+			ids, err := s.eventstore.InstanceIDs(ctx, instanceIDQuery.SearchQuery())
 			if err != nil {
 				errs <- err
 				break
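The comment in the hunk gives the reasoning: once a view has caught up completely at least once, new work can only come from recent events, so the instance-ID query is narrowed to a window of twice the requeue time. As a worked example, with a hypothetical `MinimumCycleDuration` of 3 minutes:

// Only instances that produced events in the last 6 minutes are queried.
// The factor 2 is a safety margin so a late-starting run cannot miss
// events that arrived near the end of the previous window.
cycle := 3 * time.Minute
cutoff := time.Now().Add(-2 * cycle) // 6 minutes ago
query := models.NewSearchQuery().
	SetColumn(models.Columns_InstanceIDs).
	AddQuery().
	ExcludedInstanceIDsFilter("").
	CreationDateNewerFilter(cutoff).
	SearchQuery()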
@@ -97,12 +149,16 @@ func (s *spooledHandler) load(workerID string) {
 			if max > len(ids) {
 				max = len(ids)
 			}
-			err = s.processInstances(ctx, workerID, ids[i:max]...)
+			err = s.processInstances(ctx, workerID, ids[i:max])
 			if err != nil {
 				errs <- err
 			}
 		}
 		if ctx.Err() == nil {
+			if !s.succeededOnce {
+				err = s.setSucceededOnce(ctx)
+				logging.WithFields("view", s.ViewModel()).OnError(err).Warn("unable to push first schedule succeeded")
+			}
 			errs <- nil
 		}
 		break
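The clamp on `max` is the tail of a batching loop: the full list of instance IDs is processed in slices of at most `concurrentInstances`, so a single spooler run never fans out across every instance at once. A self-contained sketch of that slicing pattern:

// Process ids in batches of `size`; the final batch may be shorter.
func inBatches(ids []string, size int, fn func([]string) error) error {
	for i := 0; i < len(ids); i += size {
		max := i + size
		if max > len(ids) {
			max = len(ids) // clamp the last batch
		}
		if err := fn(ids[i:max]); err != nil {
			return err
		}
	}
	return nil
}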
@@ -111,16 +167,20 @@ func (s *spooledHandler) load(workerID string) {
 	<-ctx.Done()
 }

-func (s *spooledHandler) processInstances(ctx context.Context, workerID string, ids ...string) error {
+func (s *spooledHandler) processInstances(ctx context.Context, workerID string, ids []string) error {
 	for {
-		events, err := s.query(ctx, ids...)
+		processCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+		events, err := s.query(processCtx, ids)
 		if err != nil {
+			cancel()
 			return err
 		}
 		if len(events) == 0 {
+			cancel()
 			return nil
 		}
-		err = s.process(ctx, events, workerID)
+		err = s.process(processCtx, events, workerID, ids)
+		cancel()
 		if err != nil {
 			return err
 		}
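Each query-and-process round now runs under its own 30-second deadline derived from the parent context (the commit bullet "set timeout for processInstances in spooler"). Because the timeout is created inside a loop, `cancel` is called explicitly on every path rather than deferred; a deferred cancel would accumulate live timers until the function returns. The pattern in isolation, with `oneRound` as a hypothetical stand-in for the query-and-process step:

// Per-iteration timeout: derive a child context, do one bounded round of
// work, release the timer, repeat.
func runWithDeadline(ctx context.Context, oneRound func(context.Context) (bool, error)) error {
	for {
		processCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
		done, err := oneRound(processCtx)
		cancel() // must run every iteration, not be deferred
		if err != nil || done {
			return err
		}
	}
}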
@@ -139,7 +199,7 @@ func (s *spooledHandler) awaitError(cancel func(), errs chan error, workerID str
 	}
 }

-func (s *spooledHandler) process(ctx context.Context, events []*models.Event, workerID string) error {
+func (s *spooledHandler) process(ctx context.Context, events []*models.Event, workerID string, instanceIDs []string) error {
 	for i, event := range events {
 		select {
 		case <-ctx.Done():
@@ -152,17 +212,17 @@ func (s *spooledHandler) process(ctx context.Context, events []*models.Event, wo
 				continue
 			}
 			time.Sleep(100 * time.Millisecond)
-			return s.process(ctx, events[i:], workerID)
+			return s.process(ctx, events[i:], workerID, instanceIDs)
 			}
 		}
 	}
-	err := s.OnSuccess()
+	err := s.OnSuccess(instanceIDs)
 	logging.WithFields("view", s.ViewModel(), "worker", workerID, "traceID", tracing.TraceIDFromCtx(ctx)).OnError(err).Warn("could not process on success func")
 	return err
 }

-func (s *spooledHandler) query(ctx context.Context, instanceIDs ...string) ([]*models.Event, error) {
-	query, err := s.EventQuery(instanceIDs...)
+func (s *spooledHandler) query(ctx context.Context, instanceIDs []string) ([]*models.Event, error) {
+	query, err := s.EventQuery(instanceIDs)
 	if err != nil {
 		return nil, err
 	}
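The same variadic-to-slice change lands here as in the `Handler` interface. One plausible motivation: with `ids ...string`, every hop has to re-expand the slice, and a call site that forgets the argument entirely still compiles, silently querying with no instance filter; a plain `[]string` parameter makes the instance list mandatory at the call site.

// old: s.query(ctx, ids...)  // ids can be omitted and it still compiles
// new: s.query(ctx, ids)     // the instance list is explicit and required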
@@ -227,6 +287,6 @@ func HandleError(event *models.Event, failedErr error,
 	return failedErr
 }

-func HandleSuccess(updateSpoolerRunTimestamp func() error) error {
-	return updateSpoolerRunTimestamp()
+func HandleSuccess(updateSpoolerRunTimestamp func([]string) error, instanceIDs []string) error {
+	return updateSpoolerRunTimestamp(instanceIDs)
 }
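`HandleSuccess` is the hook that view packages plug their timestamp updates into; threading `instanceIDs` through means the bookkeeping write can target only the instances that actually had events, instead of rewriting rows for every instance on every cycle. A hedged sketch of such a callback; the table and column names are illustrative, not zitadel's real schema, and a Postgres driver with array support is assumed:

import (
	"database/sql"

	"github.com/lib/pq" // assumption: Postgres driver with array support
)

// updateSpoolerRunTimestamp returns a callback matching the signature
// HandleSuccess now expects. Using the database's now() keeps the
// timestamp consistent with the server clock.
func updateSpoolerRunTimestamp(db *sql.DB, viewName string) func([]string) error {
	return func(instanceIDs []string) error {
		if len(instanceIDs) == 0 {
			return nil // nothing processed, nothing to update
		}
		_, err := db.Exec(
			`UPDATE current_sequences
			    SET last_successful_spooler_run = now()
			  WHERE view_name = $1 AND instance_id = ANY($2)`,
			viewName, pq.Array(instanceIDs),
		)
		return err
	}
}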
@@ -51,7 +51,7 @@ func (h *testHandler) Subscription() *v1.Subscription {
 	return nil
 }

-func (h *testHandler) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
+func (h *testHandler) EventQuery(instanceIDs []string) (*models.SearchQuery, error) {
 	if h.queryError != nil {
 		return nil, h.queryError
 	}
@@ -71,7 +71,7 @@ func (h *testHandler) OnError(event *models.Event, err error) error {
 	return err
 }

-func (h *testHandler) OnSuccess() error {
+func (h *testHandler) OnSuccess([]string) error {
 	return nil
 }

@@ -127,8 +127,9 @@ func TestSpooler_process(t *testing.T) {
 		currentHandler *testHandler
 	}
 	type args struct {
-		timeout time.Duration
-		events  []*models.Event
+		timeout     time.Duration
+		events      []*models.Event
+		instanceIDs []string
 	}
 	tests := []struct {
 		name string
@@ -184,7 +185,7 @@ func TestSpooler_process(t *testing.T) {
 		start = time.Now()
 	}

-	if err := s.process(ctx, tt.args.events, "test"); (err != nil) != tt.wantErr {
+	if err := s.process(ctx, tt.args.events, "test", tt.args.instanceIDs); (err != nil) != tt.wantErr {
 		t.Errorf("Spooler.process() error = %v, wantErr %v", err, tt.wantErr)
 	}
 	if tt.fields.currentHandler.maxErrCount != tt.wantRetries {
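With `instanceIDs` added to `args`, a table entry can exercise the instance-scoped success path end to end. A hypothetical case in the style of the surrounding table-driven test; the field values are illustrative:

{
	name: "process passes instance ids to OnSuccess",
	fields: fields{
		currentHandler: &testHandler{},
	},
	args: args{
		timeout:     time.Second,
		events:      []*models.Event{{InstanceID: "instance-1"}},
		instanceIDs: []string{"instance-1"},
	},
	wantErr: false,
},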