Files
zitadel/internal/queue/queue.go
Tim Möhlmann 2727fa719d perf(actionsv2): execution target router (#10564)
# Which Problems Are Solved

The event execution system currently uses a projection handler that
subscribes to and processes all events for all instances. This creates a
high static cost because the system over-fetches event data, handling
many events that are not needed by most instances. This inefficiency is
also reflected in high "rows returned" metrics in the database.

# How the Problems Are Solved

Eliminate the use of a projection handler. Instead, events for which
"execution targets" are defined, are directly pushed to the queue by the
eventstore. A Router is populated in the Instance object in the authz
middleware.

- By joining the execution targets to the instance, no additional
queries are needed anymore.
- As part of the instance object, execution targets are now cached as
well.
- Events are queued within the same transaction, giving transactional
guarantees on delivery.
- Uses the "insert many fast" variant of River. Multiple jobs are queued
in a single round-trip to the database.
- Fix compatibility with PostgreSQL 15

# Additional Changes

- The signing key was stored as plain-text in the river job payload in
the DB. This violated our [Secrets
Storage](https://zitadel.com/docs/concepts/architecture/secrets#secrets-storage)
principle. This change removed the field and only uses the encrypted
version of the signing key.
- Fixed the target ordering from descending to ascending.
- Some minor linter warnings on the use of `io.WriteString()`.

# Additional Context

- Introduced in https://github.com/zitadel/zitadel/pull/9249
- Closes https://github.com/zitadel/zitadel/issues/10553
- Closes https://github.com/zitadel/zitadel/issues/9832
- Closes https://github.com/zitadel/zitadel/issues/10372
- Closes https://github.com/zitadel/zitadel/issues/10492

---------

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
(cherry picked from commit a9ebc06c77)
2025-09-01 08:16:52 +02:00

146 lines
3.5 KiB
Go

package queue
import (
"context"
"database/sql"
"github.com/riverqueue/river"
"github.com/riverqueue/river/riverdriver"
"github.com/riverqueue/river/riverdriver/riverdatabasesql"
"github.com/riverqueue/river/rivertype"
"github.com/riverqueue/rivercontrib/otelriver"
"github.com/robfig/cron/v3"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/telemetry/metrics"
)
// Queue abstracts the underlying queuing library
// For more information see github.com/riverqueue/river
type Queue struct {
driver riverdriver.Driver[*sql.Tx] // database/sql driver adapter used by river
client *river.Client[*sql.Tx] // created lazily in Start; nil until then
config *river.Config // workers and queues are registered here before Start
shouldStart bool // set via ShouldStart; Start is a no-op while false
}
// Config holds the external dependencies needed to construct a Queue.
type Config struct {
Client *database.DB `mapstructure:"-"` // mapstructure is needed if we would like to use viper to configure the queue
}
// NewQueue constructs a Queue backed by the database client in config.
// The river client itself is created later, in Start; workers and queues
// are registered in between via AddWorkers.
//
// The returned error is currently always nil; the error return is kept
// for interface stability.
func NewQueue(config *Config) (*Queue, error) {
	// Export river metrics through the application's meter provider.
	middleware := []rivertype.Middleware{otelriver.NewMiddleware(&otelriver.MiddlewareConfig{
		MeterProvider: metrics.GetMetricsProvider(),
	})}
	return &Queue{
		driver: riverdatabasesql.New(config.Client.DB),
		config: &river.Config{
			Workers: river.NewWorkers(),
			Queues:  make(map[string]river.QueueConfig),
			// -1 disables river's per-job timeout; workers manage their
			// own deadlines through the job context.
			JobTimeout: -1,
			Middleware: middleware,
			Schema:     schema, // package-level constant/variable defined elsewhere in this package
		},
	}, nil
}
// ShouldStart marks the queue to be started when Start is called.
// It is safe to call on a nil receiver, in which case it does nothing.
func (q *Queue) ShouldStart() {
	if q != nil {
		q.shouldStart = true
	}
}
// Start creates the river client and begins processing jobs.
// It is a no-op when the receiver is nil or ShouldStart was never called.
func (q *Queue) Start(ctx context.Context) (err error) {
	if q == nil || !q.shouldStart {
		return nil
	}
	if q.client, err = river.NewClient(q.driver, q.config); err != nil {
		return err
	}
	return q.client.Start(ctx)
}
// AddWorkers registers the given workers and their queues on the
// queue's configuration. Registration must happen before Start.
// On a nil receiver the call is logged and skipped.
func (q *Queue) AddWorkers(w ...Worker) {
	if q == nil {
		logging.Info("skip adding workers because queue is not set")
		return
	}
	for i := range w {
		w[i].Register(q.config.Workers, q.config.Queues)
	}
}
// AddPeriodicJob registers jobArgs to be inserted on the given cron
// schedule. The opts are resolved once and reused for every periodic
// insertion. On a nil receiver the call is logged and skipped, and the
// zero handle is returned.
//
// NOTE(review): this dereferences q.client, which is only set in Start —
// presumably callers invoke this after Start; confirm against call sites.
func (q *Queue) AddPeriodicJob(schedule cron.Schedule, jobArgs river.JobArgs, opts ...InsertOpt) (handle rivertype.PeriodicJobHandle) {
	if q == nil {
		logging.Info("skip adding periodic job because queue is not set")
		return
	}
	// Reuse the shared option-folding helper instead of duplicating its loop.
	options := applyInsertOpts(opts)
	return q.client.PeriodicJobs().Add(
		river.NewPeriodicJob(
			schedule,
			func() (river.JobArgs, *river.InsertOpts) {
				return jobArgs, options
			},
			nil,
		),
	)
}
// InsertOpt mutates the river insert options for a job before it is enqueued.
type InsertOpt func(*river.InsertOpts)

// WithMaxAttempts limits how often a job is retried before it is discarded.
func WithMaxAttempts(maxAttempts uint8) InsertOpt {
return func(opts *river.InsertOpts) {
opts.MaxAttempts = int(maxAttempts)
}
}
// WithQueueName routes the job to the named queue instead of the default one.
func WithQueueName(name string) InsertOpt {
return func(opts *river.InsertOpts) {
opts.Queue = name
}
}
// Insert enqueues a single job with the given arguments, applying all
// opts before the insert. The job's insert result is discarded; only
// the error is reported.
func (q *Queue) Insert(ctx context.Context, args river.JobArgs, opts ...InsertOpt) error {
	options := applyInsertOpts(opts)
	_, err := q.client.Insert(ctx, args, options)
	return err
}
// InsertManyFastTx wraps [river.Client.InsertManyFastTx] to insert all jobs in
// a single `COPY FROM` execution, within the existing transaction.
//
// Opts are applied to each job before sending them to river.
func (q *Queue) InsertManyFastTx(ctx context.Context, tx *sql.Tx, args []river.JobArgs, opts ...InsertOpt) error {
	// The options are identical for every job, so fold them once instead
	// of allocating and re-applying them per element.
	options := applyInsertOpts(opts)
	params := make([]river.InsertManyParams, len(args))
	for i, arg := range args {
		params[i] = river.InsertManyParams{
			Args:       arg,
			InsertOpts: options,
		}
	}
	_, err := q.client.InsertManyFastTx(ctx, tx, params)
	return err
}
// applyInsertOpts folds every InsertOpt into a freshly allocated
// river.InsertOpts and returns it.
func applyInsertOpts(opts []InsertOpt) *river.InsertOpts {
	options := &river.InsertOpts{}
	for i := range opts {
		opts[i](options)
	}
	return options
}
// Worker registers itself (and the queue it consumes from) on the given
// river workers and queue-config map. Implementations are added through
// Queue.AddWorkers before the queue is started.
type Worker interface {
Register(workers *river.Workers, queues map[string]river.QueueConfig)
}