zitadel/internal/execution/worker_test.go
Tim Möhlmann 2727fa719d perf(actionsv2): execution target router (#10564)
# Which Problems Are Solved

The event execution system currently uses a projection handler that
subscribes to and processes all events for all instances. This creates a
high static cost because the system over-fetches event data, handling
many events that are not needed by most instances. This inefficiency is
also reflected in high "rows returned" metrics in the database.

# How the Problems Are Solved

Eliminate the use of a projection handler. Instead, events for which
"execution targets" are defined are pushed directly to the queue by the
eventstore. A Router is populated in the Instance object in the authz
middleware.

- Because the execution targets are joined to the instance, no additional
queries are needed anymore.
- As part of the instance object, execution targets are now cached as
well.
- Events are queued within the same transaction, giving transactional
guarantees on delivery.
- Uses the `insert many fast` variant of River: multiple jobs are queued
in a single round-trip to the database. A sketch of this queuing step
follows this list.
- Fixes compatibility with PostgreSQL 15.
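
For illustration, the queuing step looks roughly like the sketch below. It is
a minimal sketch, not ZITADEL's actual wiring: the `queueJobs` helper, the
`jobArgs` payload type, and the pgx plumbing are assumptions; only
`InsertManyFastTx` is River's API.

```go
// Package queue sketches transactional batch-queuing with River.
package queue

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/riverqueue/river"
)

// jobArgs is a hypothetical job payload implementing river.JobArgs.
type jobArgs struct {
	EventID string `json:"eventID"`
}

func (jobArgs) Kind() string { return "execution_request" }

// queueJobs inserts one job per event ID inside the caller's transaction,
// so delivery shares the transactional guarantees of the event write, and
// all jobs are sent to the database in a single round-trip.
func queueJobs(ctx context.Context, client *river.Client[pgx.Tx], tx pgx.Tx, eventIDs []string) error {
	params := make([]river.InsertManyParams, len(eventIDs))
	for i, id := range eventIDs {
		params[i] = river.InsertManyParams{Args: jobArgs{EventID: id}}
	}
	_, err := client.InsertManyFastTx(ctx, tx, params)
	return err
}
```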

# Additional Changes

- The signing key was stored as plain-text in the river job payload in
the DB. This violated our [Secrets
Storage](https://zitadel.com/docs/concepts/architecture/secrets#secrets-storage)
principle. This change removes the field and uses only the encrypted
version of the signing key.
- Fixed the target ordering from descending to ascending.
- Fixed some minor linter warnings on the use of `io.WriteString()`.

# Additional Context

- Introduced in https://github.com/zitadel/zitadel/pull/9249
- Closes https://github.com/zitadel/zitadel/issues/10553
- Closes https://github.com/zitadel/zitadel/issues/9832
- Closes https://github.com/zitadel/zitadel/issues/10372
- Closes https://github.com/zitadel/zitadel/issues/10492

---------

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
(cherry picked from commit a9ebc06c77)
2025-09-01 08:16:52 +02:00


package execution

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"testing"
	"time"

	"github.com/riverqueue/river"
	"github.com/riverqueue/river/rivertype"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/eventstore"
	target_domain "github.com/zitadel/zitadel/internal/execution/target"
	"github.com/zitadel/zitadel/internal/repository/action"
	exec_repo "github.com/zitadel/zitadel/internal/repository/execution"
	"github.com/zitadel/zitadel/internal/repository/user"
	"github.com/zitadel/zitadel/internal/zerrors"
)
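
// fieldsWorker, argsWorker and wantWorker bundle the worker setup, the job
// under test, and the expected outcome of a single Work invocation.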
type fieldsWorker struct {
	now nowFunc
}

type argsWorker struct {
	job *river.Job[*exec_repo.Request]
}

type wantWorker struct {
	targets        []target_domain.Target
	sendStatusCode int
	err            assert.ErrorAssertionFunc
}

// newExecutionWorker returns a Worker with a minimal test configuration;
// only the fields used by Work are populated.
func newExecutionWorker(f fieldsWorker) *Worker {
	return &Worker{
		config: WorkerConfig{
			Workers:             1,
			TransactionDuration: 5 * time.Second,
			MaxTtl:              5 * time.Minute,
		},
		now: f.now,
	}
}
const (
	userID     = "user1"
	orgID      = "orgID"
	instanceID = "instanceID"
	eventID    = "eventID"
	eventData  = `{"name":"name","script":"name(){}","timeout":3000000000,"allowedToFail":true}`
)
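
// Test_handleEventExecution runs Worker.Work against jobs whose targets
// point at local test servers, asserting the returned error and that every
// target was called.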
func Test_handleEventExecution(t *testing.T) {
	testNow := time.Now
	tests := []struct {
		name string
		test func() (fieldsWorker, argsWorker, wantWorker)
	}{
		{
			"max TTL",
			func() (fieldsWorker, argsWorker, wantWorker) {
				return fieldsWorker{
						now: testNow,
					},
					argsWorker{
						job: &river.Job[*exec_repo.Request]{
							JobRow: &rivertype.JobRow{
								CreatedAt: time.Now().Add(-1 * time.Hour),
							},
							Args: &exec_repo.Request{
								Aggregate: &eventstore.Aggregate{
									InstanceID:    instanceID,
									ID:            eventID,
									ResourceOwner: instanceID,
								},
								Sequence:  1,
								CreatedAt: time.Now().Add(-1 * time.Hour),
								EventType: user.HumanInviteCodeAddedType,
								UserID:    userID,
								EventData: []byte(eventData),
							},
						},
					},
					wantWorker{
						targets:        mockTargets(1),
						sendStatusCode: http.StatusOK,
						err: func(tt assert.TestingT, err error, i ...interface{}) bool {
							return errors.Is(err, new(river.JobCancelError))
						},
					}
			},
		},
		{
			"none",
			func() (fieldsWorker, argsWorker, wantWorker) {
				return fieldsWorker{
						now: testNow,
					},
					argsWorker{
						job: &river.Job[*exec_repo.Request]{
							JobRow: &rivertype.JobRow{
								CreatedAt: time.Now(),
							},
							Args: &exec_repo.Request{
								Aggregate: &eventstore.Aggregate{
									InstanceID:    instanceID,
									ID:            eventID,
									ResourceOwner: instanceID,
								},
								Sequence:  1,
								CreatedAt: time.Now(),
								EventType: user.HumanInviteCodeAddedType,
								UserID:    userID,
								EventData: []byte(eventData),
							},
						},
					},
					wantWorker{
						targets:        mockTargets(0),
						sendStatusCode: http.StatusOK,
						err:            nil,
					}
			},
		},
		{
			"single",
			func() (fieldsWorker, argsWorker, wantWorker) {
				return fieldsWorker{
						now: testNow,
					},
					argsWorker{
						job: &river.Job[*exec_repo.Request]{
							JobRow: &rivertype.JobRow{
								CreatedAt: time.Now(),
							},
							Args: &exec_repo.Request{
								Aggregate: &eventstore.Aggregate{
									InstanceID:    instanceID,
									Type:          action.AggregateType,
									Version:       action.AggregateVersion,
									ID:            eventID,
									ResourceOwner: orgID,
								},
								Sequence:  1,
								CreatedAt: time.Now().UTC(),
								EventType: action.AddedEventType,
								UserID:    userID,
								EventData: []byte(eventData),
							},
						},
					},
					wantWorker{
						targets:        mockTargets(1),
						sendStatusCode: http.StatusOK,
						err:            nil,
					}
			},
		},
		{
			"single, failed 400",
			func() (fieldsWorker, argsWorker, wantWorker) {
				return fieldsWorker{
						now: testNow,
					},
					argsWorker{
						job: &river.Job[*exec_repo.Request]{
							JobRow: &rivertype.JobRow{
								CreatedAt: time.Now(),
							},
							Args: &exec_repo.Request{
								Aggregate: &eventstore.Aggregate{
									InstanceID:    instanceID,
									Type:          action.AggregateType,
									Version:       action.AggregateVersion,
									ID:            eventID,
									ResourceOwner: orgID,
								},
								Sequence:  1,
								CreatedAt: time.Now().UTC(),
								EventType: action.AddedEventType,
								UserID:    userID,
								EventData: []byte(eventData),
							},
						},
					},
					wantWorker{
						targets:        mockTargets(1),
						sendStatusCode: http.StatusBadRequest,
						err: func(tt assert.TestingT, err error, i ...interface{}) bool {
							return errors.Is(err, zerrors.ThrowPreconditionFailed(nil, "EXEC-dra6yamk98", "Errors.Execution.Failed"))
						},
					}
			},
		},
		{
			"multiple",
			func() (fieldsWorker, argsWorker, wantWorker) {
				return fieldsWorker{
						now: testNow,
					},
					argsWorker{
						job: &river.Job[*exec_repo.Request]{
							JobRow: &rivertype.JobRow{
								CreatedAt: time.Now(),
							},
							Args: &exec_repo.Request{
								Aggregate: &eventstore.Aggregate{
									InstanceID:    instanceID,
									Type:          action.AggregateType,
									Version:       action.AggregateVersion,
									ID:            eventID,
									ResourceOwner: orgID,
								},
								Sequence:  1,
								CreatedAt: time.Now().UTC(),
								EventType: action.AddedEventType,
								UserID:    userID,
								EventData: []byte(eventData),
							},
						},
					},
					wantWorker{
						targets:        mockTargets(3),
						sendStatusCode: http.StatusOK,
						err:            nil,
					}
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f, a, w := tt.test()
			// Start one test server per expected target and point the
			// target endpoints at them.
			closeFuncs := make([]func(), len(w.targets))
			calledFuncs := make([]func() bool, len(w.targets))
			for i := range w.targets {
				url, closeF, calledF := testServerCall(
					exec_repo.ContextInfoFromRequest(a.job.Args),
					time.Second,
					w.sendStatusCode,
					nil,
				)
				w.targets[i].Endpoint = url
				closeFuncs[i] = closeF
				calledFuncs[i] = calledF
			}
			// Close the test servers on every path, including early returns.
			defer func() {
				for _, closeF := range closeFuncs {
					closeF()
				}
			}()
			data, err := json.Marshal(w.targets)
			require.NoError(t, err)
			a.job.Args.TargetsData = data
			err = newExecutionWorker(f).Work(
				authz.WithInstanceID(context.Background(), instanceID),
				a.job,
			)
			if w.err != nil {
				// Use the case-specific assertion instead of only checking
				// that some error occurred.
				assert.True(t, w.err(t, err))
				return
			}
			assert.NoError(t, err)
			for _, calledF := range calledFuncs {
				assert.True(t, calledF())
			}
		})
	}
}
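
// mockTarget returns a webhook target fixture; the test runner replaces its
// Endpoint with a test server URL.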
func mockTarget() target_domain.Target {
	return target_domain.Target{
		ExecutionID:      "executionID",
		TargetID:         "targetID",
		TargetType:       target_domain.TargetTypeWebhook,
		Endpoint:         "endpoint",
		Timeout:          time.Minute,
		InterruptOnError: true,
	}
}
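
// mockTargets returns count copies of mockTarget. For count == 0 it returns
// a nil slice, which marshals to JSON null rather than an empty array.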
func mockTargets(count int) []target_domain.Target {
	var targets []target_domain.Target
	if count > 0 {
		targets = make([]target_domain.Target, count)
		for i := range targets {
			targets[i] = mockTarget()
		}
	}
	return targets
}