zitadel/internal/query/projection/projection_test.go

Commit: fix(projections): overhaul the event projection system (#10560), 2025-09-03 17:29:00 +02:00

This PR overhauls our event projection system to make it more robust and prevent skipped events under high load. The core change replaces our custom, transaction-based locking with standard PostgreSQL advisory locks. We also introduce a worker pool to manage concurrency and prevent database connection exhaustion.

### Key Changes

* **Advisory Locks for Projections:** Replaces exclusive row locks and inspection of `pg_stat_activity` with PostgreSQL advisory locks for managing projection state. This is a more reliable and standard approach to distributed locking.
* **Simplified Await Logic:** Removes the complex logic for awaiting open transactions, simplifying it to a more straightforward time-based filtering of events.
* **Projection Worker Pool:** Implements a worker pool to limit concurrent projection triggers, preventing connection exhaustion and improving stability under load. A new `MaxParallelTriggers` configuration option is introduced (a sketch of this pattern follows the test file below).

### Problem Solved

Under high throughput, a race condition could cause projections to miss events from the eventstore. This led to inconsistent data in projection tables (e.g., a user grant might be missing). This PR fixes the underlying locking and concurrency issues to ensure all events are processed reliably.

### How it Works

1. **Event Writing:** When writing events, a *shared* advisory lock is taken. This signals that a write is in progress.
2. **Event Handling (Projections):**
   * A projection worker attempts to acquire an *exclusive* advisory lock for that specific projection. If the lock is already held, it means another worker is on the job, so the current one backs off.
   * Once the lock is acquired, the worker briefly acquires and releases the same *shared* lock used by event writers. This acts as a barrier, ensuring it waits for any in-flight writes to complete.
   * Finally, it processes all events that occurred before its transaction began (see the sketch after this commit message).

### Additional Information

* ZITADEL no longer modifies the `application_name` PostgreSQL variable during event writes.
* The lock on the `current_states` table is now `FOR NO KEY UPDATE`.
* Fixes https://github.com/zitadel/zitadel/issues/8509

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Tim Möhlmann <tim+github@zitadel.com>
(cherry picked from commit 0575f67e942c3192b36e39fd6ae06b1502bc0f5f)

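To make the "How it Works" sequence concrete, here is a minimal sketch of the two lock interactions using database/sql against PostgreSQL's built-in advisory-lock functions. The package name, lock key, and function shapes are illustrative assumptions, not ZITADEL's actual implementation; a PostgreSQL driver (e.g. pgx's stdlib adapter) is assumed to be registered at runtime.

package sketch

import (
	"context"
	"database/sql"
)

// eventWriteLockKey is a hypothetical advisory-lock key shared by all event
// writers; ZITADEL derives its real lock keys internally.
const eventWriteLockKey = int64(42)

// appendEvents sketches the writer side: a shared advisory lock is held for
// the duration of the transaction to signal an in-flight write.
func appendEvents(ctx context.Context, db *sql.DB, write func(*sql.Tx) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	// Shared locks are mutually compatible, so writers proceed in parallel.
	// The lock is released automatically when the transaction ends.
	if _, err = tx.ExecContext(ctx, "SELECT pg_advisory_xact_lock_shared($1)", eventWriteLockKey); err != nil {
		return err
	}
	if err = write(tx); err != nil {
		return err
	}
	return tx.Commit()
}

// triggerProjection sketches the handler side of the protocol.
func triggerProjection(ctx context.Context, db *sql.DB, projectionKey int64) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Step 1: try the exclusive per-projection lock. If another worker holds
	// it, that worker is already processing, so back off without error.
	var acquired bool
	if err = tx.QueryRowContext(ctx, "SELECT pg_try_advisory_xact_lock($1)", projectionKey).Scan(&acquired); err != nil {
		return err
	}
	if !acquired {
		return nil
	}

	// Step 2: the barrier. An exclusive lock on the writers' key conflicts
	// with their shared locks, so this call blocks until in-flight writes
	// finish. Session-level lock functions are used here so the lock can be
	// released again immediately.
	if _, err = tx.ExecContext(ctx, "SELECT pg_advisory_lock($1)", eventWriteLockKey); err != nil {
		return err
	}
	if _, err = tx.ExecContext(ctx, "SELECT pg_advisory_unlock($1)", eventWriteLockKey); err != nil {
		return err
	}

	// Step 3: process every event that occurred before this transaction began
	// (elided here).
	return tx.Commit()
}

Transaction-scoped locks (the _xact_ variants) release automatically on commit or rollback, so a crashed worker cannot leave a projection permanently locked. The test file itself follows.
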
package projection

import (
	"fmt"
	"testing"

	"github.com/brianvoe/gofakeit/v6"
	"github.com/stretchr/testify/require"
	gomock "go.uber.org/mock/gomock"
)
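
// TestStart verifies that Start runs every registered projection and returns
// an error when two projections report the same name.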
func TestStart(t *testing.T) {
	duplicateName := gofakeit.Name()
	tests := []struct {
		name        string
		projections func(t *testing.T) []projection
		err         error
	}{
		{
			name: "happy path",
			projections: func(t *testing.T) []projection {
				ctrl := gomock.NewController(t)
				projections := make([]projection, 5)
				// Five distinct projections: each must be started exactly once.
				for i := range 5 {
					mock := NewMockprojection(ctrl)
					mock.EXPECT().Start(gomock.Any())
					mock.EXPECT().String().Return(gofakeit.Name())
					projections[i] = mock
				}
				return projections
			},
		},
		{
			name: "same projection used twice error",
			projections: func(t *testing.T) []projection {
				projections := make([]projection, 5)
				ctrl := gomock.NewController(t)
				// The first projection uses the duplicate name and is started
				// before the collision is detected.
				mock := NewMockprojection(ctrl)
				mock.EXPECT().String().Return(duplicateName)
				mock.EXPECT().Start(gomock.Any())
				projections[0] = mock
				for i := 1; i < 4; i++ {
					mock := NewMockprojection(ctrl)
					mock.EXPECT().String().Return(gofakeit.Name())
					mock.EXPECT().Start(gomock.Any())
					projections[i] = mock
				}
				// The last projection reuses the duplicate name; Start must
				// never be called on it.
				mock = NewMockprojection(ctrl)
				mock.EXPECT().String().Return(duplicateName)
				projections[4] = mock
				return projections
			},
			err: fmt.Errorf("projection for %s already added", duplicateName),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Start reads the package-level projections slice.
			projections = tt.projections(t)
			err := Start(t.Context())
			require.Equal(t, tt.err, err)
		})
	}
}
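
As a footnote to the `MaxParallelTriggers` option named in the commit message above, the worker-pool idea can be pictured as a buffered-channel semaphore. This is a hypothetical sketch of the pattern, not ZITADEL's implementation; the constant value and function shape are assumptions.

package sketch

import (
	"context"
	"log"
)

// maxParallelTriggers mirrors the MaxParallelTriggers configuration option
// from the commit message; the value here is an arbitrary assumption.
const maxParallelTriggers = 4

// runTriggers fans out projection triggers while letting at most
// maxParallelTriggers of them run concurrently.
func runTriggers(ctx context.Context, triggers []func(context.Context) error) {
	sem := make(chan struct{}, maxParallelTriggers) // counting semaphore
	done := make(chan error)
	for _, trigger := range triggers {
		go func(trigger func(context.Context) error) {
			sem <- struct{}{}        // acquire a slot, blocking while the pool is full
			defer func() { <-sem }() // release the slot when finished
			done <- trigger(ctx)
		}(trigger)
	}
	// Wait for every trigger and surface failures without stopping the rest.
	for range triggers {
		if err := <-done; err != nil {
			log.Println("trigger failed:", err)
		}
	}
}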