Mirror of https://github.com/zitadel/zitadel.git (synced 2025-08-11 19:17:32 +00:00)
feat: projections auto create their tables (#3324)
* begin init checks for projections
* first projection checks
* debug notification providers with query fixes
* more projections and first index
* more projections
* more projections
* finish projections
* fix tests (remove db name)
* create tables in setup
* fix logging / error handling
* add tenant to views
* rename tenant to instance_id
* add instance_id to all projections
* add instance_id to all queries
* correct instance_id on projections
* add instance_id to failed_events
* use separate context for instance
* implement features projection
* implement features projection
* remove unique constraint from setup when migration failed
* add error to failed setup event
* add instance_id to primary keys
* fix IAM projection
* remove old migrations folder
* fix keysFromYAML test
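With this change, projections declare their tables with the helpers added in internal/eventstore/handler/crdb/init.go (shown further down) and have them created automatically on startup. A minimal sketch of such a declaration; the projection, table, column and index names here are illustrative, not taken from this PR:

package projection

import (
	"github.com/caos/zitadel/internal/eventstore/handler"
	"github.com/caos/zitadel/internal/eventstore/handler/crdb"
)

// newExampleInitCheck declares a hypothetical projection table; wiring it into
// StatementHandlerConfig.InitCheck lets NewStatementHandler create the table
// (and its index) when the handler is initialized.
func newExampleInitCheck() *handler.Check {
	return crdb.NewTableCheck(
		crdb.NewTable(
			[]*crdb.Column{
				crdb.NewColumn("id", crdb.ColumnTypeText),
				crdb.NewColumn("instance_id", crdb.ColumnTypeText),
				crdb.NewColumn("creation_date", crdb.ColumnTypeTimestamp),
				crdb.NewColumn("owner", crdb.ColumnTypeText, crdb.Nullable()),
			},
			crdb.NewPrimaryKey("instance_id", "id"),
			crdb.NewIndex("owner_idx", []string{"owner"}),
		),
	)
}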
@@ -21,7 +21,7 @@ func NewAggregate(
ID: id,
Type: typ,
ResourceOwner: authz.GetCtxData(ctx).OrgID,
- Tenant: authz.GetCtxData(ctx).TenantID,
+ InstanceID: authz.GetInstance(ctx).ID,
Version: version,
}

@@ -50,7 +50,7 @@ func AggregateFromWriteModel(
ID: wm.AggregateID,
Type: typ,
ResourceOwner: wm.ResourceOwner,
- Tenant: wm.Tenant,
+ InstanceID: wm.InstanceID,
Version: version,
}
}

@@ -63,8 +63,8 @@ type Aggregate struct {
Type AggregateType `json:"-"`
//ResourceOwner is the org this aggregates belongs to
ResourceOwner string `json:"-"`
- //Tenant is the system this aggregate belongs to
- Tenant string `json:"-"`
+ //InstanceID is the instance this aggregate belongs to
+ InstanceID string `json:"-"`
//Version is the semver this aggregate represents
Version Version `json:"-"`
}

@@ -79,7 +79,7 @@ func BaseEventFromRepo(event *repository.Event) *BaseEvent {
ID: event.AggregateID,
Type: AggregateType(event.AggregateType),
ResourceOwner: event.ResourceOwner.String,
- Tenant: event.Tenant.String,
+ InstanceID: event.InstanceID.String,
Version: Version(event.Version),
},
EventType: EventType(event.Type),
@@ -41,7 +41,7 @@ func (es *Eventstore) Health(ctx context.Context) error {
//Push pushes the events in a single transaction
// an event needs at least an aggregate
func (es *Eventstore) Push(ctx context.Context, cmds ...Command) ([]Event, error) {
- events, constraints, err := commandsToRepository(authz.GetCtxData(ctx).TenantID, cmds)
+ events, constraints, err := commandsToRepository(authz.GetInstance(ctx).ID, cmds)
if err != nil {
return nil, err
}

@@ -59,7 +59,7 @@ func (es *Eventstore) Push(ctx context.Context, cmds ...Command) ([]Event, error
return eventReaders, nil
}

- func commandsToRepository(tenantID string, cmds []Command) (events []*repository.Event, constraints []*repository.UniqueConstraint, err error) {
+ func commandsToRepository(instanceID string, cmds []Command) (events []*repository.Event, constraints []*repository.UniqueConstraint, err error) {
events = make([]*repository.Event, len(cmds))
for i, cmd := range cmds {
data, err := EventData(cmd)

@@ -82,7 +82,7 @@ func commandsToRepository(tenantID string, cmds []Command) (events []*repository
AggregateID: cmd.Aggregate().ID,
AggregateType: repository.AggregateType(cmd.Aggregate().Type),
ResourceOwner: sql.NullString{String: cmd.Aggregate().ResourceOwner, Valid: cmd.Aggregate().ResourceOwner != ""},
- Tenant: sql.NullString{String: tenantID, Valid: tenantID != ""},
+ InstanceID: sql.NullString{String: instanceID, Valid: instanceID != ""},
EditorService: cmd.EditorService(),
EditorUser: cmd.EditorUser(),
Type: repository.EventType(cmd.Type()),

@@ -113,7 +113,7 @@ func uniqueConstraintsToRepository(constraints []*EventUniqueConstraint) (unique
//Filter filters the stored events based on the searchQuery
// and maps the events to the defined event structs
func (es *Eventstore) Filter(ctx context.Context, queryFactory *SearchQueryBuilder) ([]Event, error) {
- query, err := queryFactory.build(authz.GetCtxData(ctx).TenantID)
+ query, err := queryFactory.build(authz.GetInstance(ctx).ID)
if err != nil {
return nil, err
}

@@ -170,7 +170,7 @@ func (es *Eventstore) FilterToReducer(ctx context.Context, searchQuery *SearchQu
//LatestSequence filters the latest sequence for the given search query
func (es *Eventstore) LatestSequence(ctx context.Context, queryFactory *SearchQueryBuilder) (uint64, error) {
- query, err := queryFactory.build(authz.GetCtxData(ctx).TenantID)
+ query, err := queryFactory.build(authz.GetInstance(ctx).ID)
if err != nil {
return 0, err
}
@@ -29,7 +29,7 @@ func newTestEvent(id, description string, data func() interface{}, checkPrevious
data: data,
shouldCheckPrevious: checkPrevious,
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "resourceOwner", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "resourceOwner", "editorUser"), "editorService"),
NewAggregate(authz.NewMockContext("zitadel", "caos", "adlerhurst"), id, "test.aggregate", "v1"),
"test.event",
),

@@ -344,8 +344,8 @@ func Test_eventData(t *testing.T) {
func TestEventstore_aggregatesToEvents(t *testing.T) {
type args struct {
- tenantID string
- events []Command
+ instanceID string
+ events []Command
}
type res struct {
wantErr bool

@@ -359,7 +359,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
{
name: "one aggregate one event",
args: args{
- tenantID: "tenant",
+ instanceID: "instanceID",
events: []Command{
newTestEvent(
"1",

@@ -380,7 +380,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "tenant", Valid: true},
+ InstanceID: sql.NullString{String: "instanceID", Valid: true},
Type: "test.event",
Version: "v1",
},

@@ -390,7 +390,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
{
name: "one aggregate multiple events",
args: args{
- tenantID: "tenant",
+ instanceID: "instanceID",
events: []Command{
newTestEvent(
"1",

@@ -418,7 +418,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "tenant", Valid: true},
+ InstanceID: sql.NullString{String: "instanceID", Valid: true},
Type: "test.event",
Version: "v1",
},

@@ -429,7 +429,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "tenant", Valid: true},
+ InstanceID: sql.NullString{String: "instanceID", Valid: true},
Type: "test.event",
Version: "v1",
},

@@ -439,7 +439,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
{
name: "invalid data",
args: args{
- tenantID: "tenant",
+ instanceID: "instanceID",
events: []Command{
newTestEvent(
"1",

@@ -460,7 +460,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
events: []Command{
&testEvent{
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "resourceOwner", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "resourceOwner", "editorUser"), "editorService"),
NewAggregate(
authz.NewMockContext("zitadel", "caos", "adlerhurst"),
"",

@@ -485,7 +485,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
events: []Command{
&testEvent{
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "resourceOwner", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "resourceOwner", "editorUser"), "editorService"),
NewAggregate(
authz.NewMockContext("zitadel", "caos", "adlerhurst"),
"id",

@@ -510,7 +510,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
events: []Command{
&testEvent{
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "resourceOwner", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "resourceOwner", "editorUser"), "editorService"),
NewAggregate(
authz.NewMockContext("zitadel", "caos", "adlerhurst"),
"id",

@@ -535,7 +535,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
events: []Command{
&testEvent{
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "resourceOwner", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "resourceOwner", "editorUser"), "editorService"),
NewAggregate(
authz.NewMockContext("zitadel", "caos", "adlerhurst"),
"id",

@@ -560,7 +560,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
events: []Command{
&testEvent{
BaseEvent: *NewBaseEventForPush(
- service.WithService(authz.NewMockContext("tenant", "", "editorUser"), "editorService"),
+ service.WithService(authz.NewMockContext("instanceID", "", "editorUser"), "editorService"),
NewAggregate(
authz.NewMockContext("zitadel", "", "adlerhurst"),
"id",

@@ -585,7 +585,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "", Valid: false},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -630,7 +630,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -641,7 +641,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -654,7 +654,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -665,7 +665,7 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- events, _, err := commandsToRepository(tt.args.tenantID, tt.args.events)
+ events, _, err := commandsToRepository(tt.args.instanceID, tt.args.events)
if (err != nil) != tt.res.wantErr {
t.Errorf("Eventstore.aggregatesToEvents() error = %v, wantErr %v", err, tt.res.wantErr)
return

@@ -772,7 +772,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -816,7 +816,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -827,7 +827,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -882,7 +882,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -893,7 +893,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},

@@ -906,7 +906,7 @@ func TestEventstore_Push(t *testing.T) {
EditorService: "editorService",
EditorUser: "editorUser",
ResourceOwner: sql.NullString{String: "caos", Valid: true},
- Tenant: sql.NullString{String: "zitadel"},
+ InstanceID: sql.NullString{String: "zitadel"},
Type: "test.event",
Version: "v1",
},
@@ -8,15 +8,16 @@ import (
"time"

"github.com/DATA-DOG/go-sqlmock"

"github.com/caos/zitadel/internal/eventstore"
)

type mockExpectation func(sqlmock.Sqlmock)

- func expectFailureCount(tableName string, projectionName string, failedSeq, failureCount uint64) func(sqlmock.Sqlmock) {
+ func expectFailureCount(tableName string, projectionName, instanceID string, failedSeq, failureCount uint64) func(sqlmock.Sqlmock) {
return func(m sqlmock.Sqlmock) {
- m.ExpectQuery(`WITH failures AS \(SELECT failure_count FROM `+tableName+` WHERE projection_name = \$1 AND failed_sequence = \$2\) SELECT IF\(EXISTS\(SELECT failure_count FROM failures\), \(SELECT failure_count FROM failures\), 0\) AS failure_count`).
- WithArgs(projectionName, failedSeq).
+ m.ExpectQuery(`WITH failures AS \(SELECT failure_count FROM `+tableName+` WHERE projection_name = \$1 AND failed_sequence = \$2\ AND instance_id = \$3\) SELECT IF\(EXISTS\(SELECT failure_count FROM failures\), \(SELECT failure_count FROM failures\), 0\) AS failure_count`).
+ WithArgs(projectionName, failedSeq, instanceID).
WillReturnRows(
sqlmock.NewRows([]string{"failure_count"}).
AddRow(failureCount),

@@ -24,10 +25,10 @@ func expectFailureCount(tableName string, projectionName string, failedSeq, fail
}
}

- func expectUpdateFailureCount(tableName string, projectionName string, seq, failureCount uint64) func(sqlmock.Sqlmock) {
+ func expectUpdateFailureCount(tableName string, projectionName, instanceID string, seq, failureCount uint64) func(sqlmock.Sqlmock) {
return func(m sqlmock.Sqlmock) {
- m.ExpectExec(`UPSERT INTO `+tableName+` \(projection_name, failed_sequence, failure_count, error\) VALUES \(\$1, \$2, \$3, \$4\)`).
- WithArgs(projectionName, seq, failureCount, sqlmock.AnyArg()).WillReturnResult(sqlmock.NewResult(1, 1))
+ m.ExpectExec(`UPSERT INTO `+tableName+` \(projection_name, failed_sequence, failure_count, error, instance_id\) VALUES \(\$1, \$2, \$3, \$4\, \$5\)`).
+ WithArgs(projectionName, seq, failureCount, sqlmock.AnyArg(), instanceID).WillReturnResult(sqlmock.NewResult(1, 1))
}
}
@@ -4,15 +4,16 @@ import (
"database/sql"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/handler"
)

const (
setFailureCountStmtFormat = "UPSERT INTO %s" +
- " (projection_name, failed_sequence, failure_count, error)" +
- " VALUES ($1, $2, $3, $4)"
- failureCountStmtFormat = "WITH failures AS (SELECT failure_count FROM %s WHERE projection_name = $1 AND failed_sequence = $2)" +
+ " (projection_name, failed_sequence, failure_count, error, instance_id)" +
+ " VALUES ($1, $2, $3, $4, $5)"
+ failureCountStmtFormat = "WITH failures AS (SELECT failure_count FROM %s WHERE projection_name = $1 AND failed_sequence = $2 AND instance_id = $3)" +
" SELECT IF(" +
"EXISTS(SELECT failure_count FROM failures)," +
" (SELECT failure_count FROM failures)," +

@@ -21,31 +22,31 @@ const (
)

func (h *StatementHandler) handleFailedStmt(tx *sql.Tx, stmt *handler.Statement, execErr error) (shouldContinue bool) {
- failureCount, err := h.failureCount(tx, stmt.Sequence)
+ failureCount, err := h.failureCount(tx, stmt.Sequence, stmt.InstanceID)
if err != nil {
- logging.WithFields("projection", h.ProjectionName, "seq", stmt.Sequence).WithError(err).Warn("unable to get failure count")
+ logging.WithFields("projection", h.ProjectionName, "sequence", stmt.Sequence).WithError(err).Warn("unable to get failure count")
return false
}
failureCount += 1
- err = h.setFailureCount(tx, stmt.Sequence, failureCount, execErr)
- logging.WithFields("projection", h.ProjectionName, "seq", stmt.Sequence).OnError(err).Warn("unable to update failure count")
+ err = h.setFailureCount(tx, stmt.Sequence, failureCount, execErr, stmt.InstanceID)
+ logging.WithFields("projection", h.ProjectionName, "sequence", stmt.Sequence).OnError(err).Warn("unable to update failure count")

return failureCount >= h.maxFailureCount
}

- func (h *StatementHandler) failureCount(tx *sql.Tx, seq uint64) (count uint, err error) {
- row := tx.QueryRow(h.failureCountStmt, h.ProjectionName, seq)
+ func (h *StatementHandler) failureCount(tx *sql.Tx, seq uint64, instanceID string) (count uint, err error) {
+ row := tx.QueryRow(h.failureCountStmt, h.ProjectionName, seq, instanceID)
if err = row.Err(); err != nil {
return 0, errors.ThrowInternal(err, "CRDB-Unnex", "unable to update failure count")
}
if err = row.Scan(&count); err != nil {
- return 0, errors.ThrowInternal(err, "CRDB-RwSMV", "unable to scann count")
+ return 0, errors.ThrowInternal(err, "CRDB-RwSMV", "unable to scan count")
}
return count, nil
}

- func (h *StatementHandler) setFailureCount(tx *sql.Tx, seq uint64, count uint, err error) error {
- _, dbErr := tx.Exec(h.setFailureCountStmt, h.ProjectionName, seq, count, err.Error())
+ func (h *StatementHandler) setFailureCount(tx *sql.Tx, seq uint64, count uint, err error, instanceID string) error {
+ _, dbErr := tx.Exec(h.setFailureCountStmt, h.ProjectionName, seq, count, err.Error(), instanceID)
if dbErr != nil {
return errors.ThrowInternal(dbErr, "CRDB-4Ht4x", "set failure count failed")
}
@@ -26,7 +26,8 @@ type StatementHandlerConfig struct {
MaxFailureCount uint
BulkLimit uint64

- Reducers []handler.AggregateReducer
+ Reducers []handler.AggregateReducer
+ InitCheck *handler.Check
}

type StatementHandler struct {

@@ -75,6 +76,9 @@ func NewStatementHandler(
Locker: NewLocker(config.Client, config.LockTable, config.ProjectionHandlerConfig.ProjectionName),
}

+ err := h.Init(ctx, config.InitCheck)
+ logging.OnError(err).Fatal("unable to initialize projections")

go h.ProjectionHandler.Process(
ctx,
h.reduce,

@@ -214,7 +218,7 @@ func (h *StatementHandler) executeStmts(
continue
}
if stmt.PreviousSequence > 0 && stmt.PreviousSequence != sequences[stmt.AggregateType] {
- logging.WithFields("projection", h.ProjectionName, "aggregateType", stmt.AggregateType, "seq", stmt.Sequence, "prevSeq", stmt.PreviousSequence, "currentSeq", sequences[stmt.AggregateType]).Warn("sequences do not match")
+ logging.WithFields("projection", h.ProjectionName, "aggregateType", stmt.AggregateType, "sequence", stmt.Sequence, "prevSeq", stmt.PreviousSequence, "currentSeq", sequences[stmt.AggregateType]).Warn("sequences do not match")
break
}
err := h.executeStmt(tx, stmt)
@@ -28,6 +28,7 @@ type testEvent struct {
sequence uint64
previousSequence uint64
aggregateType eventstore.AggregateType
+ instanceID string
}

func (e *testEvent) Sequence() uint64 {

@@ -36,7 +37,8 @@ func (e *testEvent) Sequence() uint64 {
func (e *testEvent) Aggregate() eventstore.Aggregate {
return eventstore.Aggregate{
- Type: e.aggregateType,
+ Type: e.aggregateType,
+ InstanceID: e.instanceID,
}
}

@@ -786,6 +788,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 5,
previousSequence: 0,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -798,6 +801,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 6,
previousSequence: 5,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -810,6 +814,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 7,
previousSequence: 6,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -830,8 +835,8 @@ func TestStatementHandler_executeStmts(t *testing.T) {
expectSavePoint(),
expectCreateErr("my_projection", []string{"col"}, []string{"$1"}, sql.ErrConnDone),
expectSavePointRollback(),
- expectFailureCount("failed_events", "my_projection", 6, 3),
- expectUpdateFailureCount("failed_events", "my_projection", 6, 4),
+ expectFailureCount("failed_events", "my_projection", "instanceID", 6, 3),
+ expectUpdateFailureCount("failed_events", "my_projection", "instanceID", 6, 4),
},
idx: 0,
},

@@ -850,6 +855,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 5,
previousSequence: 0,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -862,6 +868,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 6,
previousSequence: 5,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -874,6 +881,7 @@ func TestStatementHandler_executeStmts(t *testing.T) {
aggregateType: "agg",
sequence: 7,
previousSequence: 6,
+ instanceID: "instanceID",
},
[]handler.Column{
{

@@ -894,8 +902,8 @@ func TestStatementHandler_executeStmts(t *testing.T) {
expectSavePoint(),
expectCreateErr("my_projection", []string{"col2"}, []string{"$1"}, sql.ErrConnDone),
expectSavePointRollback(),
- expectFailureCount("failed_events", "my_projection", 6, 4),
- expectUpdateFailureCount("failed_events", "my_projection", 6, 5),
+ expectFailureCount("failed_events", "my_projection", "instanceID", 6, 4),
+ expectUpdateFailureCount("failed_events", "my_projection", "instanceID", 6, 5),
expectSavePoint(),
expectCreate("my_projection", []string{"col3"}, []string{"$1"}),
expectSavePointRelease(),
internal/eventstore/handler/crdb/init.go (new file, 320 lines)
@@ -0,0 +1,320 @@
package crdb

import (
"context"
"errors"
"fmt"
"strings"

"github.com/caos/logging"
"github.com/lib/pq"

caos_errs "github.com/caos/zitadel/internal/errors"

"github.com/caos/zitadel/internal/eventstore/handler"
)

type Table struct {
columns []*Column
primaryKey PrimaryKey
indices []*Index
}

func NewTable(columns []*Column, key PrimaryKey, indices ...*Index) *Table {
return &Table{
columns: columns,
primaryKey: key,
indices: indices,
}
}

type SuffixedTable struct {
Table
suffix string
}

func NewSuffixedTable(columns []*Column, key PrimaryKey, suffix string, indices ...*Index) *SuffixedTable {
return &SuffixedTable{
Table: Table{
columns: columns,
primaryKey: key,
indices: indices,
},
suffix: suffix,
}
}

type Column struct {
Name string
Type ColumnType
nullable bool
defaultValue interface{}
deleteCascade string
}

type ColumnOption func(*Column)

func NewColumn(name string, columnType ColumnType, opts ...ColumnOption) *Column {
column := &Column{
Name: name,
Type: columnType,
nullable: false,
defaultValue: nil,
}
for _, opt := range opts {
opt(column)
}
return column
}

func Nullable() ColumnOption {
return func(c *Column) {
c.nullable = true
}
}

func Default(value interface{}) ColumnOption {
return func(c *Column) {
c.defaultValue = value
}
}

func DeleteCascade(column string) ColumnOption {
return func(c *Column) {
c.deleteCascade = column
}
}

type PrimaryKey []string

func NewPrimaryKey(columnNames ...string) PrimaryKey {
return columnNames
}

type ColumnType int32

const (
ColumnTypeText ColumnType = iota
ColumnTypeTextArray
ColumnTypeJSONB
ColumnTypeBytes
ColumnTypeTimestamp
ColumnTypeEnum
ColumnTypeEnumArray
ColumnTypeInt64
ColumnTypeBool
)

func NewIndex(name string, columns []string, opts ...indexOpts) *Index {
i := &Index{
Name: name,
Columns: columns,
bucketCount: 0,
}
for _, opt := range opts {
opt(i)
}
return i
}

type Index struct {
Name string
Columns []string
bucketCount uint16
}

type indexOpts func(*Index)

func Hash(bucketsCount uint16) indexOpts {
return func(i *Index) {
i.bucketCount = bucketsCount
}
}
//Init implements handler.Init
func (h *StatementHandler) Init(ctx context.Context, checks ...*handler.Check) error {
for _, check := range checks {
if check == nil || check.IsNoop() {
return nil
}
tx, err := h.client.BeginTx(ctx, nil)
if err != nil {
return caos_errs.ThrowInternal(err, "CRDB-SAdf2", "begin failed")
}
for i, execute := range check.Executes {
logging.WithFields("projection", h.ProjectionName, "execute", i).Debug("executing check")
next, err := execute(h.client, h.ProjectionName)
if err != nil {
tx.Rollback()
return err
}
if !next {
logging.WithFields("projection", h.ProjectionName, "execute", i).Debug("skipping next check")
break
}
}
if err := tx.Commit(); err != nil {
return err
}
}
return nil
}

func NewTableCheck(table *Table, opts ...execOption) *handler.Check {
config := execConfig{}
create := func(config execConfig) string {
return createTableStatement(table, config.tableName, "")
}
executes := make([]func(handler.Executer, string) (bool, error), len(table.indices)+1)
executes[0] = execNextIfExists(config, create, opts, true)
for i, index := range table.indices {
executes[i+1] = execNextIfExists(config, createIndexStatement(index), opts, true)
}
return &handler.Check{
Executes: executes,
}
}

func NewMultiTableCheck(primaryTable *Table, secondaryTables ...*SuffixedTable) *handler.Check {
config := execConfig{}
create := func(config execConfig) string {
stmt := createTableStatement(primaryTable, config.tableName, "")
for _, table := range secondaryTables {
stmt += createTableStatement(&table.Table, config.tableName, "_"+table.suffix)
}
return stmt
}

return &handler.Check{
Executes: []func(handler.Executer, string) (bool, error){
execNextIfExists(config, create, nil, true),
},
}
}

func NewViewCheck(selectStmt string, secondaryTables ...*SuffixedTable) *handler.Check {
config := execConfig{}
create := func(config execConfig) string {
var stmt string
for _, table := range secondaryTables {
stmt += createTableStatement(&table.Table, config.tableName, "_"+table.suffix)
}
stmt += createViewStatement(config.tableName, selectStmt)
return stmt
}

return &handler.Check{
Executes: []func(handler.Executer, string) (bool, error){
execNextIfExists(config, create, nil, false),
},
}
}

func execNextIfExists(config execConfig, q query, opts []execOption, executeNext bool) func(handler.Executer, string) (bool, error) {
return func(handler handler.Executer, name string) (bool, error) {
err := exec(config, q, opts)(handler, name)
if isErrAlreadyExists(err) {
return executeNext, nil
}
return false, err
}
}

func isErrAlreadyExists(err error) bool {
caosErr := &caos_errs.CaosError{}
if !errors.As(err, &caosErr) {
return false
}
sqlErr, ok := caosErr.GetParent().(*pq.Error)
if !ok {
return false
}
return sqlErr.Routine == "NewRelationAlreadyExistsError"
}

func createTableStatement(table *Table, tableName string, suffix string) string {
stmt := fmt.Sprintf("CREATE TABLE %s (%s, PRIMARY KEY (%s)",
tableName+suffix,
createColumnsStatement(table.columns, tableName),
strings.Join(table.primaryKey, ", "),
)
for _, index := range table.indices {
stmt += fmt.Sprintf(", INDEX %s (%s)", index.Name, strings.Join(index.Columns, ","))
}
return stmt + ");"
}

func createViewStatement(viewName string, selectStmt string) string {
return fmt.Sprintf("CREATE VIEW %s AS %s",
viewName,
selectStmt,
)
}

func createIndexStatement(index *Index) func(config execConfig) string {
return func(config execConfig) string {
stmt := fmt.Sprintf("CREATE INDEX %s ON %s (%s)",
index.Name,
config.tableName,
strings.Join(index.Columns, ","),
)
if index.bucketCount == 0 {
return stmt + ";"
}
return fmt.Sprintf("SET experimental_enable_hash_sharded_indexes=on; %s USING HASH WITH BUCKET_COUNT = %d;",
stmt, index.bucketCount)
}
}

func createColumnsStatement(cols []*Column, tableName string) string {
columns := make([]string, len(cols))
for i, col := range cols {
column := col.Name + " " + columnType(col.Type)
if !col.nullable {
column += " NOT NULL"
}
if col.defaultValue != nil {
column += " DEFAULT " + defaultValue(col.defaultValue)
}
if col.deleteCascade != "" {
column += fmt.Sprintf(" REFERENCES %s (%s) ON DELETE CASCADE", tableName, col.deleteCascade)
}
columns[i] = column
}
return strings.Join(columns, ",")
}

func defaultValue(value interface{}) string {
switch v := value.(type) {
case string:
return "'" + v + "'"
default:
return fmt.Sprintf("%v", v)
}
}

func columnType(columnType ColumnType) string {
switch columnType {
case ColumnTypeText:
return "TEXT"
case ColumnTypeTextArray:
return "TEXT[]"
case ColumnTypeTimestamp:
return "TIMESTAMPTZ"
case ColumnTypeEnum:
return "SMALLINT"
case ColumnTypeEnumArray:
return "SMALLINT[]"
case ColumnTypeInt64:
return "BIGINT"
case ColumnTypeBool:
return "BOOLEAN"
case ColumnTypeJSONB:
return "JSONB"
case ColumnTypeBytes:
return "BYTES"
default:
panic("") //TODO: remove?
return ""
}
}
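For reference, a table declared with the illustrative columns from the sketch near the top of this page would make createTableStatement above emit roughly the following (table name supplied by the handler config; spacing as produced by the string concatenation in createColumnsStatement):

CREATE TABLE example_projection (id TEXT NOT NULL,instance_id TEXT NOT NULL,creation_date TIMESTAMPTZ NOT NULL,owner TEXT, PRIMARY KEY (instance_id, id), INDEX owner_idx (owner));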
@@ -37,7 +37,7 @@ func NewLocker(client *sql.DB, lockTable, projectionName string) Locker {
workerName, err := os.Hostname()
if err != nil || workerName == "" {
workerName, err = id.SonyFlakeGenerator.Next()
- logging.Log("CRDB-bdO56").OnError(err).Panic("unable to generate lockID")
+ logging.OnError(err).Panic("unable to generate lockID")
}
return &locker{
client: client,
@@ -6,7 +6,7 @@ import (
"github.com/lib/pq"

- "github.com/caos/zitadel/internal/errors"
+ caos_errs "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/handler"
)

@@ -46,6 +46,7 @@ func NewCreateStatement(event eventstore.Event, values []handler.Column, opts ..
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: exec(config, q, opts),
}
}

@@ -71,6 +72,7 @@ func NewUpsertStatement(event eventstore.Event, values []handler.Column, opts ..
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: exec(config, q, opts),
}
}

@@ -104,6 +106,7 @@ func NewUpdateStatement(event eventstore.Event, values []handler.Column, conditi
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: exec(config, q, opts),
}
}

@@ -129,6 +132,7 @@ func NewDeleteStatement(event eventstore.Event, conditions []handler.Condition,
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: exec(config, q, opts),
}
}

@@ -138,6 +142,7 @@ func NewNoOpStatement(event eventstore.Event) *handler.Statement {
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
}
}

@@ -153,6 +158,7 @@ func NewMultiStatement(event eventstore.Event, opts ...func(eventstore.Event) Ex
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: multiExec(execs),
}
}

@@ -278,6 +284,7 @@ func NewCopyStatement(event eventstore.Event, cols []handler.Column, conds []han
AggregateType: event.Aggregate().Type,
Sequence: event.Sequence(),
PreviousSequence: event.PreviousAggregateTypeSequence(),
+ InstanceID: event.Aggregate().InstanceID,
Execute: exec(config, q, opts),
}
}

@@ -327,7 +334,7 @@ func exec(config execConfig, q query, opts []execOption) Exec {
}

if _, err := ex.Exec(q(config), config.args...); err != nil {
- return errors.ThrowInternal(err, "CRDB-pKtsr", "exec failed")
+ return caos_errs.ThrowInternal(err, "CRDB-pKtsr", "exec failed")
}

return nil
@@ -608,6 +608,7 @@ func TestNewNoOpStatement(t *testing.T) {
aggregateType: "agg",
sequence: 5,
previousSequence: 3,
+ instanceID: "instanceID",
},
},
want: &handler.Statement{

@@ -615,6 +616,7 @@ func TestNewNoOpStatement(t *testing.T) {
Execute: nil,
Sequence: 5,
PreviousSequence: 3,
+ InstanceID: "instanceID",
},
},
}
@@ -8,6 +8,7 @@ import (
"time"

+ "github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore"
)

@@ -270,7 +271,7 @@ func (h *ProjectionHandler) fetchBulkStmts(

for _, event := range events {
if err = h.processEvent(ctx, event, reduce); err != nil {
- logging.WithFields("projection", h.ProjectionName, "seq", event.Sequence()).WithError(err).Warn("unable to process event in bulk")
+ logging.WithFields("projection", h.ProjectionName, "sequence", event.Sequence(), "instanceID", event.Aggregate().InstanceID).WithError(err).Warn("unable to process event in bulk")
return false, err
}
}
internal/eventstore/handler/init.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package handler

import "context"

//Init initializes the projection with the given check
type Init func(context.Context, *Check) error

type Check struct {
Executes []func(ex Executer, projectionName string) (bool, error)
}

func (c *Check) IsNoop() bool {
return len(c.Executes) == 0
}
@@ -27,6 +27,7 @@ type Statement struct {
AggregateType eventstore.AggregateType
Sequence uint64
PreviousSequence uint64
+ InstanceID string

Execute func(ex Executer, projectionName string) error
}
@@ -12,7 +12,7 @@ type ReadModel struct {
ChangeDate time.Time `json:"-"`
Events []Event `json:"-"`
ResourceOwner string `json:"-"`
- Tenant string `json:"-"`
+ InstanceID string `json:"-"`
}

//AppendEvents adds all the events to the read model.

@@ -35,8 +35,8 @@ func (rm *ReadModel) Reduce() error {
if rm.ResourceOwner == "" {
rm.ResourceOwner = rm.Events[0].Aggregate().ResourceOwner
}
- if rm.Tenant == "" {
- rm.Tenant = rm.Events[0].Aggregate().Tenant
+ if rm.InstanceID == "" {
+ rm.InstanceID = rm.Events[0].Aggregate().InstanceID
}

if rm.CreationDate.IsZero() {

@@ -56,9 +56,9 @@ type Event struct {
// an aggregate can only be managed by one organisation
// use the ID of the org
ResourceOwner sql.NullString
- //Tenant is the system where this event belongs to
- // use the ID of the tenant
- Tenant sql.NullString
+ //InstanceID is the instance where this event belongs to
+ // use the ID of the instance
+ InstanceID sql.NullString
}

//EventType is the description of the change

@@ -66,8 +66,8 @@ const (
FieldSequence
//FieldResourceOwner represents the resource owner field
FieldResourceOwner
- //FieldTenant represents the tenant field
- FieldTenant
+ //FieldInstanceID represents the instance id field
+ FieldInstanceID
//FieldEditorService represents the editor service field
FieldEditorService
//FieldEditorUser represents the editor user field
@@ -30,7 +30,7 @@ const (
" SELECT MAX(event_sequence) seq, 1 join_me" +
" FROM eventstore.events" +
" WHERE aggregate_type = $2" +
- " AND (CASE WHEN $9::STRING IS NULL THEN tenant is null else tenant = $9::STRING END)" +
+ " AND (CASE WHEN $9::STRING IS NULL THEN instance_id is null else instance_id = $9::STRING END)" +
") AS agg_type " +
// combined with
"LEFT JOIN " +

@@ -39,7 +39,7 @@ const (
" SELECT event_sequence seq, resource_owner ro, 1 join_me" +
" FROM eventstore.events" +
" WHERE aggregate_type = $2 AND aggregate_id = $3" +
- " AND (CASE WHEN $9::STRING IS NULL THEN tenant is null else tenant = $9::STRING END)" +
+ " AND (CASE WHEN $9::STRING IS NULL THEN instance_id is null else instance_id = $9::STRING END)" +
" ORDER BY event_sequence DESC" +
" LIMIT 1" +
") AS agg USING(join_me)" +

@@ -54,7 +54,7 @@ const (
" editor_user," +
" editor_service," +
" resource_owner," +
- " tenant," +
+ " instance_id," +
" event_sequence," +
" previous_aggregate_sequence," +
" previous_aggregate_type_sequence" +

@@ -70,12 +70,12 @@ const (
" $6::VARCHAR AS editor_user," +
" $7::VARCHAR AS editor_service," +
" IFNULL((resource_owner), $8::VARCHAR) AS resource_owner," +
- " $9::VARCHAR AS tenant," +
+ " $9::VARCHAR AS instance_id," +
" NEXTVAL(CONCAT('eventstore.', IFNULL($9, 'system'), '_seq'))," +
" aggregate_sequence AS previous_aggregate_sequence," +
" aggregate_type_sequence AS previous_aggregate_type_sequence " +
"FROM previous_data " +
- "RETURNING id, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, creation_date, resource_owner, tenant"
+ "RETURNING id, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, creation_date, resource_owner, instance_id"

uniqueInsert = `INSERT INTO eventstore.unique_constraints
(

@@ -120,8 +120,8 @@ func (db *CRDB) Push(ctx context.Context, events []*repository.Event, uniqueCons
event.EditorUser,
event.EditorService,
event.ResourceOwner,
- event.Tenant,
- ).Scan(&event.ID, &event.Sequence, &previousAggregateSequence, &previousAggregateTypeSequence, &event.CreationDate, &event.ResourceOwner, &event.Tenant)
+ event.InstanceID,
+ ).Scan(&event.ID, &event.Sequence, &previousAggregateSequence, &previousAggregateTypeSequence, &event.CreationDate, &event.ResourceOwner, &event.InstanceID)

event.PreviousAggregateSequence = uint64(previousAggregateSequence)
event.PreviousAggregateTypeSequence = uint64(previousAggregateTypeSequence)

@@ -132,7 +132,7 @@ func (db *CRDB) Push(ctx context.Context, events []*repository.Event, uniqueCons
"aggregateId", event.AggregateID,
"aggregateType", event.AggregateType,
"eventType", event.Type,
- "tenant", event.Tenant,
+ "instanceID", event.InstanceID,
).WithError(err).Info("query failed")
return caos_errs.ThrowInternal(err, "SQL-SBP37", "unable to create event")
}

@@ -229,7 +229,7 @@ func (db *CRDB) eventQuery() string {
", editor_service" +
", editor_user" +
", resource_owner" +
- ", tenant" +
+ ", instance_id" +
", aggregate_type" +
", aggregate_id" +
", aggregate_version" +

@@ -250,8 +250,8 @@ func (db *CRDB) columnName(col repository.Field) string {
return "event_sequence"
case repository.FieldResourceOwner:
return "resource_owner"
- case repository.FieldTenant:
- return "tenant"
+ case repository.FieldInstanceID:
+ return "instance_id"
case repository.FieldEditorService:
return "editor_service"
case repository.FieldEditorUser:
@@ -109,7 +109,7 @@ func eventsScanner(scanner scan, dest interface{}) (err error) {
&event.EditorService,
&event.EditorUser,
&event.ResourceOwner,
- &event.Tenant,
+ &event.InstanceID,
&event.AggregateType,
&event.AggregateID,
&event.Version,

@@ -130,7 +130,7 @@ func Test_prepareColumns(t *testing.T) {
dest: &[]*repository.Event{},
},
res: res{
- query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
+ query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
expected: []*repository.Event{
{AggregateID: "hodor", AggregateType: "user", Sequence: 5, Data: make(Data, 0)},
},

@@ -146,7 +146,7 @@ func Test_prepareColumns(t *testing.T) {
dest: []*repository.Event{},
},
res: res{
- query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
+ query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsErrorInvalidArgument,
},
},

@@ -158,7 +158,7 @@ func Test_prepareColumns(t *testing.T) {
dbErr: sql.ErrConnDone,
},
res: res{
- query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
+ query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsInternal,
},
},

@@ -592,7 +592,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
),
},

@@ -621,7 +621,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence LIMIT \$2`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence LIMIT \$2`,
[]driver.Value{repository.AggregateType("user"), uint64(5)},
),
},

@@ -650,7 +650,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC LIMIT \$2`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC LIMIT \$2`,
[]driver.Value{repository.AggregateType("user"), uint64(5)},
),
},

@@ -679,7 +679,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQueryErr(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
sql.ErrConnDone),
},

@@ -708,7 +708,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
&repository.Event{Sequence: 100}),
},

@@ -776,7 +776,7 @@ func Test_query_events_mocked(t *testing.T) {
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
- `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) OR \( aggregate_type = \$2 AND aggregate_id = \$3 \) ORDER BY event_sequence DESC LIMIT \$4`,
+ `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE \( aggregate_type = \$1 \) OR \( aggregate_type = \$2 AND aggregate_id = \$3 \) ORDER BY event_sequence DESC LIMIT \$4`,
[]driver.Value{repository.AggregateType("user"), repository.AggregateType("org"), "asdf42", uint64(5)},
),
},
@@ -12,7 +12,7 @@ type SearchQueryBuilder struct {
limit uint64
desc bool
resourceOwner string
- tenant string
+ instanceID string
queries []*SearchQuery
}

@@ -68,9 +68,9 @@ func (factory *SearchQueryBuilder) ResourceOwner(resourceOwner string) *SearchQu
return factory
}

- //Tenant defines the tenant (system) of the events
- func (factory *SearchQueryBuilder) Tenant(tenant string) *SearchQueryBuilder {
- factory.tenant = tenant
+ //InstanceID defines the instanceID (system) of the events
+ func (factory *SearchQueryBuilder) InstanceID(instanceID string) *SearchQueryBuilder {
+ factory.instanceID = instanceID
return factory
}

@@ -145,13 +145,13 @@ func (query *SearchQuery) Builder() *SearchQueryBuilder {
return query.builder
}

- func (builder *SearchQueryBuilder) build(tenantID string) (*repository.SearchQuery, error) {
+ func (builder *SearchQueryBuilder) build(instanceID string) (*repository.SearchQuery, error) {
if builder == nil ||
len(builder.queries) < 1 ||
builder.columns.Validate() != nil {
return nil, errors.ThrowPreconditionFailed(nil, "MODEL-4m9gs", "builder invalid")
}
- builder.tenant = tenantID
+ builder.instanceID = instanceID
filters := make([][]*repository.Filter, len(builder.queries))

for i, query := range builder.queries {

@@ -163,7 +163,7 @@ func (builder *SearchQueryBuilder) build(tenantID string) (*repository.SearchQue
query.eventSequenceGreaterFilter,
query.eventSequenceLessFilter,
query.builder.resourceOwnerFilter,
- query.builder.tenantFilter,
+ query.builder.instanceIDFilter,
} {
if filter := f(); filter != nil {
if err := filter.Validate(); err != nil {

@@ -247,11 +247,11 @@ func (builder *SearchQueryBuilder) resourceOwnerFilter() *repository.Filter {
return repository.NewFilter(repository.FieldResourceOwner, builder.resourceOwner, repository.OperationEquals)
}

- func (builder *SearchQueryBuilder) tenantFilter() *repository.Filter {
- if builder.tenant == "" {
+ func (builder *SearchQueryBuilder) instanceIDFilter() *repository.Filter {
+ if builder.instanceID == "" {
return nil
}
- return repository.NewFilter(repository.FieldTenant, builder.tenant, repository.OperationEquals)
+ return repository.NewFilter(repository.FieldInstanceID, builder.instanceID, repository.OperationEquals)
}

func (query *SearchQuery) eventDataFilter() *repository.Filter {
@@ -224,9 +224,9 @@ func TestSearchQuerybuilderSetters(t *testing.T) {

func TestSearchQuerybuilderBuild(t *testing.T) {
type args struct {
columns Columns
setters []func(*SearchQueryBuilder) *SearchQueryBuilder
tenant string
columns Columns
setters []func(*SearchQueryBuilder) *SearchQueryBuilder
instanceID string
}
type res struct {
isErr func(err error) bool
@@ -622,7 +622,7 @@ func TestSearchQuerybuilderBuild(t *testing.T) {
},
},
{
name: "filter aggregate type and tenant",
name: "filter aggregate type and instanceID",
args: args{
columns: ColumnsEvent,
setters: []func(*SearchQueryBuilder) *SearchQueryBuilder{
@@ -630,7 +630,7 @@ func TestSearchQuerybuilderBuild(t *testing.T) {
testSetAggregateTypes("user"),
),
},
tenant: "tenant",
instanceID: "instanceID",
},
res: res{
isErr: nil,
@@ -641,7 +641,7 @@ func TestSearchQuerybuilderBuild(t *testing.T) {
Filters: [][]*repository.Filter{
{
repository.NewFilter(repository.FieldAggregateType, repository.AggregateType("user"), repository.OperationEquals),
repository.NewFilter(repository.FieldTenant, "tenant", repository.OperationEquals),
repository.NewFilter(repository.FieldInstanceID, "instanceID", repository.OperationEquals),
},
},
},
@@ -668,7 +668,7 @@ func TestSearchQuerybuilderBuild(t *testing.T) {
for _, f := range tt.args.setters {
builder = f(builder)
}
query, err := builder.build(tt.args.tenant)
query, err := builder.build(tt.args.instanceID)
if tt.res.isErr != nil && !tt.res.isErr(err) {
t.Errorf("wrong error(%T): %v", err, err)
return
@@ -122,7 +122,7 @@ func mapEventToV1Event(event Event) *models.Event {
AggregateType: models.AggregateType(event.Aggregate().Type),
AggregateID: event.Aggregate().ID,
ResourceOwner: event.Aggregate().ResourceOwner,
Tenant: event.Aggregate().Tenant,
InstanceID: event.Aggregate().InstanceID,
EditorService: event.EditorService(),
EditorUser: event.EditorUser(),
Data: event.DataAsBytes(),
@@ -7,15 +7,16 @@ import (
"time"

"github.com/DATA-DOG/go-sqlmock"

"github.com/caos/zitadel/internal/eventstore/v1/models"
)

const (
selectEscaped = `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events WHERE aggregate_type = \$1`
selectEscaped = `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events WHERE aggregate_type = \$1`
)

var (
eventColumns = []string{"creation_date", "event_type", "event_sequence", "previous_aggregate_sequence", "event_data", "editor_service", "editor_user", "resource_owner", "tenant", "aggregate_type", "aggregate_id", "aggregate_version"}
eventColumns = []string{"creation_date", "event_type", "event_sequence", "previous_aggregate_sequence", "event_data", "editor_service", "editor_user", "resource_owner", "instance_id", "aggregate_type", "aggregate_id", "aggregate_version"}
expectedFilterEventsLimitFormat = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence LIMIT \$2`).String()
expectedFilterEventsDescFormat = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence DESC`).String()
expectedFilterEventsAggregateIDLimit = regexp.MustCompile(selectEscaped + ` AND aggregate_id = \$2 ORDER BY event_sequence LIMIT \$3`).String()
@@ -23,7 +24,7 @@ var (
expectedGetAllEvents = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence`).String()

expectedInsertStatement = regexp.MustCompile(`INSERT INTO eventstore\.events ` +
`\(event_type, aggregate_type, aggregate_id, aggregate_version, creation_date, event_data, editor_user, editor_service, resource_owner, tenant, previous_aggregate_sequence, previous_aggregate_type_sequence\) ` +
`\(event_type, aggregate_type, aggregate_id, aggregate_version, creation_date, event_data, editor_user, editor_service, resource_owner, instance_id, previous_aggregate_sequence, previous_aggregate_type_sequence\) ` +
`SELECT \$1, \$2, \$3, \$4, COALESCE\(\$5, now\(\)\), \$6, \$7, \$8, \$9, \$10, \$11 ` +
`WHERE EXISTS \(` +
`SELECT 1 FROM eventstore\.events WHERE aggregate_type = \$12 AND aggregate_id = \$13 HAVING MAX\(event_sequence\) = \$14 OR \(\$14::BIGINT IS NULL AND COUNT\(\*\) = 0\)\) ` +
@@ -99,7 +100,7 @@ func (db *dbMock) expectRollback(err error) *dbMock {
func (db *dbMock) expectInsertEvent(e *models.Event, returnedSequence uint64) *dbMock {
db.mock.ExpectQuery(expectedInsertStatement).
WithArgs(
e.Type, e.AggregateType, e.AggregateID, e.AggregateVersion, sqlmock.AnyArg(), Data(e.Data), e.EditorUser, e.EditorService, e.ResourceOwner, e.Tenant, Sequence(e.PreviousSequence),
e.Type, e.AggregateType, e.AggregateID, e.AggregateVersion, sqlmock.AnyArg(), Data(e.Data), e.EditorUser, e.EditorService, e.ResourceOwner, e.InstanceID, Sequence(e.PreviousSequence),
e.AggregateType, e.AggregateID, Sequence(e.PreviousSequence), Sequence(e.PreviousSequence),
).
WillReturnRows(
@@ -113,7 +114,7 @@ func (db *dbMock) expectInsertEvent(e *models.Event, returnedSequence uint64) *d
func (db *dbMock) expectInsertEventError(e *models.Event) *dbMock {
db.mock.ExpectQuery(expectedInsertStatement).
WithArgs(
e.Type, e.AggregateType, e.AggregateID, e.AggregateVersion, sqlmock.AnyArg(), Data(e.Data), e.EditorUser, e.EditorService, e.ResourceOwner, e.Tenant, Sequence(e.PreviousSequence),
e.Type, e.AggregateType, e.AggregateID, e.AggregateVersion, sqlmock.AnyArg(), Data(e.Data), e.EditorUser, e.EditorService, e.ResourceOwner, e.InstanceID, Sequence(e.PreviousSequence),
e.AggregateType, e.AggregateID, Sequence(e.PreviousSequence), Sequence(e.PreviousSequence),
).
WillReturnError(sql.ErrTxDone)
@@ -124,7 +125,7 @@ func (db *dbMock) expectInsertEventError(e *models.Event) *dbMock {
func (db *dbMock) expectFilterEventsLimit(aggregateType string, limit uint64, eventCount int) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := 0; i < eventCount; i++ {
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "tenant", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "instanceID", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsLimitFormat).
WithArgs(aggregateType, limit).
@@ -135,7 +136,7 @@ func (db *dbMock) expectFilterEventsLimit(aggregateType string, limit uint64, ev
func (db *dbMock) expectFilterEventsDesc(aggregateType string, eventCount int) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := eventCount; i > 0; i-- {
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "tenant", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "instanceID", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsDescFormat).
WillReturnRows(rows)
@@ -145,7 +146,7 @@ func (db *dbMock) expectFilterEventsDesc(aggregateType string, eventCount int) *
func (db *dbMock) expectFilterEventsAggregateIDLimit(aggregateType, aggregateID string, limit uint64) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := limit; i > 0; i-- {
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "tenant", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "instanceID", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsAggregateIDLimit).
WithArgs(aggregateType, aggregateID, limit).
@@ -156,7 +157,7 @@ func (db *dbMock) expectFilterEventsAggregateIDLimit(aggregateType, aggregateID
func (db *dbMock) expectFilterEventsAggregateIDTypeLimit(aggregateType, aggregateID string, limit uint64) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := limit; i > 0; i-- {
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "tenant", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "instanceID", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsAggregateIDTypeLimit).
WithArgs(aggregateType, aggregateID, limit).
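The mock expectations above only change in one spot: the stubbed result rows and the insert arguments now carry instance_id where tenant used to sit. A self-contained go-sqlmock sketch of the same idea (column list shortened, names illustrative, not the repository's test code):

package main

import (
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// The mocked column order has to match the SELECT used by the code under test,
	// with instance_id in the position previously occupied by tenant.
	cols := []string{"event_sequence", "resource_owner", "instance_id", "aggregate_type", "aggregate_id"}
	rows := sqlmock.NewRows(cols).
		AddRow(1, "org", "instanceID", "user", "agg-1")

	mock.ExpectQuery("SELECT .* FROM eventstore.events").WillReturnRows(rows)

	var seq uint64
	var resourceOwner, instanceID, aggType, aggID string
	err = db.QueryRow("SELECT event_sequence, resource_owner, instance_id, aggregate_type, aggregate_id FROM eventstore.events").
		Scan(&seq, &resourceOwner, &instanceID, &aggType, &aggID)
	if err != nil {
		panic(err)
	}
	fmt.Println(seq, resourceOwner, instanceID, aggType, aggID)
}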
@@ -8,9 +8,10 @@ import (
"strings"

"github.com/caos/logging"
"github.com/lib/pq"

z_errors "github.com/caos/zitadel/internal/errors"
es_models "github.com/caos/zitadel/internal/eventstore/v1/models"
"github.com/lib/pq"
)

const (
@@ -23,7 +24,7 @@ const (
", editor_service" +
", editor_user" +
", resource_owner" +
", tenant" +
", instance_id" +
", aggregate_type" +
", aggregate_id" +
", aggregate_version" +
@@ -117,7 +118,7 @@ func prepareColumns(columns es_models.Columns) (string, func(s scan, dest interf
&event.EditorService,
&event.EditorUser,
&event.ResourceOwner,
&event.Tenant,
&event.InstanceID,
&event.AggregateType,
&event.AggregateID,
&event.AggregateVersion,
@@ -177,8 +178,8 @@ func getField(field es_models.Field) string {
return "event_sequence"
case es_models.Field_ResourceOwner:
return "resource_owner"
case es_models.Field_Tenant:
return "tenant"
case es_models.Field_InstanceID:
return "instance_id"
case es_models.Field_EditorService:
return "editor_service"
case es_models.Field_EditorUser:
@@ -6,9 +6,10 @@ import (
"testing"
"time"

"github.com/lib/pq"

"github.com/caos/zitadel/internal/errors"
es_models "github.com/caos/zitadel/internal/eventstore/v1/models"
"github.com/lib/pq"
)

func Test_numberPlaceholder(t *testing.T) {
@@ -80,7 +81,7 @@ func Test_getField(t *testing.T) {
es_models.Field_AggregateID: "aggregate_id",
es_models.Field_LatestSequence: "event_sequence",
es_models.Field_ResourceOwner: "resource_owner",
es_models.Field_Tenant: "tenant",
es_models.Field_InstanceID: "instance_id",
es_models.Field_EditorService: "editor_service",
es_models.Field_EditorUser: "editor_user",
es_models.Field_EventType: "event_type",
@@ -235,7 +236,7 @@ func Test_prepareColumns(t *testing.T) {
dest: new(es_models.Event),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbRow: []interface{}{time.Time{}, es_models.EventType(""), uint64(5), Sequence(0), Data(nil), "", "", "", "", es_models.AggregateType("user"), "hodor", es_models.Version("")},
expected: es_models.Event{AggregateID: "hodor", AggregateType: "user", Sequence: 5, Data: make(Data, 0)},
},
@@ -247,7 +248,7 @@ func Test_prepareColumns(t *testing.T) {
dest: new(uint64),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsErrorInvalidArgument,
},
},
@@ -259,7 +260,7 @@ func Test_prepareColumns(t *testing.T) {
dbErr: sql.ErrConnDone,
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsInternal,
},
},
@@ -430,7 +431,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory("user").OrderDesc(),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user")},
},
@@ -441,7 +442,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory("user").Limit(5),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence LIMIT $2",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence LIMIT $2",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user"), uint64(5)},
limit: 5,
@@ -453,7 +454,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory("user").Limit(5).OrderDesc(),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, tenant, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC LIMIT $2",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC LIMIT $2",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user"), uint64(5)},
limit: 5,
@@ -23,7 +23,7 @@ type Aggregate struct {
editorService string
editorUser string
resourceOwner string
tenant string
instanceID string
Events []*Event
Precondition *precondition
}
@@ -56,7 +56,7 @@ func (a *Aggregate) AppendEvent(typ EventType, payload interface{}) (*Aggregate,
EditorService: a.editorService,
EditorUser: a.editorUser,
ResourceOwner: a.resourceOwner,
Tenant: a.tenant,
InstanceID: a.instanceID,
}

a.Events = append(a.Events, e)
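The change to AppendEvent keeps one invariant: every event appended to a v1 aggregate inherits the aggregate's instance ID, exactly as it already inherited the resource owner. A toy sketch of that invariant (not the repository's types):

package main

import "fmt"

type event struct {
	Type       string
	InstanceID string
}

type aggregate struct {
	instanceID string
	events     []*event
}

// appendEvent mirrors the diff: the event inherits the aggregate's instance ID.
func (a *aggregate) appendEvent(typ string) *event {
	e := &event{Type: typ, InstanceID: a.instanceID}
	a.events = append(a.events, e)
	return e
}

func main() {
	a := &aggregate{instanceID: "instance-1"}
	e := a.appendEvent("user.added")
	fmt.Println(e.InstanceID) // instance-1
}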
@@ -18,9 +18,10 @@ type option func(*Aggregate)

func (c *AggregateCreator) NewAggregate(ctx context.Context, id string, typ AggregateType, version Version, previousSequence uint64, opts ...option) (*Aggregate, error) {
ctxData := authz.GetCtxData(ctx)
instance := authz.GetInstance(ctx)
editorUser := ctxData.UserID
resourceOwner := ctxData.OrgID
tenant := ctxData.TenantID
instanceID := instance.ID

aggregate := &Aggregate{
ID: id,
@@ -31,7 +32,7 @@ func (c *AggregateCreator) NewAggregate(ctx context.Context, id string, typ Aggr
editorService: c.serviceName,
editorUser: editorUser,
resourceOwner: resourceOwner,
tenant: tenant,
instanceID: instanceID,
}

for _, opt := range opts {
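NewAggregate now takes the instance ID from the request context via authz.GetInstance(ctx).ID instead of from the ctx data's TenantID. The sketch below shows that idea with a plain context value; apart from the general pattern, all names here are invented for illustration.

package main

import (
	"context"
	"fmt"
)

type ctxKey struct{}

// getInstanceID is a stand-in for authz.GetInstance(ctx).ID.
func getInstanceID(ctx context.Context) string {
	id, _ := ctx.Value(ctxKey{}).(string)
	return id
}

type aggregate struct {
	id            string
	resourceOwner string
	instanceID    string
}

func newAggregate(ctx context.Context, id, resourceOwner string) *aggregate {
	return &aggregate{
		id:            id,
		resourceOwner: resourceOwner,
		instanceID:    getInstanceID(ctx), // scoped to the instance of the current request
	}
}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "instance-1")
	fmt.Println(newAggregate(ctx, "agg-1", "org-1").instanceID) // instance-1
}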
@@ -28,7 +28,7 @@ type Event struct {
EditorService string
EditorUser string
ResourceOwner string
Tenant string
InstanceID string
}

func eventData(i interface{}) ([]byte, error) {
@@ -11,5 +11,5 @@ const (
Field_EditorUser
Field_EventType
Field_CreationDate
Field_Tenant
Field_InstanceID
)
@@ -8,7 +8,7 @@ type ObjectRoot struct {
AggregateID string `json:"-"`
Sequence uint64 `json:"-"`
ResourceOwner string `json:"-"`
Tenant string `json:"-"`
InstanceID string `json:"-"`
CreationDate time.Time `json:"-"`
ChangeDate time.Time `json:"-"`
}
@@ -22,8 +22,8 @@ func (o *ObjectRoot) AppendEvent(event *Event) {
if o.ResourceOwner == "" {
o.ResourceOwner = event.ResourceOwner
}
if o.Tenant == "" {
o.Tenant = event.Tenant
if o.InstanceID == "" {
o.InstanceID = event.InstanceID
}

o.ChangeDate = event.CreationDate
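ObjectRoot adopts the instance ID only from the first event it applies, the same backfill rule already used for the resource owner. A runnable toy version of that rule (simplified types, not the repository's):

package main

import "fmt"

type event struct {
	ResourceOwner string
	InstanceID    string
}

type objectRoot struct {
	ResourceOwner string
	InstanceID    string
}

// appendEvent mirrors the diff: empty fields are filled from the event, existing values win.
func (o *objectRoot) appendEvent(e *event) {
	if o.ResourceOwner == "" {
		o.ResourceOwner = e.ResourceOwner
	}
	if o.InstanceID == "" {
		o.InstanceID = e.InstanceID
	}
}

func main() {
	o := &objectRoot{}
	o.appendEvent(&event{ResourceOwner: "org-1", InstanceID: "instance-1"})
	o.appendEvent(&event{ResourceOwner: "org-2", InstanceID: "instance-2"})
	fmt.Println(o.ResourceOwner, o.InstanceID) // org-1 instance-1: the first event wins
}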
@@ -4,6 +4,7 @@ import (
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/errors"
)

@@ -17,7 +18,7 @@ type SearchQueryFactory struct {
sequenceTo uint64
eventTypes []EventType
resourceOwner string
tenant string
instanceID string
creationDate time.Time
}

@@ -63,8 +64,8 @@ func FactoryFromSearchQuery(query *SearchQuery) *SearchQueryFactory {
}
case Field_ResourceOwner:
factory = factory.ResourceOwner(filter.value.(string))
case Field_Tenant:
factory = factory.Tenant(filter.value.(string))
case Field_InstanceID:
factory = factory.InstanceID(filter.value.(string))
case Field_EventType:
factory = factory.EventTypes(filter.value.([]EventType)...)
case Field_EditorService, Field_EditorUser:
@@ -123,8 +124,8 @@ func (factory *SearchQueryFactory) ResourceOwner(resourceOwner string) *SearchQu
return factory
}

func (factory *SearchQueryFactory) Tenant(tenant string) *SearchQueryFactory {
factory.tenant = tenant
func (factory *SearchQueryFactory) InstanceID(instanceID string) *SearchQueryFactory {
factory.instanceID = instanceID
return factory
}

@@ -159,7 +160,7 @@ func (factory *SearchQueryFactory) Build() (*searchQuery, error) {
factory.sequenceToFilter,
factory.eventTypeFilter,
factory.resourceOwnerFilter,
factory.tenantFilter,
factory.instanceIDFilter,
factory.creationDateNewerFilter,
} {
if filter := f(); filter != nil {
@@ -231,11 +232,11 @@ func (factory *SearchQueryFactory) resourceOwnerFilter() *Filter {
return NewFilter(Field_ResourceOwner, factory.resourceOwner, Operation_Equals)
}

func (factory *SearchQueryFactory) tenantFilter() *Filter {
if factory.tenant == "" {
func (factory *SearchQueryFactory) instanceIDFilter() *Filter {
if factory.instanceID == "" {
return nil
}
return NewFilter(Field_Tenant, factory.tenant, Operation_Equals)
return NewFilter(Field_InstanceID, factory.instanceID, Operation_Equals)
}

func (factory *SearchQueryFactory) creationDateNewerFilter() *Filter {
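Taken together, the v1 factory changes mean a caller scopes a query to an instance the same way it already scopes to a resource owner. A hypothetical call site inside this repository (es_models is the import alias used above; InstanceID, OrderDesc and Build are the methods shown in the diff, while the surrounding function is invented for illustration):

func filterUserEventsForInstance(instanceID string) error {
	factory := es_models.NewSearchQueryFactory("user").
		InstanceID(instanceID).
		OrderDesc()
	query, err := factory.Build()
	if err != nil {
		return err
	}
	_ = query // carries an instance_id equals filter next to the aggregate type filter
	return nil
}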
@@ -69,8 +69,8 @@ func (q *SearchQuery) ResourceOwnerFilter(resourceOwner string) *SearchQuery {
return q.setFilter(NewFilter(Field_ResourceOwner, resourceOwner, Operation_Equals))
}

func (q *SearchQuery) TenantFilter(tenant string) *SearchQuery {
return q.setFilter(NewFilter(Field_Tenant, tenant, Operation_Equals))
func (q *SearchQuery) InstanceIDFilter(instanceID string) *SearchQuery {
return q.setFilter(NewFilter(Field_InstanceID, instanceID, Operation_Equals))
}

func (q *SearchQuery) CreationDateNewerFilter(time time.Time) *SearchQuery {
@@ -54,7 +54,7 @@ func ReduceEvent(handler Handler, event *models.Event) {

unprocessedEvents, err := handler.Eventstore().FilterEvents(context.Background(), searchQuery)
if err != nil {
logging.LogWithFields("HANDL-L6YH1", "seq", event.Sequence).Warn("filter failed")
logging.WithFields("HANDL-L6YH1", "sequence", event.Sequence).Warn("filter failed")
return
}

@@ -74,12 +74,12 @@ func ReduceEvent(handler Handler, event *models.Event) {
}

err = handler.Reduce(unprocessedEvent)
logging.LogWithFields("HANDL-V42TI", "seq", unprocessedEvent.Sequence).OnError(err).Warn("reduce failed")
logging.WithFields("HANDL-V42TI", "sequence", unprocessedEvent.Sequence).OnError(err).Warn("reduce failed")
}
if len(unprocessedEvents) == eventLimit {
logging.LogWithFields("QUERY-BSqe9", "seq", event.Sequence).Warn("didnt process event")
logging.WithFields("QUERY-BSqe9", "sequence", event.Sequence).Warn("didnt process event")
return
}
err = handler.Reduce(event)
logging.LogWithFields("HANDL-wQDL2", "seq", event.Sequence).OnError(err).Warn("reduce failed")
logging.WithFields("HANDL-wQDL2", "sequence", event.Sequence).OnError(err).Warn("reduce failed")
}
@@ -10,7 +10,7 @@ type WriteModel struct {
ProcessedSequence uint64 `json:"-"`
Events []Event `json:"-"`
ResourceOwner string `json:"-"`
Tenant string `json:"-"`
InstanceID string `json:"-"`
ChangeDate time.Time `json:"-"`
}

@@ -33,8 +33,8 @@ func (wm *WriteModel) Reduce() error {
if wm.ResourceOwner == "" {
wm.ResourceOwner = wm.Events[0].Aggregate().ResourceOwner
}
if wm.Tenant == "" {
wm.Tenant = wm.Events[0].Aggregate().Tenant
if wm.InstanceID == "" {
wm.InstanceID = wm.Events[0].Aggregate().InstanceID
}

wm.ProcessedSequence = wm.Events[len(wm.Events)-1].Sequence()