feat(eventstore): add search table (#8191)
# Which Problems Are Solved

To improve performance, a new table and method are added to the eventstore. The goal of this table is to index searchable fields on the command side so they can be used on both the command and query side. The table stores one primitive value (numeric or text) per row. The eventstore framework is extended by the `Search` method, which allows searching for objects. The `Command` interface is extended by the `Fields()` method, which manipulates the `eventstore.fields` table.

# How the Problems Are Solved

This PR adds the capability to improve performance on the command and query side by using the `Search` method of the eventstore instead of one of the `Filter` methods.

# Open Tasks

- [x] Add feature flag
- [x] Unit tests
- [ ] ~~Benchmarks if needed~~
- [x] Ensure no behavior change
- [x] Add setup step to fill table with current data
- [x] Add projection which ensures data added between setup and start of the new version is also added to the table

# Additional Changes

The `Search` method is currently used by the `ProjectGrant` command side.

# Additional Context

- Closes https://github.com/zitadel/zitadel/issues/8094
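For orientation, here is a minimal sketch of how the new API fits together, based only on the signatures added in this PR (`SetField`, `Value`, `Object`, the `FieldType` constants, and `Eventstore.Search`). The command type, the object type `project_grant`, and the field name `state` are illustrative and not part of this change:

```go
package example

import (
	"context"

	"github.com/zitadel/zitadel/internal/eventstore"
)

// grantAddedCommand is a hypothetical command; only the eventstore types and
// functions it uses are introduced by this PR.
type grantAddedCommand struct {
	agg     *eventstore.Aggregate
	grantID string
}

// Fields declares which values of the command should be written to the new
// eventstore.fields table.
func (c *grantAddedCommand) Fields() []*eventstore.FieldOperation {
	return []*eventstore.FieldOperation{
		eventstore.SetField(
			c.agg,
			eventstore.Object{Type: "project_grant", ID: c.grantID, Revision: 1},
			"state", // illustrative field name
			&eventstore.Value{Value: "active", ShouldIndex: true},
			// conflict fields: update the existing row instead of inserting a new one
			eventstore.FieldTypeInstanceID,
			eventstore.FieldTypeAggregateID,
			eventstore.FieldTypeObjectType,
			eventstore.FieldTypeObjectID,
			eventstore.FieldTypeFieldName,
		),
	}
}

// grantState reads the indexed value back through the eventstore instead of
// filtering and reducing events.
func grantState(ctx context.Context, es *eventstore.Eventstore, grantID string) (string, error) {
	results, err := es.Search(ctx,
		// entries of one condition map are ANDed; multiple maps are ORed
		map[eventstore.FieldType]any{
			eventstore.FieldTypeObjectType: "project_grant",
			eventstore.FieldTypeObjectID:   grantID,
			eventstore.FieldTypeFieldName:  "state",
		},
	)
	if err != nil || len(results) == 0 {
		return "", err
	}
	var state string
	err = results[0].Value.Unmarshal(&state)
	return state, err
}
```

On push, the eventstore walks the commands and, for every non-nil `Fields()` result, writes or removes the corresponding rows in `eventstore.fields` within the same transaction as the events.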
@@ -8,6 +8,7 @@ type Config struct {
 	PushTimeout time.Duration
 	MaxRetries  uint32

-	Pusher  Pusher
-	Querier Querier
+	Pusher   Pusher
+	Querier  Querier
+	Searcher Searcher
 }

@@ -31,6 +31,8 @@ type Command interface {
 	Payload() any
 	// UniqueConstraints should be added for unique attributes of an event, if nil constraints will not be checked
 	UniqueConstraints() []*UniqueConstraint
+	// Fields should be added for fields which should be indexed for lookup, if nil fields will not be indexed
+	Fields() []*FieldOperation
 }

 // Event is a stored activity

@@ -123,3 +123,7 @@ func NewBaseEventForPush(ctx context.Context, aggregate *Aggregate, typ EventTyp
 		EventType: typ,
 	}
 }
+
+func (*BaseEvent) Fields() []*FieldOperation {
+	return nil
+}
@@ -11,6 +11,7 @@ import (
 	"github.com/zitadel/logging"

 	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/zerrors"
 )

 // Eventstore abstracts all functions needed to store valid events

@@ -19,8 +20,9 @@ type Eventstore struct {
 	PushTimeout time.Duration
 	maxRetries  int

-	pusher  Pusher
-	querier Querier
+	pusher   Pusher
+	querier  Querier
+	searcher Searcher

 	instances         []string
 	lastInstanceQuery time.Time

@@ -62,8 +64,9 @@ func NewEventstore(config *Config) *Eventstore {
 		PushTimeout: config.PushTimeout,
 		maxRetries:  int(config.MaxRetries),

-		pusher:  config.Pusher,
-		querier: config.Querier,
+		pusher:   config.Pusher,
+		querier:  config.Querier,
+		searcher: config.Searcher,

 		instancesMu: sync.Mutex{},
 	}

@@ -127,6 +130,20 @@ func (es *Eventstore) AggregateTypes() []string {
 	return aggregateTypes
 }

+// FillFields implements the [Searcher] interface
+func (es *Eventstore) FillFields(ctx context.Context, events ...FillFieldsEvent) error {
+	return es.searcher.FillFields(ctx, events...)
+}
+
+// Search implements the [Searcher] interface
+func (es *Eventstore) Search(ctx context.Context, conditions ...map[FieldType]any) ([]*SearchResult, error) {
+	if len(conditions) == 0 {
+		return nil, zerrors.ThrowInvalidArgument(nil, "V3-5Xbr1", "no search conditions")
+	}
+
+	return es.searcher.Search(ctx, conditions...)
+}
+
 // Filter filters the stored events based on the searchQuery
 // and maps the events to the defined event structs
 //

@@ -262,6 +279,22 @@ type Pusher interface {
 	Push(ctx context.Context, commands ...Command) (_ []Event, err error)
 }

+type FillFieldsEvent interface {
+	Event
+	Fields() []*FieldOperation
+}
+
+type Searcher interface {
+	// Search allows to search for specific fields of objects
+	// The instance id is taken from the context
+	// The fields of a condition are combined with AND
+	// The conditions are combined with OR
+	// At least one condition must be defined
+	Search(ctx context.Context, conditions ...map[FieldType]any) (result []*SearchResult, err error)
+	// FillFields is to insert the fields of previously stored events
+	FillFields(ctx context.Context, events ...FillFieldsEvent) error
+}
+
 func appendEventType(typ EventType) {
 	i := sort.SearchStrings(eventTypes, string(typ))
 	if i < len(eventTypes) && eventTypes[i] == string(typ) {
internal/eventstore/field.go (new file, 140 lines)
@@ -0,0 +1,140 @@
package eventstore

// FieldOperation is the definition of the operation to be executed on the field
type FieldOperation struct {
	// Set a field in the field table
	// if [SearchField.UpsertConflictFields] are set the field will be updated if the conflict fields match
	// if no [SearchField.UpsertConflictFields] are set the field will be inserted
	Set *Field
	// Remove fields using the map as `AND`ed conditions
	Remove map[FieldType]any
}

type SearchResult struct {
	Aggregate Aggregate
	Object    Object
	FieldName string
	// Value represents the stored value
	// use the Unmarshal method to parse the value to the desired type
	Value interface {
		// Unmarshal parses the value to ptr
		Unmarshal(ptr any) error
	}
}

// // NumericResultValue marshals the value to the given type

type Object struct {
	// Type of the object
	Type string
	// ID of the object
	ID string
	// Revision of the object, if an object evolves the revision should be increased
	// analog to current projection versioning
	Revision uint8
}

type Field struct {
	Aggregate            *Aggregate
	Object               Object
	UpsertConflictFields []FieldType
	FieldName            string
	Value                Value
}

type Value struct {
	Value any
	// MustBeUnique defines if the field must be unique
	// This field will replace unique constraints in the future
	// If MustBeUnique is true the value must be a primitive type
	MustBeUnique bool
	// ShouldIndex defines if the field should be indexed
	// If the field is not indexed it can not be used in search queries
	// If ShouldIndex is true the value must be a primitive type
	ShouldIndex bool
}

type SearchValueType int8

const (
	SearchValueTypeString SearchValueType = iota
	SearchValueTypeNumeric
)

// SetField sets the field based on the defined parameters
// if conflictFields are set the field will be updated if the conflict fields match
func SetField(aggregate *Aggregate, object Object, fieldName string, value *Value, conflictFields ...FieldType) *FieldOperation {
	return &FieldOperation{
		Set: &Field{
			Aggregate:            aggregate,
			Object:               object,
			UpsertConflictFields: conflictFields,
			FieldName:            fieldName,
			Value:                *value,
		},
	}
}

// RemoveSearchFields removes fields using the map as `AND`ed conditions
func RemoveSearchFields(clause map[FieldType]any) *FieldOperation {
	return &FieldOperation{
		Remove: clause,
	}
}

// RemoveSearchFieldsByAggregate removes fields using the aggregate as `AND`ed conditions
func RemoveSearchFieldsByAggregate(aggregate *Aggregate) *FieldOperation {
	return &FieldOperation{
		Remove: map[FieldType]any{
			FieldTypeInstanceID:    aggregate.InstanceID,
			FieldTypeResourceOwner: aggregate.ResourceOwner,
			FieldTypeAggregateType: aggregate.Type,
			FieldTypeAggregateID:   aggregate.ID,
		},
	}
}

// RemoveSearchFieldsByAggregateAndObject removes fields using the aggregate and object as `AND`ed conditions
func RemoveSearchFieldsByAggregateAndObject(aggregate *Aggregate, object Object) *FieldOperation {
	return &FieldOperation{
		Remove: map[FieldType]any{
			FieldTypeInstanceID:     aggregate.InstanceID,
			FieldTypeResourceOwner:  aggregate.ResourceOwner,
			FieldTypeAggregateType:  aggregate.Type,
			FieldTypeAggregateID:    aggregate.ID,
			FieldTypeObjectType:     object.Type,
			FieldTypeObjectID:       object.ID,
			FieldTypeObjectRevision: object.Revision,
		},
	}
}

// RemoveSearchFieldsByAggregateAndObjectAndField removes fields using the aggregate, object and field as `AND`ed conditions
func RemoveSearchFieldsByAggregateAndObjectAndField(aggregate *Aggregate, object Object, field string) *FieldOperation {
	return &FieldOperation{
		Remove: map[FieldType]any{
			FieldTypeInstanceID:     aggregate.InstanceID,
			FieldTypeResourceOwner:  aggregate.ResourceOwner,
			FieldTypeAggregateType:  aggregate.Type,
			FieldTypeAggregateID:    aggregate.ID,
			FieldTypeObjectType:     object.Type,
			FieldTypeObjectID:       object.ID,
			FieldTypeObjectRevision: object.Revision,
			FieldTypeFieldName:      field,
		},
	}
}

type FieldType int8

const (
	FieldTypeAggregateType FieldType = iota
	FieldTypeAggregateID
	FieldTypeInstanceID
	FieldTypeResourceOwner
	FieldTypeObjectType
	FieldTypeObjectID
	FieldTypeObjectRevision
	FieldTypeFieldName
	FieldTypeValue
)
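As a counterpart to `SetField`, a command that deletes an object would typically return one of the `RemoveSearchFields*` helpers above from its `Fields()` method. A short hedged sketch; the command struct and object type are illustrative:

```go
package example

import "github.com/zitadel/zitadel/internal/eventstore"

// grantRemovedCommand is a hypothetical removal command.
type grantRemovedCommand struct {
	agg     *eventstore.Aggregate
	grantID string
}

// Fields removes every indexed field of the object, scoped to its aggregate,
// so stale rows do not show up in later Search calls.
func (c *grantRemovedCommand) Fields() []*eventstore.FieldOperation {
	return []*eventstore.FieldOperation{
		eventstore.RemoveSearchFieldsByAggregateAndObject(
			c.agg,
			eventstore.Object{Type: "project_grant", ID: c.grantID, Revision: 1},
		),
	}
}
```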
internal/eventstore/handler/v2/field_handler.go (new file, 205 lines)
@@ -0,0 +1,205 @@
package handler

import (
	"context"
	"database/sql"
	"errors"
	"sync"
	"time"

	"github.com/jackc/pgx/v5/pgconn"

	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/telemetry/tracing"
)

type FieldHandler struct {
	Handler
}

type fieldProjection struct {
	name string
}

// Name implements Projection.
func (f *fieldProjection) Name() string {
	return f.name
}

// Reducers implements Projection.
func (f *fieldProjection) Reducers() []AggregateReducer {
	return nil
}

var _ Projection = (*fieldProjection)(nil)

func NewFieldHandler(config *Config, name string, eventTypes map[eventstore.AggregateType][]eventstore.EventType) *FieldHandler {
	return &FieldHandler{
		Handler: Handler{
			projection:             &fieldProjection{name: name},
			client:                 config.Client,
			es:                     config.Eventstore,
			bulkLimit:              config.BulkLimit,
			eventTypes:             eventTypes,
			requeueEvery:           config.RequeueEvery,
			handleActiveInstances:  config.HandleActiveInstances,
			now:                    time.Now,
			maxFailureCount:        config.MaxFailureCount,
			retryFailedAfter:       config.RetryFailedAfter,
			triggeredInstancesSync: sync.Map{},
			triggerWithoutEvents:   config.TriggerWithoutEvents,
			txDuration:             config.TransactionDuration,
		},
	}
}

func (h *FieldHandler) Trigger(ctx context.Context, opts ...TriggerOpt) (err error) {
	config := new(triggerConfig)
	for _, opt := range opts {
		opt(config)
	}

	cancel := h.lockInstance(ctx, config)
	if cancel == nil {
		return nil
	}
	defer cancel()

	for i := 0; ; i++ {
		additionalIteration, err := h.processEvents(ctx, config)
		h.log().OnError(err).Info("process events failed")
		h.log().WithField("iteration", i).Debug("trigger iteration")
		if !additionalIteration || err != nil {
			return err
		}
	}
}

func (h *FieldHandler) processEvents(ctx context.Context, config *triggerConfig) (additionalIteration bool, err error) {
	defer func() {
		pgErr := new(pgconn.PgError)
		if errors.As(err, &pgErr) {
			// error returned if the row is currently locked by another connection
			if pgErr.Code == "55P03" {
				h.log().Debug("state already locked")
				err = nil
				additionalIteration = false
			}
		}
	}()

	txCtx := ctx
	if h.txDuration > 0 {
		var cancel, cancelTx func()
		// add 100ms to store current state if iteration takes too long
		txCtx, cancelTx = context.WithTimeout(ctx, h.txDuration+100*time.Millisecond)
		defer cancelTx()
		ctx, cancel = context.WithTimeout(ctx, h.txDuration)
		defer cancel()
	}

	ctx, spanBeginTx := tracing.NewNamedSpan(ctx, "db.BeginTx")
	tx, err := h.client.BeginTx(txCtx, nil)
	spanBeginTx.EndWithError(err)
	if err != nil {
		return false, err
	}
	defer func() {
		if err != nil && !errors.Is(err, &executionError{}) {
			rollbackErr := tx.Rollback()
			h.log().OnError(rollbackErr).Debug("unable to rollback tx")
			return
		}
		commitErr := tx.Commit()
		if err == nil {
			err = commitErr
		}
	}()

	currentState, err := h.currentState(ctx, tx, config)
	if err != nil {
		if errors.Is(err, errJustUpdated) {
			return false, nil
		}
		return additionalIteration, err
	}
	// stop execution if currentState.position >= config.maxPosition
	if config.maxPosition != 0 && currentState.position >= config.maxPosition {
		return false, nil
	}

	events, additionalIteration, err := h.fetchEvents(ctx, tx, currentState)
	if err != nil {
		return additionalIteration, err
	}
	if len(events) == 0 {
		err = h.setState(tx, currentState)
		return additionalIteration, err
	}

	err = h.es.FillFields(ctx, events...)
	if err != nil {
		return false, err
	}

	err = h.setState(tx, currentState)

	return additionalIteration, err
}

func (h *FieldHandler) fetchEvents(ctx context.Context, tx *sql.Tx, currentState *state) (_ []eventstore.FillFieldsEvent, additionalIteration bool, err error) {
	events, err := h.es.Filter(ctx, h.eventQuery(currentState).SetTx(tx))
	if err != nil {
		h.log().WithError(err).Debug("filter eventstore failed")
		return nil, false, err
	}
	eventAmount := len(events)

	idx, offset := skipPreviouslyReducedEvents(events, currentState)

	if currentState.position == events[len(events)-1].Position() {
		offset += currentState.offset
	}
	currentState.position = events[len(events)-1].Position()
	currentState.offset = offset
	currentState.aggregateID = events[len(events)-1].Aggregate().ID
	currentState.aggregateType = events[len(events)-1].Aggregate().Type
	currentState.sequence = events[len(events)-1].Sequence()
	currentState.eventTimestamp = events[len(events)-1].CreatedAt()

	if idx+1 == len(events) {
		return nil, false, nil
	}
	events = events[idx+1:]

	additionalIteration = eventAmount == int(h.bulkLimit)

	fillFieldsEvents := make([]eventstore.FillFieldsEvent, len(events))
	highestPosition := events[len(events)-1].Position()
	for i, event := range events {
		if event.Position() == highestPosition {
			offset++
		}
		fillFieldsEvents[i] = event.(eventstore.FillFieldsEvent)
	}

	return fillFieldsEvents, additionalIteration, nil
}

func skipPreviouslyReducedEvents(events []eventstore.Event, currentState *state) (index int, offset uint32) {
	var position float64
	for i, event := range events {
		if event.Position() != position {
			offset = 0
			position = event.Position()
		}
		offset++
		if event.Position() == currentState.position &&
			event.Aggregate().ID == currentState.aggregateID &&
			event.Aggregate().Type == currentState.aggregateType &&
			event.Sequence() == currentState.sequence {
			return i, offset
		}
	}
	return -1, 0
}
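To cover events written between the setup step and the start of the new version, a `FieldHandler` can be wired up like any other v2 handler projection. A rough sketch, where the projection name and event types are placeholders and `handler.Config` is assumed to be populated the same way as for existing projections:

```go
package example

import (
	"context"

	"github.com/zitadel/zitadel/internal/eventstore"
	handler "github.com/zitadel/zitadel/internal/eventstore/handler/v2"
)

// newFieldsProjection wires the catch-up handler that fills the fields table
// for the listed event types.
func newFieldsProjection(config *handler.Config) *handler.FieldHandler {
	return handler.NewFieldHandler(
		config,
		"fields_project_grant", // placeholder projection name
		map[eventstore.AggregateType][]eventstore.EventType{
			"project": {"project.grant.added", "project.grant.changed"},
		},
	)
}

// catchUp triggers one processing run; Trigger loops internally until no
// additional iteration is needed.
func catchUp(ctx context.Context, h *handler.FieldHandler) error {
	return h.Trigger(ctx)
}
```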
@@ -28,6 +28,7 @@ type EventStore interface {
 	FilterToQueryReducer(ctx context.Context, reducer eventstore.QueryReducer) error
 	Filter(ctx context.Context, queryFactory *eventstore.SearchQueryBuilder) ([]eventstore.Event, error)
 	Push(ctx context.Context, cmds ...eventstore.Command) ([]eventstore.Event, error)
+	FillFields(ctx context.Context, events ...eventstore.FillFieldsEvent) error
 }

 type Config struct {

@@ -542,7 +543,7 @@ func (h *Handler) generateStatements(ctx context.Context, tx *sql.Tx, currentSta
 		return []*Statement{stmt}, false, nil
 	}

-	events, err := h.es.Filter(ctx, h.eventQuery(currentState))
+	events, err := h.es.Filter(ctx, h.eventQuery(currentState).SetTx(tx))
 	if err != nil {
 		h.log().WithError(err).Debug("filter eventstore failed")
 		return nil, false, err

@@ -554,7 +555,7 @@ func (h *Handler) generateStatements(ctx context.Context, tx *sql.Tx, currentSta
 		return nil, false, err
 	}

-	idx := skipPreviouslyReduced(statements, currentState)
+	idx := skipPreviouslyReducedStatements(statements, currentState)
 	if idx+1 == len(statements) {
 		currentState.position = statements[len(statements)-1].Position
 		currentState.offset = statements[len(statements)-1].offset

@@ -576,7 +577,7 @@ func (h *Handler) generateStatements(ctx context.Context, tx *sql.Tx, currentSta
 	return statements, additionalIteration, nil
 }

-func skipPreviouslyReduced(statements []*Statement, currentState *state) int {
+func skipPreviouslyReducedStatements(statements []*Statement, currentState *state) int {
 	for i, statement := range statements {
 		if statement.Position == currentState.position &&
 			statement.AggregateID == currentState.aggregateID &&

@@ -655,12 +656,11 @@ func (h *Handler) eventQuery(currentState *state) *eventstore.SearchQueryBuilder
 	}

 	for aggregateType, eventTypes := range h.eventTypes {
-		query := builder.
+		builder = builder.
 			AddQuery().
 			AggregateTypes(aggregateType).
-			EventTypes(eventTypes...)
-
-		builder = query.Builder()
+			EventTypes(eventTypes...).
+			Builder()
 	}

 	return builder

@@ -120,3 +120,7 @@ func (e *Event) Payload() any {
 func (e *Event) UniqueConstraints() []*eventstore.UniqueConstraint {
 	return e.Constraints
 }
+
+func (e *Event) Fields() []*eventstore.FieldOperation {
+	return nil
+}
internal/eventstore/v3/field.go (new file, 367 lines)
@@ -0,0 +1,367 @@
package eventstore

import (
	"context"
	"database/sql"
	_ "embed"
	"encoding/json"
	"reflect"
	"slices"
	"strconv"
	"strings"

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/telemetry/tracing"
	"github.com/zitadel/zitadel/internal/zerrors"
)

type fieldValue struct {
	value []byte
}

func (value *fieldValue) Unmarshal(ptr any) error {
	return json.Unmarshal(value.value, ptr)
}

func (es *Eventstore) FillFields(ctx context.Context, events ...eventstore.FillFieldsEvent) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer span.End()

	tx, err := es.client.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = tx.Rollback()
			return
		}
		err = tx.Commit()
	}()

	return handleFieldFillEvents(ctx, tx, events)
}

// Search implements the [eventstore.Search] method
func (es *Eventstore) Search(ctx context.Context, conditions ...map[eventstore.FieldType]any) (result []*eventstore.SearchResult, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer span.EndWithError(err)

	var builder strings.Builder
	args := buildSearchStatement(ctx, &builder, conditions...)

	err = es.client.QueryContext(
		ctx,
		func(rows *sql.Rows) error {
			for rows.Next() {
				var (
					res   eventstore.SearchResult
					value fieldValue
				)
				err = rows.Scan(
					&res.Aggregate.InstanceID,
					&res.Aggregate.ResourceOwner,
					&res.Aggregate.Type,
					&res.Aggregate.ID,
					&res.Object.Type,
					&res.Object.ID,
					&res.Object.Revision,
					&res.FieldName,
					&value.value,
				)
				if err != nil {
					return err
				}
				res.Value = &value

				result = append(result, &res)
			}
			return nil
		},
		builder.String(),
		args...,
	)
	if err != nil {
		return nil, err
	}

	return result, nil
}

const searchQueryPrefix = `SELECT instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value FROM eventstore.fields WHERE instance_id = $1`

func buildSearchStatement(ctx context.Context, builder *strings.Builder, conditions ...map[eventstore.FieldType]any) []any {
	args := make([]any, 0, len(conditions)*4+1)
	args = append(args, authz.GetInstance(ctx).InstanceID())

	builder.WriteString(searchQueryPrefix)

	builder.WriteString(" AND ")
	if len(conditions) > 1 {
		builder.WriteRune('(')
	}
	for i, condition := range conditions {
		if i > 0 {
			builder.WriteString(" OR ")
		}
		if len(condition) > 1 {
			builder.WriteRune('(')
		}
		args = append(args, buildSearchCondition(builder, len(args)+1, condition)...)
		if len(condition) > 1 {
			builder.WriteRune(')')
		}
	}
	if len(conditions) > 1 {
		builder.WriteRune(')')
	}

	return args
}

func buildSearchCondition(builder *strings.Builder, index int, conditions map[eventstore.FieldType]any) []any {
	args := make([]any, 0, len(conditions))

	orderedCondition := make([]eventstore.FieldType, 0, len(conditions))
	for field := range conditions {
		orderedCondition = append(orderedCondition, field)
	}
	slices.Sort(orderedCondition)

	for _, field := range orderedCondition {
		if len(args) > 0 {
			builder.WriteString(" AND ")
		}
		builder.WriteString(fieldNameByType(field, conditions[field]))
		builder.WriteString(" = $")
		builder.WriteString(strconv.Itoa(index + len(args)))
		args = append(args, conditions[field])
	}

	return args
}

func handleFieldCommands(ctx context.Context, tx *sql.Tx, commands []eventstore.Command) error {
	for _, command := range commands {
		if len(command.Fields()) > 0 {
			if err := handleFieldOperations(ctx, tx, command.Fields()); err != nil {
				return err
			}
		}
	}
	return nil
}

func handleFieldFillEvents(ctx context.Context, tx *sql.Tx, events []eventstore.FillFieldsEvent) error {
	for _, event := range events {
		if len(event.Fields()) > 0 {
			if err := handleFieldOperations(ctx, tx, event.Fields()); err != nil {
				return err
			}
		}
	}
	return nil
}

func handleFieldOperations(ctx context.Context, tx *sql.Tx, operations []*eventstore.FieldOperation) error {
	for _, operation := range operations {
		if operation.Set != nil {
			if err := handleFieldSet(ctx, tx, operation.Set); err != nil {
				return err
			}
			continue
		}
		if operation.Remove != nil {
			if err := handleSearchDelete(ctx, tx, operation.Remove); err != nil {
				return err
			}
		}
	}

	return nil
}

func handleFieldSet(ctx context.Context, tx *sql.Tx, field *eventstore.Field) error {
	if len(field.UpsertConflictFields) == 0 {
		return handleSearchInsert(ctx, tx, field)
	}
	return handleSearchUpsert(ctx, tx, field)
}

const (
	insertField = `INSERT INTO eventstore.fields (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`
)

func handleSearchInsert(ctx context.Context, tx *sql.Tx, field *eventstore.Field) error {
	value, err := json.Marshal(field.Value.Value)
	if err != nil {
		return zerrors.ThrowInvalidArgument(err, "V3-fcrW1", "unable to marshal field value")
	}
	_, err = tx.ExecContext(
		ctx,
		insertField,

		field.Aggregate.InstanceID,
		field.Aggregate.ResourceOwner,
		field.Aggregate.Type,
		field.Aggregate.ID,
		field.Object.Type,
		field.Object.ID,
		field.Object.Revision,
		field.FieldName,
		value,
		field.Value.MustBeUnique,
		field.Value.ShouldIndex,
	)
	return err
}

const (
	fieldsUpsertPrefix = `WITH upsert AS (UPDATE eventstore.fields SET (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) = ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) WHERE `
	fieldsUpsertSuffix = ` RETURNING * ) INSERT INTO eventstore.fields (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) SELECT $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 WHERE NOT EXISTS (SELECT 1 FROM upsert)`
)

func handleSearchUpsert(ctx context.Context, tx *sql.Tx, field *eventstore.Field) error {
	value, err := json.Marshal(field.Value.Value)
	if err != nil {
		return zerrors.ThrowInvalidArgument(err, "V3-fcrW1", "unable to marshal field value")
	}

	_, err = tx.ExecContext(
		ctx,
		writeUpsertField(field.UpsertConflictFields),

		field.Aggregate.InstanceID,
		field.Aggregate.ResourceOwner,
		field.Aggregate.Type,
		field.Aggregate.ID,
		field.Object.Type,
		field.Object.ID,
		field.Object.Revision,
		field.FieldName,
		value,
		field.Value.MustBeUnique,
		field.Value.ShouldIndex,
	)
	return err
}

func writeUpsertField(fields []eventstore.FieldType) string {
	var builder strings.Builder

	builder.WriteString(fieldsUpsertPrefix)
	for i, fieldName := range fields {
		if i > 0 {
			builder.WriteString(" AND ")
		}
		name, index := searchFieldNameAndIndexByTypeForPush(fieldName)

		builder.WriteString(name)
		builder.WriteString(" = ")
		builder.WriteString(index)
	}
	builder.WriteString(fieldsUpsertSuffix)

	return builder.String()
}

const removeSearch = `DELETE FROM eventstore.fields WHERE `

func handleSearchDelete(ctx context.Context, tx *sql.Tx, clauses map[eventstore.FieldType]any) error {
	if len(clauses) == 0 {
		return zerrors.ThrowInvalidArgument(nil, "V3-oqlBZ", "no conditions")
	}
	stmt, args := writeDeleteField(clauses)
	_, err := tx.ExecContext(ctx, stmt, args...)
	return err
}

func writeDeleteField(clauses map[eventstore.FieldType]any) (string, []any) {
	var (
		builder strings.Builder
		args    = make([]any, 0, len(clauses))
	)
	builder.WriteString(removeSearch)

	orderedCondition := make([]eventstore.FieldType, 0, len(clauses))
	for field := range clauses {
		orderedCondition = append(orderedCondition, field)
	}
	slices.Sort(orderedCondition)

	for _, fieldName := range orderedCondition {
		if len(args) > 0 {
			builder.WriteString(" AND ")
		}
		builder.WriteString(fieldNameByType(fieldName, clauses[fieldName]))

		builder.WriteString(" = $")
		builder.WriteString(strconv.Itoa(len(args) + 1))

		args = append(args, clauses[fieldName])
	}

	return builder.String(), args
}

func fieldNameByType(typ eventstore.FieldType, value any) string {
	switch typ {
	case eventstore.FieldTypeAggregateID:
		return "aggregate_id"
	case eventstore.FieldTypeAggregateType:
		return "aggregate_type"
	case eventstore.FieldTypeInstanceID:
		return "instance_id"
	case eventstore.FieldTypeResourceOwner:
		return "resource_owner"
	case eventstore.FieldTypeFieldName:
		return "field_name"
	case eventstore.FieldTypeObjectType:
		return "object_type"
	case eventstore.FieldTypeObjectID:
		return "object_id"
	case eventstore.FieldTypeObjectRevision:
		return "object_revision"
	case eventstore.FieldTypeValue:
		return valueColumn(value)
	}
	return ""
}

func searchFieldNameAndIndexByTypeForPush(typ eventstore.FieldType) (string, string) {
	switch typ {
	case eventstore.FieldTypeInstanceID:
		return "instance_id", "$1"
	case eventstore.FieldTypeResourceOwner:
		return "resource_owner", "$2"
	case eventstore.FieldTypeAggregateType:
		return "aggregate_type", "$3"
	case eventstore.FieldTypeAggregateID:
		return "aggregate_id", "$4"
	case eventstore.FieldTypeObjectType:
		return "object_type", "$5"
	case eventstore.FieldTypeObjectID:
		return "object_id", "$6"
	case eventstore.FieldTypeObjectRevision:
		return "object_revision", "$7"
	case eventstore.FieldTypeFieldName:
		return "field_name", "$8"
	case eventstore.FieldTypeValue:
		return "value", "$9"
	}
	return "", ""
}

func valueColumn(value any) string {
	//nolint: exhaustive
	switch reflect.TypeOf(value).Kind() {
	case reflect.Bool:
		return "bool_value"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64:
		return "number_value"
	case reflect.String:
		return "text_value"
	}
	return ""
}
internal/eventstore/v3/field_test.go (new file, 260 lines)
@@ -0,0 +1,260 @@
package eventstore

import (
	"context"
	_ "embed"
	"reflect"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/eventstore"
)

func Test_handleSearchDelete(t *testing.T) {
	type args struct {
		clauses map[eventstore.FieldType]any
	}
	type want struct {
		stmt string
		args []any
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "1 condition",
			args: args{
				clauses: map[eventstore.FieldType]any{
					eventstore.FieldTypeInstanceID: "i_id",
				},
			},
			want: want{
				stmt: "DELETE FROM eventstore.fields WHERE instance_id = $1",
				args: []any{"i_id"},
			},
		},
		{
			name: "2 conditions",
			args: args{
				clauses: map[eventstore.FieldType]any{
					eventstore.FieldTypeInstanceID:  "i_id",
					eventstore.FieldTypeAggregateID: "a_id",
				},
			},
			want: want{
				stmt: "DELETE FROM eventstore.fields WHERE aggregate_id = $1 AND instance_id = $2",
				args: []any{"a_id", "i_id"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			stmt, args := writeDeleteField(tt.args.clauses)
			if stmt != tt.want.stmt {
				t.Errorf("handleSearchDelete() stmt = %q, want %q", stmt, tt.want.stmt)
			}
			assert.Equal(t, tt.want.args, args)
		})
	}
}

func Test_writeUpsertField(t *testing.T) {
	type args struct {
		fields []eventstore.FieldType
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "1 field",
			args: args{
				fields: []eventstore.FieldType{
					eventstore.FieldTypeInstanceID,
				},
			},
			want: "WITH upsert AS (UPDATE eventstore.fields SET (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) = ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) WHERE instance_id = $1 RETURNING * ) INSERT INTO eventstore.fields (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) SELECT $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 WHERE NOT EXISTS (SELECT 1 FROM upsert)",
		},
		{
			name: "2 fields",
			args: args{
				fields: []eventstore.FieldType{
					eventstore.FieldTypeInstanceID,
					eventstore.FieldTypeAggregateType,
				},
			},
			want: "WITH upsert AS (UPDATE eventstore.fields SET (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) = ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) WHERE instance_id = $1 AND aggregate_type = $3 RETURNING * ) INSERT INTO eventstore.fields (instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value, value_must_be_unique, should_index) SELECT $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 WHERE NOT EXISTS (SELECT 1 FROM upsert)",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := writeUpsertField(tt.args.fields); got != tt.want {
				t.Errorf("writeUpsertField() = %q, want %q", got, tt.want)
			}
		})
	}
}

func Test_buildSearchCondition(t *testing.T) {
	type args struct {
		index      int
		conditions map[eventstore.FieldType]any
	}
	type want struct {
		stmt string
		args []any
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "1 condition",
			args: args{
				index: 1,
				conditions: map[eventstore.FieldType]any{
					eventstore.FieldTypeAggregateID: "a_id",
				},
			},
			want: want{
				stmt: "aggregate_id = $1",
				args: []any{"a_id"},
			},
		},
		{
			name: "3 condition",
			args: args{
				index: 1,
				conditions: map[eventstore.FieldType]any{
					eventstore.FieldTypeAggregateID:   "a_id",
					eventstore.FieldTypeInstanceID:    "i_id",
					eventstore.FieldTypeAggregateType: "a_type",
				},
			},
			want: want{
				stmt: "aggregate_type = $1 AND aggregate_id = $2 AND instance_id = $3",
				args: []any{"a_type", "a_id", "i_id"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var builder strings.Builder

			if got := buildSearchCondition(&builder, tt.args.index, tt.args.conditions); !reflect.DeepEqual(got, tt.want.args) {
				t.Errorf("buildSearchCondition() = %v, want %v", got, tt.want)
			}
			if tt.want.stmt != builder.String() {
				t.Errorf("buildSearchCondition() stmt = %q, want %q", builder.String(), tt.want.stmt)
			}
		})
	}
}

func Test_buildSearchStatement(t *testing.T) {
	type args struct {
		index      int
		conditions []map[eventstore.FieldType]any
	}
	type want struct {
		stmt string
		args []any
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			name: "1 condition with 1 field",
			args: args{
				index: 1,
				conditions: []map[eventstore.FieldType]any{
					{
						eventstore.FieldTypeAggregateID: "a_id",
					},
				},
			},
			want: want{
				stmt: "SELECT instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value FROM eventstore.fields WHERE instance_id = $1 AND aggregate_id = $2",
				args: []any{"a_id"},
			},
		},
		{
			name: "1 condition with 3 fields",
			args: args{
				index: 1,
				conditions: []map[eventstore.FieldType]any{
					{
						eventstore.FieldTypeAggregateID:   "a_id",
						eventstore.FieldTypeInstanceID:    "i_id",
						eventstore.FieldTypeAggregateType: "a_type",
					},
				},
			},
			want: want{
				stmt: "SELECT instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value FROM eventstore.fields WHERE instance_id = $1 AND (aggregate_type = $2 AND aggregate_id = $3 AND instance_id = $4)",
				args: []any{"a_type", "a_id", "i_id"},
			},
		},
		{
			name: "2 condition with 1 field",
			args: args{
				index: 1,
				conditions: []map[eventstore.FieldType]any{
					{
						eventstore.FieldTypeAggregateID: "a_id",
					},
					{
						eventstore.FieldTypeAggregateType: "a_type",
					},
				},
			},
			want: want{
				stmt: "SELECT instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value FROM eventstore.fields WHERE instance_id = $1 AND (aggregate_id = $2 OR aggregate_type = $3)",
				args: []any{"a_id", "a_type"},
			},
		},
		{
			name: "2 condition with 2 fields",
			args: args{
				index: 1,
				conditions: []map[eventstore.FieldType]any{
					{
						eventstore.FieldTypeAggregateID:   "a_id1",
						eventstore.FieldTypeAggregateType: "a_type1",
					},
					{
						eventstore.FieldTypeAggregateID:   "a_id2",
						eventstore.FieldTypeAggregateType: "a_type2",
					},
				},
			},
			want: want{
				stmt: "SELECT instance_id, resource_owner, aggregate_type, aggregate_id, object_type, object_id, object_revision, field_name, value FROM eventstore.fields WHERE instance_id = $1 AND ((aggregate_type = $2 AND aggregate_id = $3) OR (aggregate_type = $4 AND aggregate_id = $5))",
				args: []any{"a_type1", "a_id1", "a_type2", "a_id2"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var builder strings.Builder
			tt.want.args = append([]any{"i_id"}, tt.want.args...)
			ctx := authz.WithInstanceID(context.Background(), "i_id")

			if got := buildSearchStatement(ctx, &builder, tt.args.conditions...); !reflect.DeepEqual(got, tt.want.args) {
				t.Errorf("buildSearchStatement() = %v, want %v", got, tt.want)
			}
			if tt.want.stmt != builder.String() {
				t.Errorf("buildSearchStatement() stmt = %q, want %q", builder.String(), tt.want.stmt)
			}
		})
	}
}
@@ -42,6 +42,10 @@ func (m *mockCommand) UniqueConstraints() []*eventstore.UniqueConstraint {
 	return m.constraints
 }

+func (e *mockCommand) Fields() []*eventstore.FieldOperation {
+	return nil
+}
+
 func mockEvent(aggregate *eventstore.Aggregate, sequence uint64, payload Payload) eventstore.Event {
 	return &event{
 		aggregate: aggregate,

@@ -41,7 +41,20 @@ func (es *Eventstore) Push(ctx context.Context, commands ...eventstore.Command)
 			return err
 		}

-		return handleUniqueConstraints(ctx, tx, commands)
+		if err = handleUniqueConstraints(ctx, tx, commands); err != nil {
+			return err
+		}
+
+		// CockroachDB by default does not allow multiple modifications of the same table using ON CONFLICT
+		// That's why we enable it manually
+		if es.client.Type() == "cockroach" {
+			_, err = tx.Exec("SET enable_multiple_modifications_of_table = on")
+			if err != nil {
+				return err
+			}
+		}
+
+		return handleFieldCommands(ctx, tx, commands)
 	})

 	if err != nil {