refactor(v2): init eventstore package (#7806)

* refactor(v2): init database package

* refactor(v2): init eventstore package

* add mock package

* test query constructors

* option-based push, analogous to query
Silvan
2024-04-26 17:05:21 +02:00
committed by GitHub
parent 2254434692
commit 5811a7b6a5
16 changed files with 5681 additions and 0 deletions


@@ -0,0 +1,64 @@
package postgres
import (
"encoding/json"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
func intentToCommands(intent *intent) (commands []*command, err error) {
commands = make([]*command, len(intent.Commands()))
for i, cmd := range intent.Commands() {
var payload unmarshalPayload
if cmd.Payload() != nil {
payload, err = json.Marshal(cmd.Payload())
if err != nil {
logging.WithError(err).Warn("marshal payload failed")
return nil, zerrors.ThrowInternal(err, "POSTG-MInPK", "Errors.Internal")
}
}
commands[i] = &command{
Event: &eventstore.Event[eventstore.StoragePayload]{
Aggregate: *intent.Aggregate(),
Creator: cmd.Creator(),
Revision: cmd.Revision(),
Type: cmd.Type(),
// always add at least 1 to the currently stored sequence
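// e.g. with a stored sequence of 5 and three commands in the intent,
// the commands are written with sequences 6, 7 and 8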
Sequence: intent.sequence + uint32(i) + 1,
Payload: payload,
},
intent: intent,
uniqueConstraints: cmd.UniqueConstraints(),
}
}
return commands, nil
}
type command struct {
*eventstore.Event[eventstore.StoragePayload]
intent *intent
uniqueConstraints []*eventstore.UniqueConstraint
}
var _ eventstore.StoragePayload = (unmarshalPayload)(nil)
type unmarshalPayload []byte
// Unmarshal implements eventstore.StoragePayload.
func (p unmarshalPayload) Unmarshal(ptr any) error {
if len(p) == 0 {
return nil
}
if err := json.Unmarshal(p, ptr); err != nil {
return zerrors.ThrowInternal(err, "POSTG-u8qVo", "Errors.Internal")
}
return nil
}
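Aside (an illustrative sketch, not part of the diff): unmarshalPayload keeps the raw JSON from the payload column and defers decoding until a consumer asks for it through the StoragePayload interface. A reducer could use it like this, with a hypothetical userAddedPayload struct:

// userAddedPayload is a hypothetical event payload, used only for illustration.
type userAddedPayload struct {
	Username string `json:"username"`
}

func reduceUserAdded(event *eventstore.Event[eventstore.StoragePayload]) error {
	var payload userAddedPayload
	// Unmarshal is a no-op for events that were stored without a payload.
	if err := event.Payload.Unmarshal(&payload); err != nil {
		return err
	}
	// ...apply payload.Username to the projection...
	return nil
}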


@@ -0,0 +1,42 @@
package postgres
import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
type intent struct {
*eventstore.PushAggregate
sequence uint32
}
func makeIntents(pushIntent *eventstore.PushIntent) []*intent {
res := make([]*intent, len(pushIntent.Aggregates()))
for i, aggregate := range pushIntent.Aggregates() {
res[i] = &intent{PushAggregate: aggregate}
}
return res
}
func intentByAggregate(intents []*intent, aggregate *eventstore.Aggregate) *intent {
for _, intent := range intents {
if intent.PushAggregate.Aggregate().Equals(aggregate) {
return intent
}
}
logging.WithFields("instance", aggregate.Instance, "owner", aggregate.Owner, "type", aggregate.Type, "id", aggregate.ID).Panic("no intent found")
return nil
}
func checkSequences(intents []*intent) bool {
for _, intent := range intents {
if !eventstore.CheckSequence(intent.sequence, intent.PushAggregate.CurrentSequence()) {
return false
}
}
return true
}


@@ -0,0 +1,122 @@
package postgres
import (
"testing"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
func Test_checkSequences(t *testing.T) {
type args struct {
intents []*intent
}
tests := []struct {
name string
args args
want bool
}{
{
name: "ignore",
args: args{
intents: []*intent{
{
sequence: 1,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.IgnoreCurrentSequence(),
),
},
},
},
want: true,
},
{
name: "ignores",
args: args{
intents: []*intent{
{
sequence: 1,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.IgnoreCurrentSequence(),
),
},
{
sequence: 1,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
),
},
},
},
want: true,
},
{
name: "matches",
args: args{
intents: []*intent{
{
sequence: 0,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.CurrentSequenceMatches(0),
),
},
},
},
want: true,
},
{
name: "does not match",
args: args{
intents: []*intent{
{
sequence: 1,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.CurrentSequenceMatches(2),
),
},
},
},
want: false,
},
{
name: "at least",
args: args{
intents: []*intent{
{
sequence: 10,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.CurrentSequenceAtLeast(0),
),
},
},
},
want: true,
},
{
name: "at least too low",
args: args{
intents: []*intent{
{
sequence: 1,
PushAggregate: eventstore.NewPushAggregate(
"", "", "",
eventstore.CurrentSequenceAtLeast(2),
),
},
},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := checkSequences(tt.args.intents); got != tt.want {
t.Errorf("checkSequences() = %v, want %v", got, tt.want)
}
})
}
}


@@ -0,0 +1,245 @@
package postgres
import (
"context"
"database/sql"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
"github.com/zitadel/zitadel/internal/v2/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
// Push implements eventstore.Pusher.
func (s *Storage) Push(ctx context.Context, intent *eventstore.PushIntent) (err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
tx := intent.Tx()
if tx == nil {
tx, err = s.client.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false})
if err != nil {
return err
}
defer func() {
err = database.CloseTx(tx, err)
}()
}
// allows smaller wait times on the query side for instances which are not actively writing
if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil {
return err
}
intents, err := lockAggregates(ctx, tx, intent)
if err != nil {
return err
}
if !checkSequences(intents) {
return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched")
}
commands := make([]*command, 0, len(intents))
for _, intent := range intents {
additionalCommands, err := intentToCommands(intent)
if err != nil {
return err
}
commands = append(commands, additionalCommands...)
}
err = uniqueConstraints(ctx, tx, commands)
if err != nil {
return err
}
return push(ctx, tx, intent, commands)
}
// setAppName sets the application name for the current transaction
func setAppName(ctx context.Context, tx *sql.Tx, name string) error {
_, err := tx.ExecContext(ctx, "SET LOCAL application_name TO $1", name)
if err != nil {
logging.WithFields("name", name).WithError(err).Debug("setting app name failed")
return zerrors.ThrowInternal(err, "POSTG-G3OmZ", "Errors.Internal")
}
return nil
}
func lockAggregates(ctx context.Context, tx *sql.Tx, intent *eventstore.PushIntent) (_ []*intent, err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
var stmt database.Statement
stmt.WriteString("WITH existing AS (")
for i, aggregate := range intent.Aggregates() {
if i > 0 {
stmt.WriteString(" UNION ALL ")
}
stmt.WriteString(`(SELECT instance_id, aggregate_type, aggregate_id, "sequence" FROM eventstore.events2 WHERE instance_id = `)
stmt.WriteArgs(intent.Instance())
stmt.WriteString(` AND aggregate_type = `)
stmt.WriteArgs(aggregate.Type())
stmt.WriteString(` AND aggregate_id = `)
stmt.WriteArgs(aggregate.ID())
stmt.WriteString(` AND owner = `)
stmt.WriteArgs(aggregate.Owner())
stmt.WriteString(` ORDER BY "sequence" DESC LIMIT 1)`)
}
stmt.WriteString(") SELECT e.instance_id, e.owner, e.aggregate_type, e.aggregate_id, e.sequence FROM eventstore.events2 e JOIN existing ON e.instance_id = existing.instance_id AND e.aggregate_type = existing.aggregate_type AND e.aggregate_id = existing.aggregate_id AND e.sequence = existing.sequence FOR UPDATE")
//nolint:rowserrcheck
// rows is checked by database.MapRowsToObject
rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
if err != nil {
return nil, err
}
res := makeIntents(intent)
err = database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
var sequence sql.Null[uint32]
agg := new(eventstore.Aggregate)
err := scan(
&agg.Instance,
&agg.Owner,
&agg.Type,
&agg.ID,
&sequence,
)
if err != nil {
return err
}
intentByAggregate(res, agg).sequence = sequence.V
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
func push(ctx context.Context, tx *sql.Tx, reducer eventstore.Reducer, commands []*command) (err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
var stmt database.Statement
stmt.WriteString(`INSERT INTO eventstore.events2 (instance_id, "owner", aggregate_type, aggregate_id, revision, creator, event_type, payload, "sequence", in_tx_order, created_at, "position") VALUES `)
for i, cmd := range commands {
if i > 0 {
stmt.WriteString(", ")
}
cmd.Position.InPositionOrder = uint32(i)
stmt.WriteString(`(`)
stmt.WriteArgs(
cmd.Aggregate.Instance,
cmd.Aggregate.Owner,
cmd.Aggregate.Type,
cmd.Aggregate.ID,
cmd.Revision,
cmd.Creator,
cmd.Type,
cmd.Payload,
cmd.Sequence,
i,
)
stmt.WriteString(", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())")
stmt.WriteString(`)`)
}
stmt.WriteString(` RETURNING created_at, "position"`)
//nolint:rowserrcheck
// rows is checked by database.MapRowsToObject
rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
if err != nil {
return err
}
var i int
return database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
defer func() { i++ }()
err := scan(
&commands[i].CreatedAt,
&commands[i].Position.Position,
)
if err != nil {
return err
}
return reducer.Reduce(commands[i].Event)
})
}
func uniqueConstraints(ctx context.Context, tx *sql.Tx, commands []*command) (err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
var stmt database.Statement
for _, cmd := range commands {
if len(cmd.uniqueConstraints) == 0 {
continue
}
for _, constraint := range cmd.uniqueConstraints {
stmt.Reset()
instance := cmd.Aggregate.Instance
if constraint.IsGlobal {
instance = ""
}
switch constraint.Action {
case eventstore.UniqueConstraintAdd:
stmt.WriteString(`INSERT INTO eventstore.unique_constraints (instance_id, unique_type, unique_field) VALUES (`)
stmt.WriteArgs(instance, constraint.UniqueType, constraint.UniqueField)
stmt.WriteRune(')')
case eventstore.UniqueConstraintInstanceRemove:
stmt.WriteString(`DELETE FROM eventstore.unique_constraints WHERE instance_id = `)
stmt.WriteArgs(instance)
case eventstore.UniqueConstraintRemove:
stmt.WriteString(`DELETE FROM eventstore.unique_constraints WHERE `)
stmt.WriteString(deleteUniqueConstraintClause)
stmt.AppendArgs(
instance,
constraint.UniqueType,
constraint.UniqueField,
)
}
_, err := tx.ExecContext(ctx, stmt.String(), stmt.Args()...)
if err != nil {
logging.WithFields("action", constraint.Action).Warn("handling of unique constraint failed")
errMessage := constraint.ErrorMessage
if errMessage == "" {
errMessage = "Errors.Internal"
}
return zerrors.ThrowAlreadyExists(err, "POSTG-QzjyP", errMessage)
}
}
}
return nil
}
// the query is this complex because unique constraints were accidentally stored case-sensitively:
// it first checks for a case-sensitive match and then falls back to a case-insensitive match
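// e.g. a row stored as "user@example.com" is still matched when the caller
// supplies "User@Example.com", because the second branch compares with LOWER($3)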
var deleteUniqueConstraintClause = `
(instance_id = $1 AND unique_type = $2 AND unique_field = (
SELECT unique_field from (
SELECT instance_id, unique_type, unique_field
FROM eventstore.unique_constraints
WHERE instance_id = $1 AND unique_type = $2 AND unique_field = $3
UNION ALL
SELECT instance_id, unique_type, unique_field
FROM eventstore.unique_constraints
WHERE instance_id = $1 AND unique_type = $2 AND unique_field = LOWER($3)
) AS case_insensitive_constraints LIMIT 1)
)`
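For orientation: with two commands, the INSERT assembled by push above renders roughly as follows (the sequential $n placeholder numbering is an assumption about database.Statement); created_at and position are computed by the database and scanned back into the commands through the RETURNING clause:

INSERT INTO eventstore.events2 (instance_id, "owner", aggregate_type, aggregate_id, revision, creator, event_type, payload, "sequence", in_tx_order, created_at, "position") VALUES
  ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp())),
  ($11, $12, $13, $14, $15, $16, $17, $18, $19, $20, statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp()))
RETURNING created_at, "position"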

File diff suppressed because it is too large


@@ -0,0 +1,289 @@
package postgres
import (
"context"
"database/sql"
"slices"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
"github.com/zitadel/zitadel/internal/v2/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
func (s *Storage) Query(ctx context.Context, query *eventstore.Query) (eventCount int, err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
var stmt database.Statement
writeQuery(&stmt, query)
if query.Tx() != nil {
return executeQuery(ctx, query.Tx(), &stmt, query)
}
return executeQuery(ctx, s.client.DB, &stmt, query)
}
func executeQuery(ctx context.Context, tx database.Querier, stmt *database.Statement, reducer eventstore.Reducer) (eventCount int, err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
//nolint:rowserrcheck
// rows is checked by database.MapRowsToObject
rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
if err != nil {
return 0, err
}
err = database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
e := new(eventstore.Event[eventstore.StoragePayload])
var payload sql.Null[[]byte]
err := scan(
&e.CreatedAt,
&e.Type,
&e.Sequence,
&e.Position.Position,
&e.Position.InPositionOrder,
&payload,
&e.Creator,
&e.Aggregate.Owner,
&e.Aggregate.Instance,
&e.Aggregate.Type,
&e.Aggregate.ID,
&e.Revision,
)
if err != nil {
return err
}
e.Payload = unmarshalPayload(payload.V)
eventCount++
return reducer.Reduce(e)
})
return eventCount, err
}
var (
selectColumns = `SELECT created_at, event_type, "sequence", "position", in_tx_order, payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision`
// TODO: condition must know whether its args are named parameters or not
// instancePlaceholder = database.Placeholder("@instance_id")
)
func writeQuery(stmt *database.Statement, query *eventstore.Query) {
stmt.WriteString(selectColumns)
// stmt.SetNamedArg(instancePlaceholder, query.Instance())
stmt.WriteString(" FROM (")
writeFilters(stmt, query.Filters())
stmt.WriteRune(')')
writePagination(stmt, query.Pagination())
}
var from = " FROM eventstore.events2"
func writeFilters(stmt *database.Statement, filters []*eventstore.Filter) {
if len(filters) == 0 {
logging.Fatal("query does not contain filters")
}
for i, filter := range filters {
if i > 0 {
stmt.WriteString(" UNION ALL ")
}
stmt.WriteRune('(')
stmt.WriteString(selectColumns)
stmt.WriteString(from)
writeFilter(stmt, filter)
stmt.WriteString(")")
}
}
func writeFilter(stmt *database.Statement, filter *eventstore.Filter) {
stmt.WriteString(" WHERE ")
filter.Parent().Instance().Write(stmt, "instance_id")
writeAggregateFilters(stmt, filter.AggregateFilters())
writePagination(stmt, filter.Pagination())
}
func writePagination(stmt *database.Statement, pagination *eventstore.Pagination) {
writePosition(stmt, pagination.Position())
writeOrdering(stmt, pagination.Desc())
if pagination.Pagination() != nil {
pagination.Pagination().Write(stmt)
}
}
func writePosition(stmt *database.Statement, position *eventstore.PositionCondition) {
if position == nil {
return
}
max := position.Max()
min := position.Min()
stmt.WriteString(" AND ")
if max != nil {
if max.InPositionOrder > 0 {
stmt.WriteString("((")
database.NewNumberEquals(max.Position).Write(stmt, "position")
stmt.WriteString(" AND ")
database.NewNumberLess(max.InPositionOrder).Write(stmt, "in_tx_order")
stmt.WriteRune(')')
stmt.WriteString(" OR ")
}
database.NewNumberLess(max.Position).Write(stmt, "position")
if max.InPositionOrder > 0 {
stmt.WriteRune(')')
}
}
if max != nil && min != nil {
stmt.WriteString(" AND ")
}
if min != nil {
if min.InPositionOrder > 0 {
stmt.WriteString("((")
database.NewNumberEquals(min.Position).Write(stmt, "position")
stmt.WriteString(" AND ")
database.NewNumberGreater(min.InPositionOrder).Write(stmt, "in_tx_order")
stmt.WriteRune(')')
stmt.WriteString(" OR ")
}
database.NewNumberGreater(min.Position).Write(stmt, "position")
if min.InPositionOrder > 0 {
stmt.WriteRune(')')
}
}
}
func writeAggregateFilters(stmt *database.Statement, filters []*eventstore.AggregateFilter) {
if len(filters) == 0 {
return
}
stmt.WriteString(" AND ")
if len(filters) > 1 {
stmt.WriteRune('(')
}
for i, filter := range filters {
if i > 0 {
stmt.WriteString(" OR ")
}
writeAggregateFilter(stmt, filter)
}
if len(filters) > 1 {
stmt.WriteRune(')')
}
}
func writeAggregateFilter(stmt *database.Statement, filter *eventstore.AggregateFilter) {
conditions := definedConditions([]*condition{
{column: "aggregate_type", condition: filter.Type()},
{column: "aggregate_id", condition: filter.IDs()},
})
if len(conditions) > 1 || len(filter.Events()) > 0 {
stmt.WriteRune('(')
}
writeConditions(
stmt,
conditions,
" AND ",
)
writeEventFilters(stmt, filter.Events())
if len(conditions) > 1 || len(filter.Events()) > 0 {
stmt.WriteRune(')')
}
}
func writeEventFilters(stmt *database.Statement, filters []*eventstore.EventFilter) {
if len(filters) == 0 {
return
}
stmt.WriteString(" AND ")
if len(filters) > 1 {
stmt.WriteRune('(')
}
for i, filter := range filters {
if i > 0 {
stmt.WriteString(" OR ")
}
writeEventFilter(stmt, filter)
}
if len(filters) > 1 {
stmt.WriteRune(')')
}
}
func writeEventFilter(stmt *database.Statement, filter *eventstore.EventFilter) {
conditions := definedConditions([]*condition{
{column: "event_type", condition: filter.Types()},
{column: "created_at", condition: filter.CreatedAt()},
{column: "sequence", condition: filter.Sequence()},
{column: "revision", condition: filter.Revision()},
{column: "creator", condition: filter.Creators()},
})
if len(conditions) > 1 {
stmt.WriteRune('(')
}
writeConditions(
stmt,
conditions,
" AND ",
)
if len(conditions) > 1 {
stmt.WriteRune(')')
}
}
type condition struct {
column string
condition database.Condition
}
func writeConditions(stmt *database.Statement, conditions []*condition, sep string) {
var i int
for _, cond := range conditions {
if i > 0 {
stmt.WriteString(sep)
}
cond.condition.Write(stmt, cond.column)
i++
}
}
func definedConditions(conditions []*condition) []*condition {
return slices.DeleteFunc(conditions, func(cond *condition) bool {
return cond.condition == nil
})
}
func writeOrdering(stmt *database.Statement, descending bool) {
stmt.WriteString(" ORDER BY position")
if descending {
stmt.WriteString(" DESC")
}
stmt.WriteString(", in_tx_order")
if descending {
stmt.WriteString(" DESC")
}
}
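Again for orientation: with two filters, writeQuery assembles a statement of roughly the following shape. Each filter becomes its own sub-select over eventstore.events2 with its own conditions (and, when set, its own ordering and limit), the sub-selects are combined with UNION ALL, and the outer query applies the global ordering and pagination:

SELECT created_at, event_type, "sequence", "position", in_tx_order, payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision FROM (
  (SELECT ... FROM eventstore.events2 WHERE instance_id = $1 AND <aggregate/event conditions> ORDER BY position, in_tx_order LIMIT ...)
  UNION ALL
  (SELECT ... FROM eventstore.events2 WHERE instance_id = $2 AND <aggregate/event conditions>)
) ORDER BY position, in_tx_order LIMIT ...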

File diff suppressed because it is too large


@@ -0,0 +1,28 @@
package postgres
import (
"context"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
var (
_ eventstore.Pusher = (*Storage)(nil)
_ eventstore.Querier = (*Storage)(nil)
)
type Storage struct {
client *database.DB
}
func New(client *database.DB) *Storage {
return &Storage{
client: client,
}
}
// Health implements eventstore.Pusher.
func (s *Storage) Health(ctx context.Context) error {
return s.client.PingContext(ctx)
}
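A minimal wiring sketch (caller-side code that is not part of this change; the postgres import path and the helper name are assumed from the package layout):

package example

import (
	"context"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/v2/eventstore"
	"github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
)

// newEventstoreStorage is a hypothetical helper that wires the new storage
// into the existing connection pool.
func newEventstoreStorage(ctx context.Context, client *database.DB) (*postgres.Storage, error) {
	store := postgres.New(client)
	// fail fast if the database is unreachable
	if err := store.Health(ctx); err != nil {
		return nil, err
	}
	// the same value serves both the push and the query side
	var (
		_ eventstore.Pusher  = store
		_ eventstore.Querier = store
	)
	return store, nil
}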