chore: move the go code into a subfolder

This commit is contained in:
Florian Forster
2025-08-05 15:20:32 -07:00
parent 4ad22ba456
commit cd2921de26
2978 changed files with 373 additions and 300 deletions

View File

@@ -0,0 +1,64 @@
package postgres
import (
"encoding/json"
"reflect"
"time"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
// intentToCommands converts every queued command of the given intent into a
// storage command. Each command gets the aggregate's next sequence number and
// its JSON-encoded payload assigned.
func intentToCommands(intent *intent) ([]*command, error) {
	queued := intent.Commands()
	result := make([]*command, len(queued))

	for idx, c := range queued {
		encoded, marshalErr := marshalPayload(c.Payload)
		if marshalErr != nil {
			return nil, zerrors.ThrowInternal(marshalErr, "POSTG-MInPK", "Errors.Internal")
		}
		result[idx] = &command{
			Command:  c,
			intent:   intent,
			sequence: intent.nextSequence(),
			payload:  encoded,
		}
	}

	return result, nil
}
// marshalPayload JSON-encodes the given payload. A nil payload, or a payload
// whose value is the zero value of its type, yields (nil, nil) so the event is
// stored without a payload instead of with an encoded zero value.
func marshalPayload(payload any) ([]byte, error) {
	if payload == nil {
		return nil, nil
	}
	if reflect.ValueOf(payload).IsZero() {
		return nil, nil
	}
	return json.Marshal(payload)
}
// command couples an eventstore command with the intent it belongs to and the
// storage-level data that is filled in while pushing.
type command struct {
	*eventstore.Command

	// intent is the per-aggregate push intent this command was created from.
	intent *intent
	// payload is the JSON-encoded command payload; nil when the payload was
	// nil or a zero value (see marshalPayload).
	payload []byte
	// position and createdAt are populated from the RETURNING clause of the
	// insert statement (see Storage.push).
	position  eventstore.GlobalPosition
	createdAt time.Time
	// sequence is the aggregate sequence assigned via intent.nextSequence().
	sequence uint32
}
// toEvent converts the pushed command into a storage event. The payload is
// exposed as a lazy unmarshal function over the stored JSON bytes.
func (cmd *command) toEvent() *eventstore.StorageEvent {
	return &eventstore.StorageEvent{
		Action: eventstore.Action[eventstore.Unmarshal]{
			Creator:  cmd.Creator,
			Type:     cmd.Type,
			Revision: cmd.Revision,
			Payload: func(ptr any) error {
				return json.Unmarshal(cmd.payload, ptr)
			},
		},
		Aggregate: *cmd.intent.Aggregate(),
		// NOTE(review): this uses the intent's sequence (the aggregate's
		// latest assigned sequence at call time), not cmd.sequence — confirm
		// that events are only converted after the whole push was prepared.
		Sequence:  cmd.intent.sequence,
		Position:  cmd.position,
		CreatedAt: cmd.createdAt,
	}
}

View File

@@ -0,0 +1,47 @@
package postgres
import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
// intent tracks the current sequence of a single aggregate while its commands
// are being pushed.
type intent struct {
	*eventstore.PushAggregate

	// sequence is the aggregate's latest known sequence; it starts at the
	// value read from the store and is incremented for every new command
	// via nextSequence.
	sequence uint32
}
// nextSequence advances the aggregate's sequence by one and returns the new
// value, which is assigned to the next command of this aggregate.
func (i *intent) nextSequence() uint32 {
	next := i.sequence + 1
	i.sequence = next
	return next
}
// makeIntents creates one intent per aggregate of the push intent, each
// starting with a sequence of zero.
func makeIntents(pushIntent *eventstore.PushIntent) []*intent {
	aggregates := pushIntent.Aggregates()
	intents := make([]*intent, 0, len(aggregates))
	for _, aggregate := range aggregates {
		intents = append(intents, &intent{PushAggregate: aggregate})
	}
	return intents
}
// intentByAggregate returns the intent matching the given aggregate. It must
// only be called with aggregates that are part of the push; if none matches,
// the process panics because this indicates a programming error.
func intentByAggregate(intents []*intent, aggregate *eventstore.Aggregate) *intent {
	for _, candidate := range intents {
		if candidate.PushAggregate.Aggregate().Equals(aggregate) {
			return candidate
		}
	}

	logging.WithFields("instance", aggregate.Instance, "owner", aggregate.Owner, "type", aggregate.Type, "id", aggregate.ID).Panic("no intent found")
	return nil
}
// checkSequences reports whether every intent's sequence read from the store
// satisfies the sequence expectation declared on its push aggregate.
func checkSequences(intents []*intent) bool {
	for _, candidate := range intents {
		if eventstore.CheckSequence(candidate.sequence, candidate.PushAggregate.CurrentSequence()) {
			continue
		}
		return false
	}
	return true
}

View File

@@ -0,0 +1,122 @@
package postgres
import (
"testing"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
// Test_checkSequences covers the current-sequence expectations: ignoring the
// stored sequence, requiring an exact match and requiring a minimum sequence,
// for both single and multiple intents.
func Test_checkSequences(t *testing.T) {
	type args struct {
		intents []*intent
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			// a single intent ignoring the current sequence always passes
			name: "ignore",
			args: args{
				intents: []*intent{
					{
						sequence: 1,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.IgnoreCurrentSequence(),
						),
					},
				},
			},
			want: true,
		},
		{
			// an intent without an explicit expectation also passes here
			name: "ignores",
			args: args{
				intents: []*intent{
					{
						sequence: 1,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.IgnoreCurrentSequence(),
						),
					},
					{
						sequence: 1,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
						),
					},
				},
			},
			want: true,
		},
		{
			// stored sequence equals the expected sequence
			name: "matches",
			args: args{
				intents: []*intent{
					{
						sequence: 0,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.CurrentSequenceMatches(0),
						),
					},
				},
			},
			want: true,
		},
		{
			// stored sequence differs from the expected sequence
			name: "does not match",
			args: args{
				intents: []*intent{
					{
						sequence: 1,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.CurrentSequenceMatches(2),
						),
					},
				},
			},
			want: false,
		},
		{
			// stored sequence is above the required minimum
			name: "at least",
			args: args{
				intents: []*intent{
					{
						sequence: 10,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.CurrentSequenceAtLeast(0),
						),
					},
				},
			},
			want: true,
		},
		{
			// stored sequence is below the required minimum
			name: "at least too low",
			args: args{
				intents: []*intent{
					{
						sequence: 1,
						PushAggregate: eventstore.NewPushAggregate(
							"", "", "",
							eventstore.CurrentSequenceAtLeast(2),
						),
					},
				},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := checkSequences(tt.args.intents); got != tt.want {
				t.Errorf("checkSequences() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@@ -0,0 +1,262 @@
package postgres
import (
"context"
"database/sql"
"fmt"
"github.com/cockroachdb/cockroach-go/v2/crdb"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
"github.com/zitadel/zitadel/internal/v2/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
"github.com/zitadel/zitadel/internal/zerrors"
)
// Push implements eventstore.Pusher.
//
// It writes all commands of the given intent to eventstore.events2 inside a
// serializable transaction. If the intent does not carry its own transaction,
// a new one is opened and committed/rolled back when Push returns. The body is
// executed through crdb.Execute so that retryable errors are retried up to
// s.config.MaxRetries times.
func (s *Storage) Push(ctx context.Context, intent *eventstore.PushIntent) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	tx := intent.Tx()
	if tx == nil {
		tx, err = s.client.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false})
		if err != nil {
			return err
		}
		// commit or roll back based on the named return value
		defer func() {
			err = database.CloseTx(tx, err)
		}()
	}

	var retryCount uint32
	return crdb.Execute(func() (err error) {
		defer func() {
			if err == nil {
				return
			}
			// keep the error retryable until the configured maximum is
			// reached, then wrap it as a final internal error
			if retryCount < s.config.MaxRetries {
				retryCount++
				return
			}
			logging.WithFields("retry_count", retryCount).WithError(err).Debug("max retry count reached")
			err = zerrors.ThrowInternal(err, "POSTG-VJfJz", "Errors.Internal")
		}()

		// allows smaller wait times on query side for instances which are not actively writing
		if err := setAppName(ctx, tx, "es_pusher_"+intent.Instance()); err != nil {
			return err
		}

		// lock the latest event of each aggregate and load its sequence
		intents, err := lockAggregates(ctx, tx, intent)
		if err != nil {
			return err
		}

		// verify the loaded sequences against the callers' expectations
		if !checkSequences(intents) {
			return zerrors.ThrowInvalidArgument(nil, "POSTG-KOM6E", "Errors.Internal.Eventstore.SequenceNotMatched")
		}

		commands := make([]*command, 0, len(intents))
		for _, intent := range intents {
			additionalCommands, err := intentToCommands(intent)
			if err != nil {
				return err
			}
			commands = append(commands, additionalCommands...)
		}

		// apply unique constraint changes before inserting the events
		err = uniqueConstraints(ctx, tx, commands)
		if err != nil {
			return err
		}

		return s.push(ctx, tx, intent, commands)
	})
}
// setAppName sets the application_name for the current transaction. The name
// is used to distinguish actively writing instances (see the caller in Push).
//
// NOTE(review): name is interpolated via fmt.Sprintf because SET LOCAL does
// not take bind parameters; the only caller passes "es_pusher_"+instance id.
// Confirm the instance id can never contain a single quote, or switch to
// `SELECT set_config('application_name', $1, true)` to rule out injection.
func setAppName(ctx context.Context, tx *sql.Tx, name string) error {
	_, err := tx.ExecContext(ctx, fmt.Sprintf("SET LOCAL application_name TO '%s'", name))
	if err != nil {
		logging.WithFields("name", name).WithError(err).Debug("setting app name failed")
		return zerrors.ThrowInternal(err, "POSTG-G3OmZ", "Errors.Internal")
	}
	return nil
}
// lockAggregates selects the latest event of every aggregate of the push
// intent FOR UPDATE, so concurrent pushes to the same aggregates block each
// other until the transaction ends.
//
// It returns one intent per aggregate of the push intent; aggregates without
// any existing event keep a sequence of 0 (sql.Null's zero value).
func lockAggregates(ctx context.Context, tx *sql.Tx, intent *eventstore.PushIntent) (_ []*intent, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	var stmt database.Statement

	// one sub-select per aggregate returning its latest event, combined with
	// UNION ALL into the "existing" CTE
	stmt.WriteString("WITH existing AS (")
	for i, aggregate := range intent.Aggregates() {
		if i > 0 {
			stmt.WriteString(" UNION ALL ")
		}
		stmt.WriteString(`(SELECT instance_id, aggregate_type, aggregate_id, "sequence" FROM eventstore.events2 WHERE instance_id = `)
		stmt.WriteArgs(intent.Instance())
		stmt.WriteString(` AND aggregate_type = `)
		stmt.WriteArgs(aggregate.Type())
		stmt.WriteString(` AND aggregate_id = `)
		stmt.WriteArgs(aggregate.ID())
		stmt.WriteString(` AND owner = `)
		stmt.WriteArgs(aggregate.Owner())
		stmt.WriteString(` ORDER BY "sequence" DESC LIMIT 1)`)
	}
	// join back to the events table to acquire the row locks
	stmt.WriteString(") SELECT e.instance_id, e.owner, e.aggregate_type, e.aggregate_id, e.sequence FROM eventstore.events2 e JOIN existing ON e.instance_id = existing.instance_id AND e.aggregate_type = existing.aggregate_type AND e.aggregate_id = existing.aggregate_id AND e.sequence = existing.sequence FOR UPDATE")

	//nolint:rowserrcheck
	// rows is checked by database.MapRowsToObject
	rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
	if err != nil {
		return nil, err
	}

	res := makeIntents(intent)

	err = database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
		var sequence sql.Null[uint32]
		agg := new(eventstore.Aggregate)

		err := scan(
			&agg.Instance,
			&agg.Owner,
			&agg.Type,
			&agg.ID,
			&sequence,
		)
		if err != nil {
			return err
		}

		// store the aggregate's current sequence on its intent
		intentByAggregate(res, agg).sequence = sequence.V

		return nil
	})
	if err != nil {
		return nil, err
	}

	return res, nil
}
// push inserts all commands into eventstore.events2 with a single INSERT,
// reads created_at and position back from the RETURNING clause into each
// command and reduces the resulting events on the given reducer.
//
// in_tx_order is the command's index within this push, so the order of events
// sharing one created_at/position stays reconstructible.
//
// NOTE(review): an empty commands slice would produce invalid SQL (a VALUES
// keyword without rows) — confirm callers guarantee at least one command.
func (s *Storage) push(ctx context.Context, tx *sql.Tx, reducer eventstore.Reducer, commands []*command) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	var stmt database.Statement

	stmt.WriteString(`INSERT INTO eventstore.events2 (instance_id, "owner", aggregate_type, aggregate_id, revision, creator, event_type, payload, "sequence", in_tx_order, created_at, "position") VALUES `)
	for i, cmd := range commands {
		if i > 0 {
			stmt.WriteString(", ")
		}
		cmd.position.InPositionOrder = uint32(i)
		stmt.WriteString(`(`)
		stmt.WriteArgs(
			cmd.intent.Aggregate().Instance,
			cmd.intent.Aggregate().Owner,
			cmd.intent.Aggregate().Type,
			cmd.intent.Aggregate().ID,
			cmd.Revision,
			cmd.Creator,
			cmd.Type,
			cmd.payload,
			cmd.sequence,
			cmd.position.InPositionOrder,
		)
		// created_at and position are computed by the database
		stmt.WriteString(", statement_timestamp(), EXTRACT(EPOCH FROM clock_timestamp()))")
	}
	stmt.WriteString(` RETURNING created_at, "position"`)

	//nolint:rowserrcheck
	// rows is checked by database.MapRowsToObject
	rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
	if err != nil {
		return err
	}

	// rows are returned in insert order, so the i-th row belongs to the
	// i-th command
	var i int
	return database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
		defer func() { i++ }()
		err := scan(
			&commands[i].createdAt,
			&commands[i].position.Position,
		)
		if err != nil {
			return err
		}
		return reducer.Reduce(commands[i].toEvent())
	})
}
// uniqueConstraints applies the unique constraint changes carried by the
// given commands: inserts for added constraints, deletes for removed ones and
// a delete of all constraints of an instance on instance removal. Constraints
// marked as global are stored with an empty instance id.
//
// NOTE(review): every execution error — not only a duplicate key violation —
// is returned as an "already exists" error carrying the constraint's error
// message; confirm this mapping is intended.
func uniqueConstraints(ctx context.Context, tx *sql.Tx, commands []*command) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	var stmt database.Statement

	for _, cmd := range commands {
		if len(cmd.UniqueConstraints) == 0 {
			continue
		}
		for _, constraint := range cmd.UniqueConstraints {
			// one statement per constraint; the builder is reused
			stmt.Reset()
			instance := cmd.intent.PushAggregate.Aggregate().Instance
			if constraint.IsGlobal {
				instance = ""
			}
			switch constraint.Action {
			case eventstore.UniqueConstraintAdd:
				stmt.WriteString(`INSERT INTO eventstore.unique_constraints (instance_id, unique_type, unique_field) VALUES (`)
				stmt.WriteArgs(instance, constraint.UniqueType, constraint.UniqueField)
				stmt.WriteRune(')')
			case eventstore.UniqueConstraintInstanceRemove:
				stmt.WriteString(`DELETE FROM eventstore.unique_constraints WHERE instance_id = `)
				stmt.WriteArgs(instance)
			case eventstore.UniqueConstraintRemove:
				stmt.WriteString(`DELETE FROM eventstore.unique_constraints WHERE `)
				stmt.WriteString(deleteUniqueConstraintClause)
				// the clause already contains the $1-$3 placeholders, so
				// the args are appended without writing new placeholders
				stmt.AppendArgs(
					instance,
					constraint.UniqueType,
					constraint.UniqueField,
				)
			}
			_, err := tx.ExecContext(ctx, stmt.String(), stmt.Args()...)
			if err != nil {
				logging.WithFields("action", constraint.Action).Warn("handling of unique constraint failed")
				errMessage := constraint.ErrorMessage
				if errMessage == "" {
					errMessage = "Errors.Internal"
				}
				return zerrors.ThrowAlreadyExists(err, "POSTG-QzjyP", errMessage)
			}
		}
	}
	return nil
}
// deleteUniqueConstraintClause is the WHERE clause used to delete a single
// unique constraint (placeholders: $1 instance id, $2 type, $3 field).
//
// The query is this complex because unique constraints were accidentally
// stored case sensitively: it first checks for a case sensitive match and
// afterwards for a case insensitive one (LOWER($3)), using the first match.
var deleteUniqueConstraintClause = `
(instance_id = $1 AND unique_type = $2 AND unique_field = (
SELECT unique_field from (
SELECT instance_id, unique_type, unique_field
FROM eventstore.unique_constraints
WHERE instance_id = $1 AND unique_type = $2 AND unique_field = $3
UNION ALL
SELECT instance_id, unique_type, unique_field
FROM eventstore.unique_constraints
WHERE instance_id = $1 AND unique_type = $2 AND unique_field = LOWER($3)
) AS case_insensitive_constraints LIMIT 1)
)`

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,296 @@
package postgres
import (
"context"
"database/sql"
"encoding/json"
"slices"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
"github.com/zitadel/zitadel/internal/v2/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
// Query implements eventstore.Querier. It builds the SQL statement for the
// given query, executes it on the query's own transaction (if any) or on the
// storage's client, and returns the number of reduced events.
func (s *Storage) Query(ctx context.Context, query *eventstore.Query) (eventCount int, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	var stmt database.Statement
	writeQuery(&stmt, query)

	if query.Tx() != nil {
		return executeQuery(ctx, query.Tx(), &stmt, query)
	}

	return executeQuery(ctx, s.client.DB, &stmt, query)
}
// executeQuery runs the prepared statement on the given querier, maps every
// row into a StorageEvent and passes it to the reducer. It returns the number
// of events handed to the reducer together with the first error encountered.
//
// The scan destinations must stay in sync with the column order of
// selectColumns.
func executeQuery(ctx context.Context, tx database.Querier, stmt *database.Statement, reducer eventstore.Reducer) (eventCount int, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	//nolint:rowserrcheck
	// rows is checked by database.MapRowsToObject
	rows, err := tx.QueryContext(ctx, stmt.String(), stmt.Args()...)
	if err != nil {
		return 0, err
	}

	err = database.MapRowsToObject(rows, func(scan func(dest ...any) error) error {
		e := new(eventstore.StorageEvent)
		var payload sql.Null[[]byte]

		err := scan(
			&e.CreatedAt,
			&e.Type,
			&e.Sequence,
			&e.Position.Position,
			&e.Position.InPositionOrder,
			&payload,
			&e.Creator,
			&e.Aggregate.Owner,
			&e.Aggregate.Instance,
			&e.Aggregate.Type,
			&e.Aggregate.ID,
			&e.Revision,
		)
		if err != nil {
			return err
		}
		// decode the payload lazily; an empty payload decodes to a no-op
		e.Payload = func(ptr any) error {
			if len(payload.V) == 0 {
				return nil
			}
			return json.Unmarshal(payload.V, ptr)
		}
		eventCount++
		return reducer.Reduce(e)
	})

	return eventCount, err
}
var (
	// selectColumns lists the event columns in exactly the order expected by
	// the scan in executeQuery.
	selectColumns = `SELECT created_at, event_type, "sequence", "position", in_tx_order, payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision`
	// TODO: condition must know if its args are named parameters or not
	// instancePlaceholder = database.Placeholder("@instance_id")
)
// writeQuery writes the full event query: the selected columns over the UNION
// of all filter sub-selects, followed by the query-level pagination.
func writeQuery(stmt *database.Statement, query *eventstore.Query) {
	stmt.WriteString(selectColumns)
	// stmt.SetNamedArg(instancePlaceholder, query.Instance())
	stmt.WriteString(" FROM (")
	writeFilters(stmt, query.Filters())
	stmt.WriteString(") sub")
	writePagination(stmt, query.Pagination())
}
// from is the shared source table of every filter sub-select.
var from = " FROM eventstore.events2"
// writeFilters writes one parenthesized sub-select per filter, combined with
// UNION ALL. An empty filter list is a programming error and aborts the
// process.
func writeFilters(stmt *database.Statement, filters []*eventstore.Filter) {
	if len(filters) == 0 {
		logging.Fatal("query does not contain filters")
	}

	for idx, filter := range filters {
		if idx > 0 {
			stmt.WriteString(" UNION ALL ")
		}
		stmt.WriteRune('(')
		stmt.WriteString(selectColumns)
		stmt.WriteString(from)
		writeFilter(stmt, filter)
		stmt.WriteRune(')')
	}
}
// writeFilter writes the WHERE clause of one filter sub-select: the parent
// query's instance condition, the aggregate filters and the filter's own
// pagination.
func writeFilter(stmt *database.Statement, filter *eventstore.Filter) {
	stmt.WriteString(" WHERE ")
	filter.Parent().Instance().Write(stmt, "instance_id")
	writeAggregateFilters(stmt, filter.AggregateFilters())
	writePagination(stmt, filter.Pagination())
}
// writePagination writes the position bounds, the ordering and — if set — the
// limit/offset of the given pagination.
func writePagination(stmt *database.Statement, pagination *eventstore.Pagination) {
	writePosition(stmt, pagination.Position())
	writeOrdering(stmt, pagination.Desc())
	if pagination.Pagination() != nil {
		pagination.Pagination().Write(stmt)
	}
}
// writePosition writes the global position bounds of the pagination.
//
// A plain bound compares only the "position" column. When the bound also
// carries an in-transaction order (InPositionOrder > 0), events exactly at
// the boundary position are included as well if their in_tx_order is beyond
// the given order, i.e.:
//
//	((position = bound AND in_tx_order </> order) OR position </> bound)
//
// NOTE(review): if position is non-nil but both Max() and Min() are nil, a
// dangling " AND " is written — confirm the eventstore guarantees at least
// one bound is set whenever a PositionCondition exists.
func writePosition(stmt *database.Statement, position *eventstore.PositionCondition) {
	if position == nil {
		return
	}
	max := position.Max()
	min := position.Min()

	stmt.WriteString(" AND ")

	// upper bound
	if max != nil {
		if max.InPositionOrder > 0 {
			stmt.WriteString("((")
			database.NewNumberEquals(max.Position).Write(stmt, "position")
			stmt.WriteString(" AND ")
			database.NewNumberLess(max.InPositionOrder).Write(stmt, "in_tx_order")
			stmt.WriteRune(')')
			stmt.WriteString(" OR ")
		}
		database.NewNumberLess(max.Position).Write(stmt, "position")
		if max.InPositionOrder > 0 {
			stmt.WriteRune(')')
		}
	}

	if max != nil && min != nil {
		stmt.WriteString(" AND ")
	}

	// lower bound
	if min != nil {
		if min.InPositionOrder > 0 {
			stmt.WriteString("((")
			database.NewNumberEquals(min.Position).Write(stmt, "position")
			stmt.WriteString(" AND ")
			database.NewNumberGreater(min.InPositionOrder).Write(stmt, "in_tx_order")
			stmt.WriteRune(')')
			stmt.WriteString(" OR ")
		}
		database.NewNumberGreater(min.Position).Write(stmt, "position")
		if min.InPositionOrder > 0 {
			stmt.WriteRune(')')
		}
	}
}
// writeAggregateFilters appends the aggregate filters OR-ed together, wrapped
// in parentheses when there is more than one. Nothing is written for an empty
// list.
func writeAggregateFilters(stmt *database.Statement, filters []*eventstore.AggregateFilter) {
	if len(filters) == 0 {
		return
	}

	stmt.WriteString(" AND ")
	wrap := len(filters) > 1
	if wrap {
		stmt.WriteRune('(')
	}
	for idx, filter := range filters {
		if idx > 0 {
			stmt.WriteString(" OR ")
		}
		writeAggregateFilter(stmt, filter)
	}
	if wrap {
		stmt.WriteRune(')')
	}
}
// writeAggregateFilter writes the conditions of one aggregate filter (owner,
// type, ids) AND-ed with its event filters. The whole group is parenthesized
// as soon as it contains more than one part.
func writeAggregateFilter(stmt *database.Statement, filter *eventstore.AggregateFilter) {
	conditions := definedConditions([]*condition{
		{column: "owner", condition: filter.Owners()},
		{column: "aggregate_type", condition: filter.Type()},
		{column: "aggregate_id", condition: filter.IDs()},
	})

	if len(conditions) > 1 || len(filter.Events()) > 0 {
		stmt.WriteRune('(')
	}
	writeConditions(
		stmt,
		conditions,
		" AND ",
	)
	writeEventFilters(stmt, filter.Events())
	if len(conditions) > 1 || len(filter.Events()) > 0 {
		stmt.WriteRune(')')
	}
}
// writeEventFilters appends the event filters OR-ed together, wrapped in
// parentheses when there is more than one. Nothing is written for an empty
// list.
func writeEventFilters(stmt *database.Statement, filters []*eventstore.EventFilter) {
	if len(filters) == 0 {
		return
	}

	stmt.WriteString(" AND ")
	wrap := len(filters) > 1
	if wrap {
		stmt.WriteRune('(')
	}
	for idx, filter := range filters {
		if idx > 0 {
			stmt.WriteString(" OR ")
		}
		writeEventFilter(stmt, filter)
	}
	if wrap {
		stmt.WriteRune(')')
	}
}
// writeEventFilter writes the conditions of one event filter (type,
// created_at, sequence, revision, creator) AND-ed together. The group is
// parenthesized when it contains more than one condition.
func writeEventFilter(stmt *database.Statement, filter *eventstore.EventFilter) {
	conditions := definedConditions([]*condition{
		{column: "event_type", condition: filter.Types()},
		{column: "created_at", condition: filter.CreatedAt()},
		{column: "sequence", condition: filter.Sequence()},
		{column: "revision", condition: filter.Revision()},
		{column: "creator", condition: filter.Creators()},
	})

	if len(conditions) > 1 {
		stmt.WriteRune('(')
	}
	writeConditions(
		stmt,
		conditions,
		" AND ",
	)
	if len(conditions) > 1 {
		stmt.WriteRune(')')
	}
}
// condition pairs a column name with the condition applied to it; a nil
// condition means the column is not filtered (see definedConditions).
type condition struct {
	column    string
	condition database.Condition
}
// writeConditions writes all conditions to stmt, separated by sep. The given
// conditions must not contain nil entries (callers pass the result of
// definedConditions).
func writeConditions(stmt *database.Statement, conditions []*condition, sep string) {
	// the previous manual counter was redundant: definedConditions already
	// removed nil conditions, so every entry is written and the counter
	// always equaled the range index
	for i, cond := range conditions {
		if i > 0 {
			stmt.WriteString(sep)
		}
		cond.condition.Write(stmt, cond.column)
	}
}
// definedConditions drops the entries whose condition is nil (in place) and
// returns the remaining conditions.
func definedConditions(conditions []*condition) []*condition {
	isUndefined := func(cond *condition) bool {
		return cond.condition == nil
	}
	return slices.DeleteFunc(conditions, isUndefined)
}
// writeOrdering appends the ORDER BY clause: events are ordered by their
// global position and, within one transaction, by insertion order. Both
// columns share the same direction.
func writeOrdering(stmt *database.Statement, descending bool) {
	direction := ""
	if descending {
		direction = " DESC"
	}

	stmt.WriteString(" ORDER BY position")
	stmt.WriteString(direction)
	stmt.WriteString(", in_tx_order")
	stmt.WriteString(direction)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,34 @@
package postgres
import (
"context"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/v2/eventstore"
)
// compile-time checks that Storage implements the eventstore interfaces
var (
	_ eventstore.Pusher  = (*Storage)(nil)
	_ eventstore.Querier = (*Storage)(nil)
)
// Storage implements pushing and querying events on top of a PostgreSQL
// database client.
type Storage struct {
	client *database.DB
	config *Config
}

// Config holds the settings of the storage.
type Config struct {
	// MaxRetries is the maximum number of times a failed push transaction is
	// retried (see Storage.Push).
	MaxRetries uint32
}
// New returns a Storage using the given database client and configuration.
func New(client *database.DB, config *Config) *Storage {
	return &Storage{
		client: client,
		config: config,
	}
}
// Health implements eventstore.Pusher.
// It returns an error when the underlying database is not reachable.
func (s *Storage) Health(ctx context.Context) error {
	return s.client.PingContext(ctx)
}