Mirror of https://github.com/zitadel/zitadel.git (synced 2025-04-23 06:51:32 +00:00)

# Which Problems Are Solved

[A recent performance enhancement](https://github.com/zitadel/zitadel/pull/9497) aimed at optimizing event store queries, specifically those involving multiple aggregate type filters, successfully improved index utilization. While the query planner now correctly selects the relevant indexes, it uses [bitmap index scans](https://www.postgresql.org/docs/current/indexes-bitmap-scans.html) to retrieve the data. This approach, while beneficial in many scenarios, introduces a potential I/O bottleneck: the bitmap index scan first identifies the required database blocks and then uses a bitmap to fetch the corresponding rows from the table's heap. This subsequent "bitmap heap scan" can cause significant I/O overhead, particularly when queries return a substantial number of rows spread across many data pages.

## Impact

Under heavy load, or with queries filtering for a wide range of events across multiple aggregate types, this increased I/O activity may lead to:

- Increased query latency.
- Elevated disk utilization.
- Potential performance degradation of the event store and dependent systems.

# How the Problems Are Solved

To address this I/O bottleneck and further optimize query performance, the projection handler has been modified: instead of employing one OR clause per aggregate type, the aggregate and event type filters are now combined into IN ARRAY filters.

Technical Details:

This change allows the PostgreSQL query planner to leverage [index-only scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html). With IN ARRAY filters, the database can retrieve the necessary data directly from the index, eliminating the need to access the table's heap. This results in:

* Reduced I/O: Index-only scans significantly reduce disk I/O, as the database avoids reading data pages from the main table.
* Improved query performance: Lower I/O translates into substantially shorter query execution times and lower latency.
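
As a rough illustration of the change, the sketch below flattens per-aggregate-type event filters into two arrays that feed a single `aggregate_type = ANY(...) AND event_type = ANY(...)` condition. This is a standalone sketch, not the actual projection handler code; the type and function names and the exact query fragment are made up for the example.

```go
package main

import (
	"fmt"
	"strings"
)

// eventFilters maps an aggregate type to the event types a projection subscribes to.
type eventFilters map[string][]string

// combinedCondition flattens the per-aggregate-type filters into two arrays and renders
// the WHERE fragment that replaces the previous one-OR-branch-per-aggregate-type form.
func combinedCondition(filters eventFilters) (cond string, aggregateTypes, eventTypes []string) {
	for aggregateType, events := range filters {
		aggregateTypes = append(aggregateTypes, aggregateType)
		eventTypes = append(eventTypes, events...)
	}
	cond = strings.Join([]string{
		"instance_id = $1",
		`"position" > $2`,
		"aggregate_type = ANY($3)", // single array filter instead of OR'ed equality checks
		"event_type = ANY($4)",
	}, " AND ")
	return cond, aggregateTypes, eventTypes
}

func main() {
	cond, aggregateTypes, eventTypes := combinedCondition(eventFilters{
		"project":  {"project.application.added", "project.application.changed", "project.removed"},
		"org":      {"org.removed"},
		"instance": {"instance.removed"},
	})
	fmt.Println(cond)
	fmt.Println(aggregateTypes)
	fmt.Println(eventTypes)
}
```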

# Additional Changes

- rollback of https://github.com/zitadel/zitadel/pull/9497

# Additional Information

## Query Plan of previous query

```sql
SELECT
    created_at, event_type, "sequence", "position", payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision
FROM
    eventstore.events2
WHERE
    instance_id = '<INSTANCE_ID>'
    AND (
        (
            instance_id = '<INSTANCE_ID>'
            AND "position" > <POSITION>
            AND aggregate_type = 'project'
            AND event_type = ANY(ARRAY[
                 'project.application.added'
                ,'project.application.changed'
                ,'project.application.deactivated'
                ,'project.application.reactivated'
                ,'project.application.removed'
                ,'project.removed'
                ,'project.application.config.api.added'
                ,'project.application.config.api.changed'
                ,'project.application.config.api.secret.changed'
                ,'project.application.config.api.secret.updated'
                ,'project.application.config.oidc.added'
                ,'project.application.config.oidc.changed'
                ,'project.application.config.oidc.secret.changed'
                ,'project.application.config.oidc.secret.updated'
                ,'project.application.config.saml.added'
                ,'project.application.config.saml.changed'
            ])
        )
        OR (
            instance_id = '<INSTANCE_ID>'
            AND "position" > <POSITION>
            AND aggregate_type = 'org'
            AND event_type = 'org.removed'
        )
        OR (
            instance_id = '<INSTANCE_ID>'
            AND "position" > <POSITION>
            AND aggregate_type = 'instance'
            AND event_type = 'instance.removed'
        )
    )
    AND "position" > 1741600905.3495
    AND "position" < (
        SELECT COALESCE(EXTRACT(EPOCH FROM min(xact_start)), EXTRACT(EPOCH FROM now()))
        FROM pg_stat_activity
        WHERE datname = current_database()
            AND application_name = ANY(ARRAY['zitadel_es_pusher_', 'zitadel_es_pusher', 'zitadel_es_pusher_<INSTANCE_ID>'])
            AND state <> 'idle'
    )
ORDER BY "position", in_tx_order
LIMIT 200 OFFSET 1;
```

```
Limit  (cost=120.08..120.09 rows=7 width=361) (actual time=2.167..2.172 rows=0 loops=1)
  Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
  InitPlan 1
    ->  Aggregate  (cost=2.74..2.76 rows=1 width=32) (actual time=1.813..1.815 rows=1 loops=1)
          Output: COALESCE(EXTRACT(epoch FROM min(s.xact_start)), EXTRACT(epoch FROM now()))
          ->  Nested Loop  (cost=0.00..2.74 rows=1 width=8) (actual time=1.803..1.805 rows=0 loops=1)
                Output: s.xact_start
                Join Filter: (d.oid = s.datid)
                ->  Seq Scan on pg_catalog.pg_database d  (cost=0.00..1.07 rows=1 width=4) (actual time=0.016..0.021 rows=1 loops=1)
                      Output: d.oid, d.datname, d.datdba, d.encoding, d.datlocprovider, d.datistemplate, d.datallowconn, d.dathasloginevt, d.datconnlimit, d.datfrozenxid, d.datminmxid, d.dattablespace, d.datcollate, d.datctype, d.datlocale, d.daticurules, d.datcollversion, d.datacl
                      Filter: (d.datname = current_database())
                      Rows Removed by Filter: 4
                ->  Function Scan on pg_catalog.pg_stat_get_activity s  (cost=0.00..1.63 rows=3 width=16) (actual time=1.781..1.781 rows=0 loops=1)
                      Output: s.datid, s.pid, s.usesysid, s.application_name, s.state, s.query, s.wait_event_type, s.wait_event, s.xact_start, s.query_start, s.backend_start, s.state_change, s.client_addr, s.client_hostname, s.client_port, s.backend_xid, s.backend_xmin, s.backend_type, s.ssl, s.sslversion, s.sslcipher, s.sslbits, s.ssl_client_dn, s.ssl_client_serial, s.ssl_issuer_dn, s.gss_auth, s.gss_princ, s.gss_enc, s.gss_delegation, s.leader_pid, s.query_id
                      Function Call: pg_stat_get_activity(NULL::integer)
                      Filter: ((s.state <> 'idle'::text) AND (s.application_name = ANY ('{zitadel_es_pusher_,zitadel_es_pusher,zitadel_es_pusher_<INSTANCE_ID>}'::text[])))
                      Rows Removed by Filter: 49
  ->  Sort  (cost=117.31..117.33 rows=8 width=361) (actual time=2.167..2.168 rows=0 loops=1)
        Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
        Sort Key: events2."position", events2.in_tx_order
        Sort Method: quicksort  Memory: 25kB
        ->  Bitmap Heap Scan on eventstore.events2  (cost=84.92..117.19 rows=8 width=361) (actual time=2.088..2.089 rows=0 loops=1)
              Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
              Recheck Cond: (((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'project'::text) AND (events2.event_type = ANY ('{project.application.added,project.application.changed,project.application.deactivated,project.application.reactivated,project.application.removed,project.removed,project.application.config.api.added,project.application.config.api.changed,project.application.config.api.secret.changed,project.application.config.api.secret.updated,project.application.config.oidc.added,project.application.config.oidc.changed,project.application.config.oidc.secret.changed,project.application.config.oidc.secret.updated,project.application.config.saml.added,project.application.config.saml.changed}'::text[])) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1)) OR ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'org'::text) AND (events2.event_type = 'org.removed'::text) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1)) OR ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'instance'::text) AND (events2.event_type = 'instance.removed'::text) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1)))
              ->  BitmapOr  (cost=84.88..84.88 rows=8 width=0) (actual time=2.080..2.081 rows=0 loops=1)
                    ->  Bitmap Index Scan on es_projection  (cost=0.00..75.44 rows=8 width=0) (actual time=2.016..2.017 rows=0 loops=1)
                          Index Cond: ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'project'::text) AND (events2.event_type = ANY ('{project.application.added,project.application.changed,project.application.deactivated,project.application.reactivated,project.application.removed,project.removed,project.application.config.api.added,project.application.config.api.changed,project.application.config.api.secret.changed,project.application.config.api.secret.updated,project.application.config.oidc.added,project.application.config.oidc.changed,project.application.config.oidc.secret.changed,project.application.config.oidc.secret.updated,project.application.config.saml.added,project.application.config.saml.changed}'::text[])) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1))
                    ->  Bitmap Index Scan on es_projection  (cost=0.00..4.71 rows=1 width=0) (actual time=0.016..0.016 rows=0 loops=1)
                          Index Cond: ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'org'::text) AND (events2.event_type = 'org.removed'::text) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1))
                    ->  Bitmap Index Scan on es_projection  (cost=0.00..4.71 rows=1 width=0) (actual time=0.045..0.045 rows=0 loops=1)
                          Index Cond: ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = 'instance'::text) AND (events2.event_type = 'instance.removed'::text) AND (events2."position" > <POSITION>) AND (events2."position" > 1741600905.3495) AND (events2."position" < (InitPlan 1).col1))
Query Identifier: 3194938266011254479
Planning Time: 1.295 ms
Execution Time: 2.832 ms
```

## Query Plan of new query

```sql
SELECT
    created_at, event_type, "sequence", "position", payload, creator, "owner", instance_id, aggregate_type, aggregate_id, revision
FROM
    eventstore.events2
WHERE
    instance_id = '<INSTANCE_ID>'
    AND "position" > <POSITION>
    AND aggregate_type = ANY(ARRAY['project', 'instance', 'org'])
    AND event_type = ANY(ARRAY[
         'project.application.added'
        ,'project.application.changed'
        ,'project.application.deactivated'
        ,'project.application.reactivated'
        ,'project.application.removed'
        ,'project.removed'
        ,'project.application.config.api.added'
        ,'project.application.config.api.changed'
        ,'project.application.config.api.secret.changed'
        ,'project.application.config.api.secret.updated'
        ,'project.application.config.oidc.added'
        ,'project.application.config.oidc.changed'
        ,'project.application.config.oidc.secret.changed'
        ,'project.application.config.oidc.secret.updated'
        ,'project.application.config.saml.added'
        ,'project.application.config.saml.changed'
        ,'org.removed'
        ,'instance.removed'
    ])
    AND "position" < (
        SELECT COALESCE(EXTRACT(EPOCH FROM min(xact_start)), EXTRACT(EPOCH FROM now()))
        FROM pg_stat_activity
        WHERE datname = current_database()
            AND application_name = ANY(ARRAY['zitadel_es_pusher_', 'zitadel_es_pusher', 'zitadel_es_pusher_<INSTANCE_ID>'])
            AND state <> 'idle'
    )
ORDER BY "position", in_tx_order
LIMIT 200 OFFSET 1;
```

```
Limit  (cost=293.34..293.36 rows=8 width=361) (actual time=4.686..4.689 rows=0 loops=1)
  Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
  InitPlan 1
    ->  Aggregate  (cost=2.74..2.76 rows=1 width=32) (actual time=1.717..1.719 rows=1 loops=1)
          Output: COALESCE(EXTRACT(epoch FROM min(s.xact_start)), EXTRACT(epoch FROM now()))
          ->  Nested Loop  (cost=0.00..2.74 rows=1 width=8) (actual time=1.658..1.659 rows=0 loops=1)
                Output: s.xact_start
                Join Filter: (d.oid = s.datid)
                ->  Seq Scan on pg_catalog.pg_database d  (cost=0.00..1.07 rows=1 width=4) (actual time=0.026..0.028 rows=1 loops=1)
                      Output: d.oid, d.datname, d.datdba, d.encoding, d.datlocprovider, d.datistemplate, d.datallowconn, d.dathasloginevt, d.datconnlimit, d.datfrozenxid, d.datminmxid, d.dattablespace, d.datcollate, d.datctype, d.datlocale, d.daticurules, d.datcollversion, d.datacl
                      Filter: (d.datname = current_database())
                      Rows Removed by Filter: 4
                ->  Function Scan on pg_catalog.pg_stat_get_activity s  (cost=0.00..1.63 rows=3 width=16) (actual time=1.628..1.628 rows=0 loops=1)
                      Output: s.datid, s.pid, s.usesysid, s.application_name, s.state, s.query, s.wait_event_type, s.wait_event, s.xact_start, s.query_start, s.backend_start, s.state_change, s.client_addr, s.client_hostname, s.client_port, s.backend_xid, s.backend_xmin, s.backend_type, s.ssl, s.sslversion, s.sslcipher, s.sslbits, s.ssl_client_dn, s.ssl_client_serial, s.ssl_issuer_dn, s.gss_auth, s.gss_princ, s.gss_enc, s.gss_delegation, s.leader_pid, s.query_id
                      Function Call: pg_stat_get_activity(NULL::integer)
                      Filter: ((s.state <> 'idle'::text) AND (s.application_name = ANY ('{zitadel_es_pusher_,zitadel_es_pusher,zitadel_es_pusher_<INSTANCE_ID>}'::text[])))
                      Rows Removed by Filter: 42
  ->  Sort  (cost=290.58..290.60 rows=9 width=361) (actual time=4.685..4.685 rows=0 loops=1)
        Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
        Sort Key: events2."position", events2.in_tx_order
        Sort Method: quicksort  Memory: 25kB
        ->  Index Scan using es_projection on eventstore.events2  (cost=0.70..290.43 rows=9 width=361) (actual time=4.616..4.617 rows=0 loops=1)
              Output: events2.created_at, events2.event_type, events2.sequence, events2."position", events2.payload, events2.creator, events2.owner, events2.instance_id, events2.aggregate_type, events2.aggregate_id, events2.revision, events2.in_tx_order
              Index Cond: ((events2.instance_id = '<INSTANCE_ID>'::text) AND (events2.aggregate_type = ANY ('{project,instance,org}'::text[])) AND (events2.event_type = ANY ('{project.application.added,project.application.changed,project.application.deactivated,project.application.reactivated,project.application.removed,project.removed,project.application.config.api.added,project.application.config.api.changed,project.application.config.api.secret.changed,project.application.config.api.secret.updated,project.application.config.oidc.added,project.application.config.oidc.changed,project.application.config.oidc.secret.changed,project.application.config.oidc.secret.updated,project.application.config.saml.added,project.application.config.saml.changed,org.removed,instance.removed}'::text[])) AND (events2."position" > <POSITION>) AND (events2."position" < (InitPlan 1).col1))
Query Identifier: -8254550537132386499
Planning Time: 2.864 ms
Execution Time: 5.414 ms
```
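
A comparison like the one above can be reproduced with `EXPLAIN (ANALYZE, BUFFERS)`. The following standalone sketch is not part of the repository; it assumes pgx v5 is available and uses placeholder connection and filter values. It prints the plan of the combined-filter query so the scan type (a single index scan on `es_projection` versus a `BitmapOr` of bitmap index scans plus a bitmap heap scan) can be checked.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN: point this at the database that holds eventstore.events2.
	conn, err := pgx.Connect(ctx, "postgres://user:password@localhost:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// EXPLAIN the combined-filter query; the position literal and filter values are placeholders.
	rows, err := conn.Query(ctx, `EXPLAIN (ANALYZE, BUFFERS)
		SELECT "position"
		FROM eventstore.events2
		WHERE instance_id = $1
			AND aggregate_type = ANY($2)
			AND event_type = ANY($3)
			AND "position" > 1741600905.3495
		ORDER BY "position", in_tx_order
		LIMIT 200`,
		"my-instance-id", // placeholder instance id
		[]string{"project", "org", "instance"},
		[]string{"project.removed", "org.removed", "instance.removed"},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// Expect a single index (or index-only) scan on es_projection instead of a
	// BitmapOr of bitmap index scans followed by a bitmap heap scan.
	for rows.Next() {
		var line string
		if err := rows.Scan(&line); err != nil {
			log.Fatal(err)
		}
		fmt.Println(line)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```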

456 lines · 14 KiB · Go

```go
package sql

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"regexp"
	"strconv"
	"strings"

	"github.com/cockroachdb/cockroach-go/v2/crdb"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/eventstore/repository"
	"github.com/zitadel/zitadel/internal/telemetry/tracing"
	"github.com/zitadel/zitadel/internal/zerrors"
)

const (
	// as soon as stored procedures are possible in crdb
	// we could move the code to migrations and call the procedure
	// tracking issue: https://github.com/cockroachdb/cockroach/issues/17511
	//
	// previous_data selects the needed data of the latest event of the aggregate
	// and buffers it (crdb in-memory)
	crdbInsert = "WITH previous_data (aggregate_type_sequence, aggregate_sequence, resource_owner) AS (" +
		"SELECT agg_type.seq, agg.seq, agg.ro FROM " +
		"(" +
		// max sequence of requested aggregate type
		" SELECT MAX(event_sequence) seq, 1 join_me" +
		" FROM eventstore.events" +
		" WHERE aggregate_type = $2" +
		" AND (CASE WHEN $9::TEXT IS NULL THEN instance_id is null else instance_id = $9::TEXT END)" +
		") AS agg_type " +
		// combined with
		"LEFT JOIN " +
		"(" +
		// max sequence and resource owner of aggregate root
		" SELECT event_sequence seq, resource_owner ro, 1 join_me" +
		" FROM eventstore.events" +
		" WHERE aggregate_type = $2 AND aggregate_id = $3" +
		" AND (CASE WHEN $9::TEXT IS NULL THEN instance_id is null else instance_id = $9::TEXT END)" +
		" ORDER BY event_sequence DESC" +
		" LIMIT 1" +
		") AS agg USING(join_me)" +
		") " +
		"INSERT INTO eventstore.events (" +
		" event_type," +
		" aggregate_type," +
		" aggregate_id," +
		" aggregate_version," +
		" creation_date," +
		" position," +
		" event_data," +
		" editor_user," +
		" editor_service," +
		" resource_owner," +
		" instance_id," +
		" event_sequence," +
		" previous_aggregate_sequence," +
		" previous_aggregate_type_sequence," +
		" in_tx_order" +
		") " +
		// defines the data to be inserted
		"SELECT" +
		" $1::VARCHAR AS event_type," +
		" $2::VARCHAR AS aggregate_type," +
		" $3::VARCHAR AS aggregate_id," +
		" $4::VARCHAR AS aggregate_version," +
		" hlc_to_timestamp(cluster_logical_timestamp()) AS creation_date," +
		" cluster_logical_timestamp() AS position," +
		" $5::JSONB AS event_data," +
		" $6::VARCHAR AS editor_user," +
		" $7::VARCHAR AS editor_service," +
		" COALESCE((resource_owner), $8::VARCHAR) AS resource_owner," +
		" $9::VARCHAR AS instance_id," +
		" COALESCE(aggregate_sequence, 0)+1," +
		" aggregate_sequence AS previous_aggregate_sequence," +
		" aggregate_type_sequence AS previous_aggregate_type_sequence," +
		" $10 AS in_tx_order " +
		"FROM previous_data " +
		"RETURNING id, event_sequence, creation_date, resource_owner, instance_id"

	uniqueInsert = `INSERT INTO eventstore.unique_constraints
		(
			unique_type,
			unique_field,
			instance_id
		)
		VALUES (
			$1,
			$2,
			$3
		)`

	uniqueDelete = `DELETE FROM eventstore.unique_constraints
		WHERE unique_type = $1 and unique_field = $2 and instance_id = $3`
	uniqueDeleteInstance = `DELETE FROM eventstore.unique_constraints
		WHERE instance_id = $1`
)

// awaitOpenTransactions ensures event ordering, so we don't read events younger than open transactions
var (
	awaitOpenTransactionsV1 string
	awaitOpenTransactionsV2 string
)

func awaitOpenTransactions(useV1 bool) string {
	if useV1 {
		return awaitOpenTransactionsV1
	}
	return awaitOpenTransactionsV2
}

type CRDB struct {
	*database.DB
}

func NewCRDB(client *database.DB) *CRDB {
	switch client.Type() {
	case "cockroach":
		awaitOpenTransactionsV1 = " AND creation_date::TIMESTAMP < (SELECT COALESCE(MIN(start), NOW())::TIMESTAMP FROM crdb_internal.cluster_transactions where application_name = ANY(?))"
		awaitOpenTransactionsV2 = ` AND hlc_to_timestamp("position") < (SELECT COALESCE(MIN(start), NOW())::TIMESTAMP FROM crdb_internal.cluster_transactions where application_name = ANY(?))`
	case "postgres":
		awaitOpenTransactionsV1 = ` AND EXTRACT(EPOCH FROM created_at) < (SELECT COALESCE(EXTRACT(EPOCH FROM min(xact_start)), EXTRACT(EPOCH FROM now())) FROM pg_stat_activity WHERE datname = current_database() AND application_name = ANY(?) AND state <> 'idle')`
		awaitOpenTransactionsV2 = ` AND "position" < (SELECT COALESCE(EXTRACT(EPOCH FROM min(xact_start)), EXTRACT(EPOCH FROM now())) FROM pg_stat_activity WHERE datname = current_database() AND application_name = ANY(?) AND state <> 'idle')`
	}

	return &CRDB{client}
}

func (db *CRDB) Health(ctx context.Context) error { return db.Ping() }

// Push adds all events to the eventstreams of the aggregates.
// This call is transaction safe. The transaction will be rolled back if one event fails
func (db *CRDB) Push(ctx context.Context, commands ...eventstore.Command) (events []eventstore.Event, err error) {
	events = make([]eventstore.Event, len(commands))

	err = crdb.ExecuteTx(ctx, db.DB.DB, nil, func(tx *sql.Tx) error {

		var uniqueConstraints []*eventstore.UniqueConstraint

		for i, command := range commands {
			if command.Aggregate().InstanceID == "" {
				command.Aggregate().InstanceID = authz.GetInstance(ctx).InstanceID()
			}

			var payload []byte
			if command.Payload() != nil {
				payload, err = json.Marshal(command.Payload())
				if err != nil {
					return err
				}
			}
			e := &repository.Event{
				Typ:           command.Type(),
				Data:          payload,
				EditorUser:    command.Creator(),
				Version:       command.Aggregate().Version,
				AggregateID:   command.Aggregate().ID,
				AggregateType: command.Aggregate().Type,
				ResourceOwner: sql.NullString{String: command.Aggregate().ResourceOwner, Valid: command.Aggregate().ResourceOwner != ""},
				InstanceID:    command.Aggregate().InstanceID,
			}

			err := tx.QueryRowContext(ctx, crdbInsert,
				e.Type(),
				e.Aggregate().Type,
				e.Aggregate().ID,
				e.Aggregate().Version,
				payload,
				e.Creator(),
				"zitadel",
				e.Aggregate().ResourceOwner,
				e.Aggregate().InstanceID,
				i,
			).Scan(&e.ID, &e.Seq, &e.CreationDate, &e.ResourceOwner, &e.InstanceID)

			if err != nil {
				logging.WithFields(
					"aggregate", e.Aggregate().Type,
					"aggregateId", e.Aggregate().ID,
					"aggregateType", e.Aggregate().Type,
					"eventType", e.Type(),
					"instanceID", e.Aggregate().InstanceID,
				).WithError(err).Debug("query failed")
				return zerrors.ThrowInternal(err, "SQL-SBP37", "unable to create event")
			}

			uniqueConstraints = append(uniqueConstraints, command.UniqueConstraints()...)
			events[i] = e
		}

		return db.handleUniqueConstraints(ctx, tx, uniqueConstraints...)
	})
	if err != nil && !errors.Is(err, &zerrors.ZitadelError{}) {
		err = zerrors.ThrowInternal(err, "SQL-DjgtG", "unable to store events")
	}

	return events, err
}

// handleUniqueConstraints adds or removes unique constraints
func (db *CRDB) handleUniqueConstraints(ctx context.Context, tx *sql.Tx, uniqueConstraints ...*eventstore.UniqueConstraint) (err error) {
	if len(uniqueConstraints) == 0 || (len(uniqueConstraints) == 1 && uniqueConstraints[0] == nil) {
		return nil
	}

	for _, uniqueConstraint := range uniqueConstraints {
		uniqueConstraint.UniqueField = strings.ToLower(uniqueConstraint.UniqueField)
		switch uniqueConstraint.Action {
		case eventstore.UniqueConstraintAdd:
			_, err := tx.ExecContext(ctx, uniqueInsert, uniqueConstraint.UniqueType, uniqueConstraint.UniqueField, authz.GetInstance(ctx).InstanceID())
			if err != nil {
				logging.WithFields(
					"unique_type", uniqueConstraint.UniqueType,
					"unique_field", uniqueConstraint.UniqueField).WithError(err).Info("insert unique constraint failed")

				if db.isUniqueViolationError(err) {
					return zerrors.ThrowAlreadyExists(err, "SQL-wHcEq", uniqueConstraint.ErrorMessage)
				}

				return zerrors.ThrowInternal(err, "SQL-dM9ds", "unable to create unique constraint")
			}
		case eventstore.UniqueConstraintRemove:
			_, err := tx.ExecContext(ctx, uniqueDelete, uniqueConstraint.UniqueType, uniqueConstraint.UniqueField, authz.GetInstance(ctx).InstanceID())
			if err != nil {
				logging.WithFields(
					"unique_type", uniqueConstraint.UniqueType,
					"unique_field", uniqueConstraint.UniqueField).WithError(err).Info("delete unique constraint failed")
				return zerrors.ThrowInternal(err, "SQL-6n88i", "unable to remove unique constraint")
			}
		case eventstore.UniqueConstraintInstanceRemove:
			_, err := tx.ExecContext(ctx, uniqueDeleteInstance, authz.GetInstance(ctx).InstanceID())
			if err != nil {
				logging.WithFields(
					"instance_id", authz.GetInstance(ctx).InstanceID()).WithError(err).Info("delete instance unique constraints failed")
				return zerrors.ThrowInternal(err, "SQL-6n88i", "unable to remove unique constraints of instance")
			}
		}
	}
	return nil
}

// FilterToReducer finds all events matching the given search query and passes them to the reduce function.
func (crdb *CRDB) FilterToReducer(ctx context.Context, searchQuery *eventstore.SearchQueryBuilder, reduce eventstore.Reducer) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	err = query(ctx, crdb, searchQuery, reduce, false)
	if err == nil {
		return nil
	}
	pgErr := new(pgconn.PgError)
	// check if the events2 table does not exist
	if errors.As(err, &pgErr) && pgErr.Code == "42P01" {
		return query(ctx, crdb, searchQuery, reduce, true)
	}
	return err
}

// LatestSequence returns the latest sequence found by the search query
func (db *CRDB) LatestSequence(ctx context.Context, searchQuery *eventstore.SearchQueryBuilder) (float64, error) {
	var position sql.NullFloat64
	err := query(ctx, db, searchQuery, &position, false)
	return position.Float64, err
}

// InstanceIDs returns the instance ids found by the search query
func (db *CRDB) InstanceIDs(ctx context.Context, searchQuery *eventstore.SearchQueryBuilder) ([]string, error) {
	var ids []string
	err := query(ctx, db, searchQuery, &ids, false)
	if err != nil {
		return nil, err
	}
	return ids, nil
}

func (db *CRDB) Client() *database.DB {
	return db.DB
}

func (db *CRDB) orderByEventSequence(desc, shouldOrderBySequence, useV1 bool) string {
	if useV1 {
		if desc {
			return ` ORDER BY event_sequence DESC`
		}
		return ` ORDER BY event_sequence`
	}
	if shouldOrderBySequence {
		if desc {
			return ` ORDER BY "sequence" DESC`
		}
		return ` ORDER BY "sequence"`
	}

	if desc {
		return ` ORDER BY "position" DESC, in_tx_order DESC`
	}
	return ` ORDER BY "position", in_tx_order`
}

func (db *CRDB) eventQuery(useV1 bool) string {
	if useV1 {
		return "SELECT" +
			" creation_date" +
			", event_type" +
			", event_sequence" +
			", event_data" +
			", editor_user" +
			", resource_owner" +
			", instance_id" +
			", aggregate_type" +
			", aggregate_id" +
			", aggregate_version" +
			" FROM eventstore.events"
	}
	return "SELECT" +
		" created_at" +
		", event_type" +
		`, "sequence"` +
		`, "position"` +
		", payload" +
		", creator" +
		`, "owner"` +
		", instance_id" +
		", aggregate_type" +
		", aggregate_id" +
		", revision" +
		" FROM eventstore.events2"
}

func (db *CRDB) maxSequenceQuery(useV1 bool) string {
	if useV1 {
		return `SELECT event_sequence FROM eventstore.events`
	}
	return `SELECT "position" FROM eventstore.events2`
}

func (db *CRDB) instanceIDsQuery(useV1 bool) string {
	table := "eventstore.events2"
	if useV1 {
		table = "eventstore.events"
	}
	return "SELECT DISTINCT instance_id FROM " + table
}

func (db *CRDB) columnName(col repository.Field, useV1 bool) string {
	switch col {
	case repository.FieldAggregateID:
		return "aggregate_id"
	case repository.FieldAggregateType:
		return "aggregate_type"
	case repository.FieldSequence:
		if useV1 {
			return "event_sequence"
		}
		return `"sequence"`
	case repository.FieldResourceOwner:
		if useV1 {
			return "resource_owner"
		}
		return `"owner"`
	case repository.FieldInstanceID:
		return "instance_id"
	case repository.FieldEditorService:
		if useV1 {
			return "editor_service"
		}
		return ""
	case repository.FieldEditorUser:
		if useV1 {
			return "editor_user"
		}
		return "creator"
	case repository.FieldEventType:
		return "event_type"
	case repository.FieldEventData:
		if useV1 {
			return "event_data"
		}
		return "payload"
	case repository.FieldCreationDate:
		if useV1 {
			return "creation_date"
		}
		return "created_at"
	case repository.FieldPosition:
		return `"position"`
	default:
		return ""
	}
}
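
// conditionFormat returns the condition format for the given operation. Illustrative
// example: combined with operation, repository.OperationIn renders a condition like
// `aggregate_type = ANY(?)` and repository.OperationNotIn renders `aggregate_type <> ALL(?)`.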
func (db *CRDB) conditionFormat(operation repository.Operation) string {
	switch operation {
	case repository.OperationIn:
		return "%s %s ANY(?)"
	case repository.OperationNotIn:
		return "%s %s ALL(?)"
	}
	return "%s %s ?"
}

func (db *CRDB) operation(operation repository.Operation) string {
	switch operation {
	case repository.OperationEquals, repository.OperationIn:
		return "="
	case repository.OperationGreater:
		return ">"
	case repository.OperationLess:
		return "<"
	case repository.OperationJSONContains:
		return "@>"
	case repository.OperationNotIn:
		return "<>"
	}
	return ""
}

var (
	placeholder = regexp.MustCompile(`\?`)
)

// placeholder replaces all "?" with postgres placeholders ($<NUMBER>)
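// Illustrative example: db.placeholder(`SELECT "position" FROM eventstore.events2 WHERE instance_id = ? AND aggregate_type = ANY(?)`)
// returns `SELECT "position" FROM eventstore.events2 WHERE instance_id = $1 AND aggregate_type = ANY($2)`.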
func (db *CRDB) placeholder(query string) string {
	occurrences := placeholder.FindAllStringIndex(query, -1)
	if len(occurrences) == 0 {
		return query
	}
	replaced := query[:occurrences[0][0]]

	for i, l := range occurrences {
		nextIDX := len(query)
		if i < len(occurrences)-1 {
			nextIDX = occurrences[i+1][0]
		}
		replaced = replaced + "$" + strconv.Itoa(i+1) + query[l[1]:nextIDX]
	}
	return replaced
}

func (db *CRDB) isUniqueViolationError(err error) bool {
	if pgxErr, ok := err.(*pgconn.PgError); ok {
		if pgxErr.Code == "23505" {
			return true
		}
	}
	return false
}
```