Mirror of https://github.com/zitadel/zitadel.git (synced 2024-12-16 04:48:04 +00:00)
Commit dab5d9e756
# Which Problems Are Solved

If many events are written to the same aggregate id, it can happen that Zitadel [starts to retry the push transaction](48ffc902cc/internal/eventstore/eventstore.go#L101) because [the locking behaviour](48ffc902cc/internal/eventstore/v3/sequence.go#L25) during push computes the wrong sequence: newly committed events are not visible to the transaction, yet they affect the current sequence. In cases with high command traffic on a single aggregate id this can severely impact the overall performance of Zitadel, because many connections of the `eventstore pusher` database pool block each other.

# How the Problems Are Solved

To improve performance, this locking mechanism was removed and the business logic of push was moved into SQL functions, which reduces network traffic and lets the database analyze the statements before the actual push. For clients of the eventstore framework nothing changed.

# Additional Changes

- After a connection is established, the newly added database types are prefetched.
- `eventstore.BaseEvent` now returns the correct revision of the event.

# Additional Context

- Part of https://github.com/zitadel/zitadel/issues/8931

---------

Co-authored-by: Tim Möhlmann <tim+github@zitadel.com>
Co-authored-by: Livio Spring <livio.a@gmail.com>
Co-authored-by: Max Peintner <max@caos.ch>
Co-authored-by: Elio Bischof <elio@zitadel.com>
Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Miguel Cabrerizo <30386061+doncicuto@users.noreply.github.com>
Co-authored-by: Joakim Lodén <Loddan@users.noreply.github.com>
Co-authored-by: Yxnt <Yxnt@users.noreply.github.com>
Co-authored-by: Stefan Benz <stefan@caos.ch>
Co-authored-by: Harsha Reddy <harsha.reddy@klaviyo.com>
Co-authored-by: Zach H <zhirschtritt@gmail.com>
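For context, the prefetching of the newly added database types mentioned above is wired in through pgx's `AfterConnect` hook, the same way the test setup in the file below does it. A minimal sketch; the connection string is a placeholder, while `pgxpool`, `stdlib`, and `new_es.RegisterEventstoreTypes` are the packages and function used by the test file itself:

```go
// Sketch: make the eventstore's custom database types known to every new
// connection of a pgx pool, then expose the pool as a database/sql *DB.
connConfig, err := pgxpool.ParseConfig("postgresql://localhost:26257/zitadel") // placeholder DSN
if err != nil {
	logging.WithFields("error", err).Fatal("unable to parse db url")
}
connConfig.AfterConnect = new_es.RegisterEventstoreTypes // prefetch the newly added types per connection
pool, err := pgxpool.NewWithConfig(context.Background(), connConfig)
if err != nil {
	logging.WithFields("error", err).Fatal("unable to create db pool")
}
db := stdlib.OpenDBFromPool(pool) // hand the resulting *sql.DB to the eventstore client
```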
package eventstore_test

import (
	"context"
	"database/sql"
	"encoding/json"
	"os"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach-go/v2/testserver"
	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/initialise"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/database/cockroach"
	"github.com/zitadel/zitadel/internal/eventstore"
	es_sql "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
	new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
)

var (
	testCRDBClient *database.DB
	queriers       map[string]eventstore.Querier = make(map[string]eventstore.Querier)
	pushers        map[string]eventstore.Pusher  = make(map[string]eventstore.Pusher)
	clients        map[string]*database.DB       = make(map[string]*database.DB)
)

func TestMain(m *testing.M) {
	opts := make([]testserver.TestServerOpt, 0, 1)
	if version := os.Getenv("ZITADEL_CRDB_VERSION"); version != "" {
		opts = append(opts, testserver.CustomVersionOpt(version))
	}
	ts, err := testserver.NewTestServer(opts...)
	if err != nil {
		logging.WithFields("error", err).Fatal("unable to start db")
	}

	testCRDBClient = &database.DB{
		Database: new(testDB),
	}

	connConfig, err := pgxpool.ParseConfig(ts.PGURL().String())
	if err != nil {
		logging.WithFields("error", err).Fatal("unable to parse db url")
	}
	connConfig.AfterConnect = new_es.RegisterEventstoreTypes
	pool, err := pgxpool.NewWithConfig(context.Background(), connConfig)
	if err != nil {
		logging.WithFields("error", err).Fatal("unable to create db pool")
	}
	testCRDBClient.DB = stdlib.OpenDBFromPool(pool)
	if err = testCRDBClient.Ping(); err != nil {
		logging.WithFields("error", err).Fatal("unable to ping db")
	}

	v2 := &es_sql.CRDB{DB: testCRDBClient}
	queriers["v2(inmemory)"] = v2
	clients["v2(inmemory)"] = testCRDBClient

	pushers["v3(inmemory)"] = new_es.NewEventstore(testCRDBClient)
	clients["v3(inmemory)"] = testCRDBClient

	if localDB, err := connectLocalhost(); err == nil {
		if err = initDB(context.Background(), localDB); err != nil {
			logging.WithFields("error", err).Fatal("migrations failed")
		}
		pushers["v3(singlenode)"] = new_es.NewEventstore(localDB)
		clients["v3(singlenode)"] = localDB
	}

	// pushers["v2(inmemory)"] = v2

	defer func() {
		testCRDBClient.Close()
		ts.Stop()
	}()

	if err = initDB(context.Background(), testCRDBClient); err != nil {
		logging.WithFields("error", err).Fatal("migrations failed")
	}

	os.Exit(m.Run())
}

func initDB(ctx context.Context, db *database.DB) error {
	initialise.ReadStmts("cockroach")
	config := new(database.Config)
	config.SetConnector(&cockroach.Config{
		User: cockroach.User{
			Username: "zitadel",
		},
		Database: "zitadel",
	})
	err := initialise.Init(ctx, db,
		initialise.VerifyUser(config.Username(), ""),
		initialise.VerifyDatabase(config.DatabaseName()),
		initialise.VerifyGrant(config.DatabaseName(), config.Username()),
		initialise.VerifySettings(config.DatabaseName(), config.Username()))
	if err != nil {
		return err
	}
	err = initialise.VerifyZitadel(ctx, db, *config)
	if err != nil {
		return err
	}
	// create old events
	_, err = db.Exec(oldEventsTable)
	return err
}

func connectLocalhost() (*database.DB, error) {
	client, err := sql.Open("pgx", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		return nil, err
	}
	if err = client.Ping(); err != nil {
		return nil, err
	}

	return &database.DB{
		DB:       client,
		Database: new(testDB),
	}, nil
}

type testDB struct{}

func (_ *testDB) Timetravel(time.Duration) string { return " AS OF SYSTEM TIME '-1 ms' " }

func (*testDB) DatabaseName() string { return "db" }

func (*testDB) Username() string { return "user" }

func (*testDB) Type() string { return "cockroach" }

func generateCommand(aggregateType eventstore.AggregateType, aggregateID string, opts ...func(*testEvent)) eventstore.Command {
	e := &testEvent{
		BaseEvent: eventstore.BaseEvent{
			Agg: &eventstore.Aggregate{
				ID:            aggregateID,
				Type:          aggregateType,
				ResourceOwner: "ro",
				Version:       "v1",
			},
			Service:   "svc",
			EventType: "test.created",
		},
	}

	for _, opt := range opts {
		opt(e)
	}

	return e
}

type testEvent struct {
	eventstore.BaseEvent
	uniqueConstraints []*eventstore.UniqueConstraint
}

func (e *testEvent) Payload() any {
	return e.BaseEvent.Data
}

func (e *testEvent) UniqueConstraints() []*eventstore.UniqueConstraint {
	return e.uniqueConstraints
}

func canceledCtx() context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	return ctx
}

func fillUniqueData(unique_type, field, instanceID string) error {
	_, err := testCRDBClient.Exec("INSERT INTO eventstore.unique_constraints (unique_type, unique_field, instance_id) VALUES ($1, $2, $3)", unique_type, field, instanceID)
	return err
}

func generateAddUniqueConstraint(table, uniqueField string) func(e *testEvent) {
	return func(e *testEvent) {
		e.uniqueConstraints = append(e.uniqueConstraints,
			&eventstore.UniqueConstraint{
				UniqueType:  table,
				UniqueField: uniqueField,
				Action:      eventstore.UniqueConstraintAdd,
			},
		)
	}
}

func generateRemoveUniqueConstraint(table, uniqueField string) func(e *testEvent) {
	return func(e *testEvent) {
		e.uniqueConstraints = append(e.uniqueConstraints,
			&eventstore.UniqueConstraint{
				UniqueType:  table,
				UniqueField: uniqueField,
				Action:      eventstore.UniqueConstraintRemove,
			},
		)
	}
}

func withTestData(data any) func(e *testEvent) {
	return func(e *testEvent) {
		d, err := json.Marshal(data)
		if err != nil {
			panic("marshal data failed")
		}
		e.BaseEvent.Data = d
	}
}

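// exampleCommand is an illustrative sketch, not part of the original file: it
// shows how the option helpers above compose with generateCommand. The
// aggregate type, id, and constraint values are arbitrary placeholders; the
// resulting command carries a JSON payload and registers one unique constraint.
func exampleCommand() eventstore.Command {
	return generateCommand(
		"user",
		"example-aggregate-id",
		withTestData(map[string]string{"username": "example-username"}),
		generateAddUniqueConstraint("usernames", "example-username"),
	)
}
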
func cleanupEventstore(client *database.DB) func() {
	return func() {
		_, err := client.Exec("TRUNCATE eventstore.events")
		if err != nil {
			logging.Warnf("unable to truncate events: %v", err)
		}
		_, err = client.Exec("TRUNCATE eventstore.events2")
		if err != nil {
			logging.Warnf("unable to truncate events: %v", err)
		}
		_, err = client.Exec("TRUNCATE eventstore.unique_constraints")
		if err != nil {
			logging.Warnf("unable to truncate unique constraints: %v", err)
		}
	}
}

const oldEventsTable = `CREATE TABLE IF NOT EXISTS eventstore.events (
	id UUID DEFAULT gen_random_uuid()
	, event_type TEXT NOT NULL
	, aggregate_type TEXT NOT NULL
	, aggregate_id TEXT NOT NULL
	, aggregate_version TEXT NOT NULL
	, event_sequence BIGINT NOT NULL
	, previous_aggregate_sequence BIGINT
	, previous_aggregate_type_sequence INT8
	, creation_date TIMESTAMPTZ NOT NULL DEFAULT now()
	, created_at TIMESTAMPTZ NOT NULL DEFAULT clock_timestamp()
	, event_data JSONB
	, editor_user TEXT NOT NULL
	, editor_service TEXT
	, resource_owner TEXT NOT NULL
	, instance_id TEXT NOT NULL
	, "position" DECIMAL NOT NULL
	, in_tx_order INTEGER NOT NULL

	, PRIMARY KEY (instance_id, aggregate_type, aggregate_id, event_sequence DESC)
);`