perf: query data AS OF SYSTEM TIME (#5231)

Queries the data in the storage layer at the timestamp when the call hit the API layer.
Authored by Silvan on 2023-02-27 22:36:43 +01:00, committed by GitHub
parent 80003939ad, commit e38abdcdf3
170 changed files with 3101 additions and 3169 deletions
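
The idea, sketched below as a hedged standalone example: middleware in the API layer records when the call arrived, the storage layer asks how much time has passed since then (a helper like call.Took(ctx) in this diff), and the database dialect turns that duration into an AS OF SYSTEM TIME clause (the Timetravel(time.Duration) string method stubbed as testDB in the tests below). The helper names and clause formatting here are illustrative, not the exact ZITADEL implementation.

package main

import (
	"context"
	"fmt"
	"time"
)

type callTimeKey struct{}

// withCallTime would be set by API-layer middleware when the request arrives.
func withCallTime(ctx context.Context) context.Context {
	return context.WithValue(ctx, callTimeKey{}, time.Now())
}

// took reports how long ago the call hit the API layer (zero if unknown),
// analogous to the call.Took(ctx) helper used in this diff.
func took(ctx context.Context) time.Duration {
	start, ok := ctx.Value(callTimeKey{}).(time.Time)
	if !ok {
		return 0
	}
	return time.Since(start)
}

// timetravel renders a CockroachDB AS OF SYSTEM TIME clause for the elapsed
// duration, rounded up to whole milliseconds so it is never "-0 ms"; the real
// dialect implementation may format or clamp this differently.
func timetravel(elapsed time.Duration) string {
	return fmt.Sprintf(" AS OF SYSTEM TIME '-%d ms' ", elapsed.Milliseconds()+1)
}

func main() {
	ctx := withCallTime(context.Background()) // request enters the API layer
	time.Sleep(2 * time.Millisecond)          // business logic runs...
	query := "SELECT event_sequence FROM eventstore.events" +
		timetravel(took(ctx)) +
		"WHERE aggregate_type = $1" // ...and the storage query reads data as of the call time
	fmt.Println(query)
}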

View File

@@ -1,16 +1,16 @@
package eventstore
import (
"database/sql"
"time"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore/repository"
z_sql "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
)
type Config struct {
PushTimeout time.Duration
Client *sql.DB
Client *database.DB
repo repository.Repository
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/eventstore/handler"
@@ -19,7 +20,7 @@ var (
type StatementHandlerConfig struct {
handler.ProjectionHandlerConfig
Client *sql.DB
Client *database.DB
SequenceTable string
LockTable string
FailedEventsTable string
@@ -34,7 +35,7 @@ type StatementHandler struct {
*handler.ProjectionHandler
Locker
client *sql.DB
client *database.DB
sequenceTable string
currentSequenceStmt string
updateSequencesBaseStmt string
@@ -74,7 +75,7 @@ func NewStatementHandler(
aggregates: aggregateTypes,
reduces: reduces,
bulkLimit: config.BulkLimit,
Locker: NewLocker(config.Client, config.LockTable, config.ProjectionName),
Locker: NewLocker(config.Client.DB, config.LockTable, config.ProjectionName),
initCheck: config.InitCheck,
initialized: make(chan bool),
}
@@ -96,7 +97,7 @@ func (h *StatementHandler) SearchQuery(ctx context.Context, instanceIDs []string
return nil, 0, err
}
queryBuilder := eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).Limit(h.bulkLimit)
queryBuilder := eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).Limit(h.bulkLimit).AllowTimeTravel()
for _, aggregateType := range h.aggregates {
for _, instanceID := range instanceIDs {

View File

@@ -12,6 +12,7 @@ import (
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/assert"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/eventstore/handler"
"github.com/zitadel/zitadel/internal/eventstore/repository"
@@ -114,6 +115,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
},
SearchQueryBuilder: eventstore.
NewSearchQueryBuilder(eventstore.ColumnsEvent).
AllowTimeTravel().
AddQuery().
AggregateTypes("testAgg").
SequenceGreater(5).
@@ -143,6 +145,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
},
SearchQueryBuilder: eventstore.
NewSearchQueryBuilder(eventstore.ColumnsEvent).
AllowTimeTravel().
AddQuery().
AggregateTypes("testAgg").
SequenceGreater(5).
@@ -171,7 +174,9 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
},
SequenceTable: tt.fields.sequenceTable,
BulkLimit: tt.fields.bulkLimit,
Client: client,
Client: &database.DB{
DB: client,
},
})
h.aggregates = tt.fields.aggregates
@@ -549,7 +554,9 @@ func TestStatementHandler_Update(t *testing.T) {
sequenceTable: "my_sequences",
currentSequenceStmt: fmt.Sprintf(currentSequenceStmtFormat, "my_sequences"),
updateSequencesBaseStmt: fmt.Sprintf(updateCurrentSequencesStmtFormat, "my_sequences"),
client: client,
client: &database.DB{
DB: client,
},
}
h.aggregates = tt.fields.aggregates
@@ -1121,7 +1128,9 @@ func TestStatementHandler_executeStmts(t *testing.T) {
ProjectionName: tt.fields.projectionName,
RequeueEvery: 0,
},
Client: client,
Client: &database.DB{
DB: client,
},
FailedEventsTable: tt.fields.failedEventsTable,
MaxFailureCount: tt.fields.maxFailureCount,
},

View File

@@ -194,7 +194,7 @@ func (h *ProjectionHandler) schedule(ctx context.Context) {
var succeededOnce bool
var err error
// get every instance id except empty (system)
query := eventstore.NewSearchQueryBuilder(eventstore.ColumnsInstanceIDs).AddQuery().ExcludedInstanceID("")
query := eventstore.NewSearchQueryBuilder(eventstore.ColumnsInstanceIDs).AllowTimeTravel().AddQuery().ExcludedInstanceID("")
for range h.triggerProjection.C {
if !succeededOnce {
// (re)check if it has succeeded in the meantime

View File

@@ -4,6 +4,7 @@ import (
"database/sql"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach-go/v2/testserver"
"github.com/zitadel/logging"
@@ -14,7 +15,7 @@ import (
)
var (
testCRDBClient *sql.DB
testCRDBClient *database.DB
)
func TestMain(m *testing.M) {
@@ -23,7 +24,11 @@ func TestMain(m *testing.M) {
logging.WithFields("error", err).Fatal("unable to start db")
}
testCRDBClient, err = sql.Open("postgres", ts.PGURL().String())
testCRDBClient = &database.DB{
Database: new(testDB),
}
testCRDBClient.DB, err = sql.Open("postgres", ts.PGURL().String())
if err != nil {
logging.WithFields("error", err).Fatal("unable to connect to db")
}
@@ -39,7 +44,7 @@ func TestMain(m *testing.M) {
ts.Stop()
}()
if err = initDB(testCRDBClient); err != nil {
if err = initDB(testCRDBClient.DB); err != nil {
logging.WithFields("error", err).Fatal("migrations failed")
}
@@ -57,10 +62,20 @@ func initDB(db *sql.DB) error {
})
err := initialise.Init(db,
initialise.VerifyUser(config.Username(), ""),
initialise.VerifyDatabase(config.Database()),
initialise.VerifyGrant(config.Database(), config.Username()))
initialise.VerifyDatabase(config.DatabaseName()),
initialise.VerifyGrant(config.DatabaseName(), config.Username()))
if err != nil {
return err
}
return initialise.VerifyZitadel(db, *config)
}
type testDB struct{}
func (_ *testDB) Timetravel(time.Duration) string { return " AS OF SYSTEM TIME '-1 ms' " }
func (*testDB) DatabaseName() string { return "db" }
func (*testDB) Username() string { return "user" }
func (*testDB) Type() string { return "type" }

View File

@@ -2,7 +2,7 @@ package eventstore
import "time"
//ReadModel is the minimum representation of a read model.
// ReadModel is the minimum representation of a read model.
// It implements a basic reducer
// it might be saved in a database or in memory
type ReadModel struct {
@@ -15,14 +15,13 @@ type ReadModel struct {
InstanceID string `json:"-"`
}
//AppendEvents adds all the events to the read model.
// AppendEvents adds all the events to the read model.
// The function doesn't compute the new state of the read model
func (rm *ReadModel) AppendEvents(events ...Event) *ReadModel {
func (rm *ReadModel) AppendEvents(events ...Event) {
rm.Events = append(rm.Events, events...)
return rm
}
//Reduce is the basic implementation of reducer
// Reduce is the basic implementation of reducer
// If this function is extended the extending function should be the last step
func (rm *ReadModel) Reduce() error {
if len(rm.Events) == 0 {
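
As a side note on the ReadModel contract documented above (AppendEvents only collects events; an extending Reduce should do its own work and call the embedded Reduce as the last step), here is a minimal schematic sketch. The types are local stand-ins mirroring the diff, not the zitadel package API.

package main

import "fmt"

// Event is a local stand-in for the eventstore event interface.
type Event interface{ Type() string }

// ReadModel is a stand-in for the minimal read model above: it only collects
// events and leaves computing the state to the embedding type.
type ReadModel struct {
	Events            []Event
	ProcessedSequence uint64
}

// AppendEvents adds events without computing the new state (matching the
// changed signature above, which no longer returns *ReadModel).
func (rm *ReadModel) AppendEvents(events ...Event) {
	rm.Events = append(rm.Events, events...)
}

// Reduce is the basic implementation; in this stand-in it simply drops the
// processed events.
func (rm *ReadModel) Reduce() error {
	rm.Events = nil
	return nil
}

type userEvent struct{ typ string }

func (e *userEvent) Type() string { return e.typ }

// UserReadModel extends ReadModel; per the documented contract, the embedded
// Reduce runs as the last step of the extending Reduce.
type UserReadModel struct {
	ReadModel
	Active bool
}

func (rm *UserReadModel) Reduce() error {
	for _, event := range rm.Events {
		switch event.Type() {
		case "user.added":
			rm.Active = true
		case "user.deactivated":
			rm.Active = false
		}
	}
	return rm.ReadModel.Reduce() // last step: delegate to the embedded reducer
}

func main() {
	rm := new(UserReadModel)
	rm.AppendEvents(&userEvent{typ: "user.added"})
	if err := rm.Reduce(); err != nil {
		panic(err)
	}
	fmt.Println("active:", rm.Active, "pending events:", len(rm.Events))
}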

View File

@@ -8,11 +8,12 @@ import (
// SearchQuery defines the which and how data are queried
type SearchQuery struct {
Columns Columns
Limit uint64
Desc bool
Filters [][]*Filter
Tx *sql.Tx
Columns Columns
Limit uint64
Desc bool
Filters [][]*Filter
Tx *sql.Tx
AllowTimeTravel bool
}
// Columns defines which fields of the event are needed for the query

View File

@@ -13,6 +13,7 @@ import (
"github.com/lib/pq"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
caos_errs "github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore/repository"
)
@@ -97,19 +98,19 @@ const (
)
type CRDB struct {
client *sql.DB
*database.DB
}
func NewCRDB(client *sql.DB) *CRDB {
func NewCRDB(client *database.DB) *CRDB {
return &CRDB{client}
}
func (db *CRDB) Health(ctx context.Context) error { return db.client.Ping() }
func (db *CRDB) Health(ctx context.Context) error { return db.Ping() }
// Push adds all events to the eventstreams of the aggregates.
// This call is transaction save. The transaction will be rolled back if one event fails
func (db *CRDB) Push(ctx context.Context, events []*repository.Event, uniqueConstraints ...*repository.UniqueConstraint) error {
err := crdb.ExecuteTx(ctx, db.client, nil, func(tx *sql.Tx) error {
err := crdb.ExecuteTx(ctx, db.DB.DB, nil, func(tx *sql.Tx) error {
var (
previousAggregateSequence Sequence
@@ -159,7 +160,7 @@ func (db *CRDB) Push(ctx context.Context, events []*repository.Event, uniqueCons
var instanceRegexp = regexp.MustCompile(`eventstore\.i_[0-9a-zA-Z]{1,}_seq`)
func (db *CRDB) CreateInstance(ctx context.Context, instanceID string) error {
row := db.client.QueryRowContext(ctx, "SELECT CONCAT('eventstore.i_', $1::TEXT, '_seq')", instanceID)
row := db.QueryRowContext(ctx, "SELECT CONCAT('eventstore.i_', $1::TEXT, '_seq')", instanceID)
if row.Err() != nil {
return caos_errs.ThrowInvalidArgument(row.Err(), "SQL-7gtFA", "Errors.InvalidArgument")
}
@@ -168,7 +169,7 @@ func (db *CRDB) CreateInstance(ctx context.Context, instanceID string) error {
return caos_errs.ThrowInvalidArgument(err, "SQL-7gtFA", "Errors.InvalidArgument")
}
if _, err := db.client.ExecContext(ctx, "CREATE SEQUENCE "+sequenceName); err != nil {
if _, err := db.ExecContext(ctx, "CREATE SEQUENCE "+sequenceName); err != nil {
return caos_errs.ThrowInternal(err, "SQL-7gtFA", "Errors.Internal")
}
@@ -249,7 +250,7 @@ func (db *CRDB) InstanceIDs(ctx context.Context, searchQuery *repository.SearchQ
}
func (db *CRDB) db() *sql.DB {
return db.client
return db.DB.DB
}
func (db *CRDB) orderByEventSequence(desc bool) string {

View File

@@ -437,7 +437,10 @@ func TestCRDB_Push_OneAggregate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
if tt.args.uniqueDataType != "" && tt.args.uniqueDataField != "" {
err := fillUniqueData(tt.args.uniqueDataType, tt.args.uniqueDataField, tt.args.uniqueDataInstanceID)
@@ -561,7 +564,10 @@ func TestCRDB_Push_MultipleAggregate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
if err := db.Push(context.Background(), tt.args.events); (err != nil) != tt.res.wantErr {
t.Errorf("CRDB.Push() error = %v, wantErr %v", err, tt.res.wantErr)
@@ -638,7 +644,7 @@ func TestCRDB_CreateInstance(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{DB: testCRDBClient},
}
if err := db.CreateInstance(context.Background(), tt.args.instanceID); (err != nil) != tt.res.wantErr {
@@ -776,7 +782,10 @@ func TestCRDB_Push_Parallel(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
wg := sync.WaitGroup{}
@@ -897,7 +906,10 @@ func TestCRDB_Filter(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
// setup initial data for query
@@ -987,7 +999,10 @@ func TestCRDB_LatestSequence(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
// setup initial data for query
@@ -1131,7 +1146,10 @@ func TestCRDB_Push_ResourceOwner(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: testCRDBClient,
DB: &database.DB{
DB: testCRDBClient,
Database: new(testDB),
},
}
if err := db.Push(context.Background(), tt.args.events); err != nil {
t.Errorf("CRDB.Push() error = %v", err)

View File

@@ -4,6 +4,7 @@ import (
"database/sql"
"os"
"testing"
"time"
"github.com/cockroachdb/cockroach-go/v2/testserver"
"github.com/zitadel/logging"
@@ -53,8 +54,8 @@ func initDB(db *sql.DB) error {
err := initialise.Init(db,
initialise.VerifyUser(config.Username(), ""),
initialise.VerifyDatabase(config.Database()),
initialise.VerifyGrant(config.Database(), config.Username()))
initialise.VerifyDatabase(config.DatabaseName()),
initialise.VerifyGrant(config.DatabaseName(), config.Username()))
if err != nil {
return err
}
@@ -66,3 +67,13 @@ func fillUniqueData(unique_type, field, instanceID string) error {
_, err := testCRDBClient.Exec("INSERT INTO eventstore.unique_constraints (unique_type, unique_field, instance_id) VALUES ($1, $2, $3)", unique_type, field, instanceID)
return err
}
type testDB struct{}
func (_ *testDB) Timetravel(time.Duration) string { return " AS OF SYSTEM TIME '-1 ms' " }
func (*testDB) DatabaseName() string { return "db" }
func (*testDB) Username() string { return "user" }
func (*testDB) Type() string { return "type" }

View File

@@ -10,6 +10,8 @@ import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/api/call"
"github.com/zitadel/zitadel/internal/database/dialect"
z_errors "github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore/repository"
)
@@ -24,6 +26,7 @@ type querier interface {
instanceIDsQuery() string
db() *sql.DB
orderByEventSequence(desc bool) string
dialect.Database
}
type scan func(dest ...interface{}) error
@@ -34,6 +37,11 @@ func query(ctx context.Context, criteria querier, searchQuery *repository.Search
if where == "" || query == "" {
return z_errors.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}
if searchQuery.Tx == nil {
if travel := prepareTimeTravel(ctx, criteria, searchQuery.AllowTimeTravel); travel != "" {
query += travel
}
}
query += where
if searchQuery.Columns == repository.ColumnsEvent {
@@ -85,6 +93,14 @@ func prepareColumns(criteria querier, columns repository.Columns) (string, func(
}
}
func prepareTimeTravel(ctx context.Context, criteria querier, allow bool) string {
if !allow {
return ""
}
took := call.Took(ctx)
return criteria.Timetravel(took)
}
func maxSequenceScanner(row scan, dest interface{}) (err error) {
sequence, ok := dest.(*Sequence)
if !ok {

View File

@@ -10,6 +10,7 @@ import (
"github.com/DATA-DOG/go-sqlmock"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore/repository"
)
@@ -537,7 +538,10 @@ func Test_query_events_with_crdb(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: tt.fields.client,
DB: &database.DB{
DB: tt.fields.client,
Database: new(testDB),
},
}
// setup initial data for query
@@ -657,6 +661,36 @@ func Test_query_events_mocked(t *testing.T) {
wantErr: false,
},
},
{
name: "with limit and order by desc as of system time",
args: args{
dest: &[]*repository.Event{},
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: true,
Limit: 5,
AllowTimeTravel: true,
Filters: [][]*repository.Filter{
{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
`SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, previous_aggregate_type_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events AS OF SYSTEM TIME '-1 ms' WHERE \( aggregate_type = \$1 \) ORDER BY event_sequence DESC LIMIT \$2`,
[]driver.Value{repository.AggregateType("user"), uint64(5)},
),
},
res: res{
wantErr: false,
},
},
{
name: "error sql conn closed",
args: args{
@@ -786,9 +820,11 @@ func Test_query_events_mocked(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
crdb := &CRDB{}
crdb := &CRDB{DB: &database.DB{
Database: new(testDB),
}}
if tt.fields.mock != nil {
crdb.client = tt.fields.mock.client
crdb.DB.DB = tt.fields.mock.client
}
err := query(context.Background(), crdb, tt.args.query, tt.args.dest)

View File

@@ -12,14 +12,15 @@ import (
// SearchQueryBuilder represents the builder for your filter
// if invalid data are set the filter will fail
type SearchQueryBuilder struct {
columns repository.Columns
limit uint64
desc bool
resourceOwner string
instanceID string
editorUser string
queries []*SearchQuery
tx *sql.Tx
columns repository.Columns
limit uint64
desc bool
resourceOwner string
instanceID string
editorUser string
queries []*SearchQuery
tx *sql.Tx
allowTimeTravel bool
}
type SearchQuery struct {
@@ -130,6 +131,13 @@ func (builder *SearchQueryBuilder) EditorUser(id string) *SearchQueryBuilder {
return builder
}
// AllowTimeTravel activates the time travel feature of the database if supported
// The queries will be made based on the call time
func (builder *SearchQueryBuilder) AllowTimeTravel() *SearchQueryBuilder {
builder.allowTimeTravel = true
return builder
}
// AddQuery creates a new sub query.
// All fields in the sub query are AND-connected in the storage request.
// Multiple sub queries are OR-connected in the storage request.
@@ -264,11 +272,12 @@ func (builder *SearchQueryBuilder) build(instanceID string) (*repository.SearchQ
}
return &repository.SearchQuery{
Columns: builder.columns,
Limit: builder.limit,
Desc: builder.desc,
Filters: filters,
Tx: builder.tx,
Columns: builder.columns,
Limit: builder.limit,
Desc: builder.desc,
Filters: filters,
Tx: builder.tx,
AllowTimeTravel: builder.allowTimeTravel,
}, nil
}
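
For context, a hedged usage sketch of the new AllowTimeTravel option, assembled from the call sites in this diff (the statement handler and the projection scheduler). It only compiles inside the zitadel module, since internal/eventstore is an internal package; the aggregate type, limit, and sequence values are placeholders.

package example

import "github.com/zitadel/zitadel/internal/eventstore"

// buildProjectionQuery mirrors the statement-handler change above: the builder
// opts in to time travel, so the storage layer reads AS OF the time the call
// hit the API layer.
func buildProjectionQuery(bulkLimit uint64) *eventstore.SearchQuery {
	return eventstore.
		NewSearchQueryBuilder(eventstore.ColumnsEvent).
		Limit(bulkLimit).
		AllowTimeTravel().
		AddQuery().             // filters within this sub query are AND-connected
		AggregateTypes("user"). // further AddQuery calls would be OR-connected
		SequenceGreater(0)
}

// instanceIDsQuery mirrors the scheduler change above: every instance id except
// the empty (system) instance, also read with time travel.
var instanceIDsQuery = eventstore.
	NewSearchQueryBuilder(eventstore.ColumnsInstanceIDs).
	AllowTimeTravel().
	AddQuery().
	ExcludedInstanceID("")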

View File

@@ -2,8 +2,8 @@ package v1
import (
"context"
"database/sql"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore/v1/internal/repository"
z_sql "github.com/zitadel/zitadel/internal/eventstore/v1/internal/repository/sql"
"github.com/zitadel/zitadel/internal/eventstore/v1/models"
@@ -22,7 +22,7 @@ type eventstore struct {
repo repository.Repository
}
func Start(db *sql.DB) (Eventstore, error) {
func Start(db *database.DB) (Eventstore, error) {
return &eventstore{
repo: z_sql.Start(db),
}, nil

View File

@@ -1,10 +1,10 @@
package sql
import (
"database/sql"
"github.com/zitadel/zitadel/internal/database"
)
func Start(client *sql.DB) *SQL {
func Start(client *database.DB) *SQL {
return &SQL{
client: client,
}

View File

@@ -12,7 +12,7 @@ import (
)
const (
selectEscaped = `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events WHERE \( aggregate_type = \$1`
selectEscaped = `SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events AS OF SYSTEM TIME '-1 ms' WHERE \( aggregate_type = \$1`
)
var (
@@ -172,14 +172,14 @@ func (db *dbMock) expectFilterEventsError(returnedErr error) *dbMock {
}
func (db *dbMock) expectLatestSequenceFilter(aggregateType string, sequence Sequence) *dbMock {
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events WHERE \( aggregate_type = \$1 \)`).
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events AS OF SYSTEM TIME '-1 ms' WHERE \( aggregate_type = \$1 \)`).
WithArgs(aggregateType).
WillReturnRows(sqlmock.NewRows([]string{"max_sequence"}).AddRow(sequence))
return db
}
func (db *dbMock) expectLatestSequenceFilterError(aggregateType string, err error) *dbMock {
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events WHERE \( aggregate_type = \$1 \)`).
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events AS OF SYSTEM TIME '-1 ms' WHERE \( aggregate_type = \$1 \)`).
WithArgs(aggregateType).WillReturnError(err)
return db
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/errors"
es_models "github.com/zitadel/zitadel/internal/eventstore/v1/models"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
@@ -16,16 +17,16 @@ type Querier interface {
}
func (db *SQL) Filter(ctx context.Context, searchQuery *es_models.SearchQueryFactory) (events []*es_models.Event, err error) {
return filter(db.client, searchQuery)
return filter(ctx, db.client, searchQuery)
}
func filter(querier Querier, searchQuery *es_models.SearchQueryFactory) (events []*es_models.Event, err error) {
query, limit, values, rowScanner := buildQuery(searchQuery)
func filter(ctx context.Context, db *database.DB, searchQuery *es_models.SearchQueryFactory) (events []*es_models.Event, err error) {
query, limit, values, rowScanner := buildQuery(ctx, db, searchQuery)
if query == "" {
return nil, errors.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}
rows, err := querier.Query(query, values...)
rows, err := db.Query(query, values...)
if err != nil {
logging.New().WithError(err).Info("query failed")
return nil, errors.ThrowInternal(err, "SQL-IJuyR", "unable to filter events")
@@ -48,7 +49,7 @@ func filter(querier Querier, searchQuery *es_models.SearchQueryFactory) (events
}
func (db *SQL) LatestSequence(ctx context.Context, queryFactory *es_models.SearchQueryFactory) (uint64, error) {
query, _, values, rowScanner := buildQuery(queryFactory)
query, _, values, rowScanner := buildQuery(ctx, db.client, queryFactory)
if query == "" {
return 0, errors.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}
@@ -63,7 +64,7 @@ func (db *SQL) LatestSequence(ctx context.Context, queryFactory *es_models.Searc
}
func (db *SQL) InstanceIDs(ctx context.Context, queryFactory *es_models.SearchQueryFactory) ([]string, error) {
query, _, values, rowScanner := buildQuery(queryFactory)
query, _, values, rowScanner := buildQuery(ctx, db.client, queryFactory)
if query == "" {
return nil, errors.ThrowInvalidArgument(nil, "SQL-Sfwg2", "invalid query factory")
}

View File

@@ -6,6 +6,7 @@ import (
"math"
"testing"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/errors"
es_models "github.com/zitadel/zitadel/internal/eventstore/v1/models"
)
@@ -122,7 +123,7 @@ func TestSQL_Filter(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sql := &SQL{
client: tt.fields.client.sqlClient,
client: &database.DB{DB: tt.fields.client.sqlClient, Database: new(testDB)},
}
events, err := sql.Filter(context.Background(), tt.args.searchQuery)
if (err != nil) != tt.res.wantErr {
@@ -217,7 +218,7 @@ func TestSQL_LatestSequence(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
sql := &SQL{
client: tt.fields.client.sqlClient,
client: &database.DB{DB: tt.fields.client.sqlClient, Database: new(testDB)},
}
sequence, err := sql.LatestSequence(context.Background(), tt.args.searchQuery)
if (err != nil) != tt.res.wantErr {

View File

@@ -1,6 +1,7 @@
package sql
import (
"context"
"database/sql"
"errors"
"fmt"
@@ -9,6 +10,8 @@ import (
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/api/call"
"github.com/zitadel/zitadel/internal/database/dialect"
z_errors "github.com/zitadel/zitadel/internal/errors"
es_models "github.com/zitadel/zitadel/internal/eventstore/v1/models"
)
@@ -30,7 +33,7 @@ const (
" FROM eventstore.events"
)
func buildQuery(queryFactory *es_models.SearchQueryFactory) (query string, limit uint64, values []interface{}, rowScanner func(s scan, dest interface{}) error) {
func buildQuery(ctx context.Context, db dialect.Database, queryFactory *es_models.SearchQueryFactory) (query string, limit uint64, values []interface{}, rowScanner func(s scan, dest interface{}) error) {
searchQuery, err := queryFactory.Build()
if err != nil {
logging.New().WithError(err).Warn("search query factory invalid")
@@ -41,6 +44,10 @@ func buildQuery(queryFactory *es_models.SearchQueryFactory) (query string, limit
if where == "" || query == "" {
return "", 0, nil, nil
}
if travel := db.Timetravel(call.Took(ctx)); travel != "" {
query += travel
}
query += where
if searchQuery.Columns == es_models.Columns_Event {

View File

@@ -1,6 +1,7 @@
package sql
import (
"context"
"database/sql"
"reflect"
"testing"
@@ -435,7 +436,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory().OrderDesc().AddQuery().AggregateTypes("user").Factory(),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE ( aggregate_type = $1 ) ORDER BY event_sequence DESC",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events AS OF SYSTEM TIME '-1 ms' WHERE ( aggregate_type = $1 ) ORDER BY event_sequence DESC",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user")},
},
@@ -446,7 +447,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory().Limit(5).AddQuery().AggregateTypes("user").Factory(),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE ( aggregate_type = $1 ) ORDER BY event_sequence LIMIT $2",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events AS OF SYSTEM TIME '-1 ms' WHERE ( aggregate_type = $1 ) ORDER BY event_sequence LIMIT $2",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user"), uint64(5)},
limit: 5,
@@ -458,7 +459,7 @@ func Test_buildQuery(t *testing.T) {
queryFactory: es_models.NewSearchQueryFactory().Limit(5).OrderDesc().AddQuery().AggregateTypes("user").Factory(),
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE ( aggregate_type = $1 ) ORDER BY event_sequence DESC LIMIT $2",
query: "SELECT creation_date, event_type, event_sequence, previous_aggregate_sequence, event_data, editor_service, editor_user, resource_owner, instance_id, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events AS OF SYSTEM TIME '-1 ms' WHERE ( aggregate_type = $1 ) ORDER BY event_sequence DESC LIMIT $2",
rowScanner: true,
values: []interface{}{es_models.AggregateType("user"), uint64(5)},
limit: 5,
@@ -466,8 +467,10 @@ func Test_buildQuery(t *testing.T) {
},
}
for _, tt := range tests {
ctx := context.Background()
db := new(testDB)
t.Run(tt.name, func(t *testing.T) {
gotQuery, gotLimit, gotValues, gotRowScanner := buildQuery(tt.args.queryFactory)
gotQuery, gotLimit, gotValues, gotRowScanner := buildQuery(ctx, db, tt.args.queryFactory)
if gotQuery != tt.res.query {
t.Errorf("buildQuery() gotQuery = %v, want %v", gotQuery, tt.res.query)
}
@@ -489,3 +492,13 @@ func Test_buildQuery(t *testing.T) {
})
}
}
type testDB struct{}
func (_ *testDB) Timetravel(time.Duration) string { return " AS OF SYSTEM TIME '-1 ms' " }
func (*testDB) DatabaseName() string { return "db" }
func (*testDB) Username() string { return "user" }
func (*testDB) Type() string { return "type" }

View File

@@ -2,11 +2,12 @@ package sql
import (
"context"
"database/sql"
"github.com/zitadel/zitadel/internal/database"
)
type SQL struct {
client *sql.DB
client *database.DB
}
func (db *SQL) Health(ctx context.Context) error {