fix: move v2 pkgs (#1331)

* fix: move eventstore pkgs

* fix: move eventstore pkgs

* fix: remove v2 view

* fix: remove v2 view
Fabi
2021-02-23 15:13:04 +01:00
committed by GitHub
parent 57b277bc7c
commit d8e42744b4
797 changed files with 2116 additions and 2224 deletions


@@ -0,0 +1,60 @@
package repository
import (
"time"
)
//Event represents all information about a manipulation of an aggregate
type Event struct {
//ID is a generated uuid for this event
ID string
//Sequence is the sequence of the event
Sequence uint64
//PreviousSequence is the sequence of the previous event of this aggregate
// if it's 0 then it's the first event of this aggregate
PreviousSequence uint64
//CreationDate is the time the event is created
// it's used for human readability.
// Don't use it for event ordering,
// time drifts in different services could cause integrity problems
CreationDate time.Time
//Type describes the cause of the event (e.g. user.added)
// it should always be in past-form
Type EventType
//Data describes the changed fields (e.g. userName = "hodor")
// data must always be a pointer to a struct, a struct or a byte array containing json bytes
Data []byte
//EditorService should be a unique identifier for the service which created the event
// it's meant for maintainability
EditorService string
//EditorUser should be a unique identifier for the user which created the event
// it's meant for maintainability.
// It's recommended to use the aggregate id of the user
EditorUser string
//Version describes the definition of the aggregate at a certain point in time
// it's used in read models to reduce the events in the correct definition
Version Version
//AggregateID is the unique identifier of the aggregate
// the client must generate it on its own
AggregateID string
//AggregateType describes the meaning of the aggregate for this event
// it could be an object like user
AggregateType AggregateType
//ResourceOwner is the organisation which owns this aggregate
// an aggregate can only be managed by one organisation
// use the ID of the org
ResourceOwner string
}
//EventType is the description of the change
type EventType string
//AggregateType is the object name
type AggregateType string
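
For orientation, a hedged sketch of how such an Event could be populated before it is pushed; every concrete value (ids, org, payload, service name) is an assumption for illustration, not something defined in this diff.

// newUserAddedEvent is a hypothetical helper (not part of this change) that
// shows how the Event fields relate to each other.
// ID, Sequence, PreviousSequence and CreationDate are set by the eventstore on push.
func newUserAddedEvent() *Event {
	return &Event{
		AggregateID:   "A-123",          // assumed id, generated by the client
		AggregateType: "user",           // the object this event belongs to
		Type:          "user.added",     // past-form cause of the event
		Version:       "v1",             // aggregate definition at this point in time
		EditorService: "management-api", // assumed name of the creating service
		EditorUser:    "U-456",          // assumed aggregate id of the editing user
		ResourceOwner: "ORG-789",        // assumed organisation owning the aggregate
		Data:          []byte(`{"userName":"hodor"}`), // json payload of the changed fields
	}
}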


@@ -0,0 +1,19 @@
package repository
import (
"context"
)
//Repository pushes and filters events
type Repository interface {
//Health checks if the connection to the storage is available
Health(ctx context.Context) error
// Push adds all events of the given aggregates to the eventstreams of the aggregates.
// if unique constraints are pushed, they will be added to the unique table for checking unique constraint violations
// This call is transaction safe. The transaction will be rolled back if one event fails
Push(ctx context.Context, events []*Event, uniqueConstraints ...*UniqueConstraint) error
// Filter returns all events matching the given search query
Filter(ctx context.Context, searchQuery *SearchQuery) (events []*Event, err error)
//LatestSequence returns the latest sequence found by the search query
LatestSequence(ctx context.Context, queryFactory *SearchQuery) (uint64, error)
}
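
As a rough, hedged usage sketch of this interface; the helper name and the assumption that at least one event is passed are illustrative, not part of this change.

// pushAndReload is a hypothetical caller of the Repository interface: it
// pushes the given events and reads back the stream of the first aggregate.
func pushAndReload(ctx context.Context, repo Repository, events []*Event) ([]*Event, error) {
	if err := repo.Push(ctx, events); err != nil { // no unique constraints passed
		return nil, err
	}
	// filter all events of the first aggregate, oldest first
	return repo.Filter(ctx, &SearchQuery{
		Columns: ColumnsEvent,
		Filters: []*Filter{
			NewFilter(FieldAggregateID, events[0].AggregateID, OperationEquals),
		},
	})
}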


@@ -0,0 +1,105 @@
package repository
import "github.com/caos/zitadel/internal/errors"
//SearchQuery defines which data are queried and how
type SearchQuery struct {
Columns Columns
Limit uint64
Desc bool
Filters []*Filter
}
//Columns defines which fields of the event are needed for the query
type Columns int32
const (
//ColumnsEvent represents all fields of an event
ColumnsEvent = iota + 1
//ColumnsMaxSequence represents the latest sequence of the filtered events
ColumnsMaxSequence
columnsCount
)
func (c Columns) Validate() error {
if c <= 0 || c >= columnsCount {
return errors.ThrowPreconditionFailed(nil, "REPOS-x8R35", "column out of range")
}
return nil
}
//Filter represents all fields needed to compare a field of an event with a value
type Filter struct {
Field Field
Value interface{}
Operation Operation
}
//Operation defines how fields are compared
type Operation int32
const (
// OperationEquals compares two values for equality
OperationEquals Operation = iota + 1
// OperationGreater compares if the given value is greater than the stored one
OperationGreater
// OperationLess compares if the given value is less than the stored one
OperationLess
//OperationIn checks if the stored value matches one of the values in the passed list
OperationIn
//OperationJSONContains checks if a stored value matches the given json
OperationJSONContains
operationCount
)
//Field is the representation of a field from the event
type Field int32
const (
//FieldAggregateType represents the aggregate type field
FieldAggregateType Field = iota + 1
//FieldAggregateID represents the aggregate id field
FieldAggregateID
//FieldSequence represents the sequence field
FieldSequence
//FieldResourceOwner represents the resource owner field
FieldResourceOwner
//FieldEditorService represents the editor service field
FieldEditorService
//FieldEditorUser represents the editor user field
FieldEditorUser
//FieldEventType represents the event type field
FieldEventType
//FieldEventData represents the event data field
FieldEventData
fieldCount
)
//NewFilter is used in tests. Use searchQuery.*Filter() instead
func NewFilter(field Field, value interface{}, operation Operation) *Filter {
return &Filter{
Field: field,
Value: value,
Operation: operation,
}
}
//Validate checks if the fields of the filter have valid values
func (f *Filter) Validate() error {
if f == nil {
return errors.ThrowPreconditionFailed(nil, "REPO-z6KcG", "filter is nil")
}
if f.Field <= 0 || f.Field >= fieldCount {
return errors.ThrowPreconditionFailed(nil, "REPO-zw62U", "field not definded")
}
if f.Value == nil {
return errors.ThrowPreconditionFailed(nil, "REPO-GJ9ct", "no value definded")
}
if f.Operation <= 0 || f.Operation >= operationCount {
return errors.ThrowPreconditionFailed(nil, "REPO-RrQTy", "operation not definded")
}
return nil
}
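
A hedged example of how the query types above are meant to be combined; the limit, the aggregate type and the helper name are assumptions for illustration.

// buildUserQuery is a hypothetical helper (not part of this change) that
// assembles and validates a SearchQuery for all user events of one org.
func buildUserQuery(orgID string) (*SearchQuery, error) {
	q := &SearchQuery{
		Columns: ColumnsEvent,
		Limit:   100,
		Desc:    false,
		Filters: []*Filter{
			NewFilter(FieldAggregateType, AggregateType("user"), OperationEquals),
			NewFilter(FieldResourceOwner, orgID, OperationEquals),
		},
	}
	if err := q.Columns.Validate(); err != nil {
		return nil, err
	}
	for _, f := range q.Filters {
		if err := f.Validate(); err != nil {
			return nil, err
		}
	}
	return q, nil
}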


@@ -0,0 +1,144 @@
package repository
import (
"reflect"
"testing"
)
func TestNewFilter(t *testing.T) {
type args struct {
field Field
value interface{}
operation Operation
}
tests := []struct {
name string
args args
want *Filter
}{
{
name: "aggregateID equals",
args: args{
field: FieldAggregateID,
value: "hodor",
operation: OperationEquals,
},
want: &Filter{Field: FieldAggregateID, Operation: OperationEquals, Value: "hodor"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := NewFilter(tt.args.field, tt.args.value, tt.args.operation); !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewFilter() = %v, want %v", got, tt.want)
}
})
}
}
func TestFilter_Validate(t *testing.T) {
type fields struct {
field Field
value interface{}
operation Operation
isNil bool
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "correct filter",
fields: fields{
field: FieldSequence,
operation: OperationGreater,
value: uint64(235),
},
wantErr: false,
},
{
name: "filter is nil",
fields: fields{isNil: true},
wantErr: true,
},
{
name: "no field error",
fields: fields{
operation: OperationGreater,
value: uint64(235),
},
wantErr: true,
},
{
name: "no value error",
fields: fields{
field: FieldSequence,
operation: OperationGreater,
},
wantErr: true,
},
{
name: "no operation error",
fields: fields{
field: FieldSequence,
value: uint64(235),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var f *Filter
if !tt.fields.isNil {
f = &Filter{
Field: tt.fields.field,
Value: tt.fields.value,
Operation: tt.fields.operation,
}
}
if err := f.Validate(); (err != nil) != tt.wantErr {
t.Errorf("Filter.Validate() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestColumns_Validate(t *testing.T) {
type fields struct {
columns Columns
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "correct filter",
fields: fields{
columns: ColumnsEvent,
},
wantErr: false,
},
{
name: "columns too low",
fields: fields{
columns: 0,
},
wantErr: true,
},
{
name: "columns too high",
fields: fields{
columns: columnsCount,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.fields.columns.Validate(); (err != nil) != tt.wantErr {
t.Errorf("Columns.Validate() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}


@@ -0,0 +1,323 @@
package sql
import (
"context"
"database/sql"
"errors"
"github.com/lib/pq"
"regexp"
"strconv"
"github.com/caos/logging"
caos_errs "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/repository"
"github.com/cockroachdb/cockroach-go/v2/crdb"
//sql import for cockroach
_ "github.com/lib/pq"
)
const (
//as soon as stored procedures are possible in crdb
// we could move the code to migrations and call the procedure
// tracking issue: https://github.com/cockroachdb/cockroach/issues/17511
crdbInsert = "WITH data ( " +
" event_type, " +
" aggregate_type, " +
" aggregate_id, " +
" aggregate_version, " +
" creation_date, " +
" event_data, " +
" editor_user, " +
" editor_service, " +
" resource_owner, " +
// variables below are calculated
" previous_sequence" +
") AS (" +
//previous_data selects the needed data of the latest event of the aggregate
// and buffers it (crdb inmemory)
" WITH previous_data AS (" +
" SELECT MAX(event_sequence) AS seq, resource_owner " +
" FROM eventstore.events " +
//TODO: remove LIMIT 1 as soon as the data is cleaned up (only 1 resource_owner per aggregate)
" WHERE aggregate_type = $2 AND aggregate_id = $3 GROUP BY resource_owner LIMIT 1" +
" )" +
// defines the data to be inserted
" SELECT " +
" $1::VARCHAR AS event_type, " +
" $2::VARCHAR AS aggregate_type, " +
" $3::VARCHAR AS aggregate_id, " +
" $4::VARCHAR AS aggregate_version, " +
" NOW() AS creation_date, " +
" $5::JSONB AS event_data, " +
" $6::VARCHAR AS editor_user, " +
" $7::VARCHAR AS editor_service, " +
" CASE WHEN EXISTS (SELECT * FROM previous_data) " +
" THEN (SELECT resource_owner FROM previous_data) " +
" ELSE $8::VARCHAR " +
" end AS resource_owner, " +
" CASE WHEN EXISTS (SELECT * FROM previous_data) " +
" THEN (SELECT seq FROM previous_data) " +
" ELSE NULL " +
" end AS previous_sequence" +
") " +
"INSERT INTO eventstore.events " +
" ( " +
" event_type, " +
" aggregate_type," +
" aggregate_id, " +
" aggregate_version, " +
" creation_date, " +
" event_data, " +
" editor_user, " +
" editor_service, " +
" resource_owner, " +
" previous_sequence " +
" ) " +
" ( " +
" SELECT " +
" event_type, " +
" aggregate_type," +
" aggregate_id, " +
" aggregate_version, " +
" COALESCE(creation_date, NOW()), " +
" event_data, " +
" editor_user, " +
" editor_service, " +
" resource_owner, " +
" previous_sequence " +
" FROM data " +
" ) " +
"RETURNING id, event_sequence, previous_sequence, creation_date, resource_owner"
uniqueInsert = `INSERT INTO eventstore.unique_constraints
(
unique_type,
unique_field
)
VALUES (
$1,
$2
)`
uniqueDelete = `DELETE FROM eventstore.unique_constraints
WHERE unique_type = $1 and unique_field = $2`
)
type CRDB struct {
client *sql.DB
}
func NewCRDB(client *sql.DB) *CRDB {
return &CRDB{client}
}
func (db *CRDB) Health(ctx context.Context) error { return db.client.Ping() }
// Push adds all events to the eventstreams of the aggregates.
// This call is transaction safe. The transaction will be rolled back if one event fails
func (db *CRDB) Push(ctx context.Context, events []*repository.Event, uniqueConstraints ...*repository.UniqueConstraint) error {
err := crdb.ExecuteTx(ctx, db.client, nil, func(tx *sql.Tx) error {
stmt, err := tx.PrepareContext(ctx, crdbInsert)
if err != nil {
logging.Log("SQL-3to5p").WithError(err).Warn("prepare failed")
return caos_errs.ThrowInternal(err, "SQL-OdXRE", "prepare failed")
}
var previousSequence Sequence
for _, event := range events {
err = stmt.QueryRowContext(ctx,
event.Type,
event.AggregateType,
event.AggregateID,
event.Version,
Data(event.Data),
event.EditorUser,
event.EditorService,
event.ResourceOwner,
).Scan(&event.ID, &event.Sequence, &previousSequence, &event.CreationDate, &event.ResourceOwner)
event.PreviousSequence = uint64(previousSequence)
if err != nil {
logging.LogWithFields("SQL-IP3js",
"aggregate", event.AggregateType,
"aggregateId", event.AggregateID,
"aggregateType", event.AggregateType,
"eventType", event.Type).WithError(err).Info("query failed",
"seq", event.PreviousSequence)
return caos_errs.ThrowInternal(err, "SQL-SBP37", "unable to create event")
}
}
err = db.handleUniqueConstraints(ctx, tx, uniqueConstraints...)
if err != nil {
return err
}
return nil
})
if err != nil && !errors.Is(err, &caos_errs.CaosError{}) {
err = caos_errs.ThrowInternal(err, "SQL-DjgtG", "unable to store events")
}
return err
}
// handleUniqueConstraints adds or removes unique constraints
func (db *CRDB) handleUniqueConstraints(ctx context.Context, tx *sql.Tx, uniqueConstraints ...*repository.UniqueConstraint) (err error) {
if uniqueConstraints == nil || len(uniqueConstraints) == 0 || (len(uniqueConstraints) == 1 && uniqueConstraints[0] == nil) {
return nil
}
for _, uniqueConstraint := range uniqueConstraints {
if uniqueConstraint.Action == repository.UniqueConstraintAdd {
_, err := tx.ExecContext(ctx, uniqueInsert, uniqueConstraint.UniqueType, uniqueConstraint.UniqueField)
if err != nil {
logging.LogWithFields("SQL-IP3js",
"unique_type", uniqueConstraint.UniqueType,
"unique_field", uniqueConstraint.UniqueField).WithError(err).Info("insert unique constraint failed")
if db.isUniqueViolationError(err) {
return caos_errs.ThrowAlreadyExists(err, "SQL-M0dsf", uniqueConstraint.ErrorMessage)
}
return caos_errs.ThrowInternal(err, "SQL-dM9ds", "unable to create unique constraint ")
}
} else if uniqueConstraint.Action == repository.UniqueConstraintRemoved {
_, err := tx.ExecContext(ctx, uniqueDelete, uniqueConstraint.UniqueType, uniqueConstraint.UniqueField)
if err != nil {
logging.LogWithFields("SQL-M0vsf",
"unique_type", uniqueConstraint.UniqueType,
"unique_field", uniqueConstraint.UniqueField).WithError(err).Info("delete unique constraint failed")
return caos_errs.ThrowInternal(err, "SQL-6n88i", "unable to remove unique constraint ")
}
}
}
return nil
}
// Filter returns all events matching the given search query
func (db *CRDB) Filter(ctx context.Context, searchQuery *repository.SearchQuery) (events []*repository.Event, err error) {
events = []*repository.Event{}
err = query(ctx, db, searchQuery, &events)
if err != nil {
return nil, err
}
return events, nil
}
//LatestSequence returns the latest sequence found by the search query
func (db *CRDB) LatestSequence(ctx context.Context, searchQuery *repository.SearchQuery) (uint64, error) {
var seq Sequence
err := query(ctx, db, searchQuery, &seq)
if err != nil {
return 0, err
}
return uint64(seq), nil
}
func (db *CRDB) db() *sql.DB {
return db.client
}
func (db *CRDB) orderByEventSequence(desc bool) string {
if desc {
return " ORDER BY event_sequence DESC"
}
return " ORDER BY event_sequence"
}
func (db *CRDB) eventQuery() string {
return "SELECT" +
" creation_date" +
", event_type" +
", event_sequence" +
", previous_sequence" +
", event_data" +
", editor_service" +
", editor_user" +
", resource_owner" +
", aggregate_type" +
", aggregate_id" +
", aggregate_version" +
" FROM eventstore.events"
}
func (db *CRDB) maxSequenceQuery() string {
return "SELECT MAX(event_sequence) FROM eventstore.events"
}
func (db *CRDB) columnName(col repository.Field) string {
switch col {
case repository.FieldAggregateID:
return "aggregate_id"
case repository.FieldAggregateType:
return "aggregate_type"
case repository.FieldSequence:
return "event_sequence"
case repository.FieldResourceOwner:
return "resource_owner"
case repository.FieldEditorService:
return "editor_service"
case repository.FieldEditorUser:
return "editor_user"
case repository.FieldEventType:
return "event_type"
case repository.FieldEventData:
return "event_data"
default:
return ""
}
}
func (db *CRDB) conditionFormat(operation repository.Operation) string {
if operation == repository.OperationIn {
return "%s %s ANY(?)"
}
return "%s %s ?"
}
func (db *CRDB) operation(operation repository.Operation) string {
switch operation {
case repository.OperationEquals, repository.OperationIn:
return "="
case repository.OperationGreater:
return ">"
case repository.OperationLess:
return "<"
case repository.OperationJSONContains:
return "@>"
}
return ""
}
var (
placeholder = regexp.MustCompile(`\?`)
)
//placeholder replaces all "?" with postgres placeholders ($<NUMBER>)
func (db *CRDB) placeholder(query string) string {
occurrences := placeholder.FindAllStringIndex(query, -1)
if len(occurrences) == 0 {
return query
}
replaced := query[:occurrences[0][0]]
for i, l := range occurrences {
nextIDX := len(query)
if i < len(occurrences)-1 {
nextIDX = occurrences[i+1][0]
}
replaced = replaced + "$" + strconv.Itoa(i+1) + query[l[1]:nextIDX]
}
return replaced
}
func (db *CRDB) isUniqueViolationError(err error) bool {
if pqErr, ok := err.(*pq.Error); ok {
if pqErr.Code == "23505" {
return true
}
}
return false
}
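
A hedged sketch of how this repository could be wired up; the connection string is a placeholder and the helper is not part of this change.

// openEventstore is a hypothetical constructor showing the intended wiring:
// open a database/sql client, wrap it in CRDB and check connectivity.
func openEventstore(ctx context.Context) (*CRDB, error) {
	client, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable") // placeholder DSN
	if err != nil {
		return nil, err
	}
	db := NewCRDB(client)
	if err := db.Health(ctx); err != nil {
		return nil, err
	}
	return db, nil
}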

File diff suppressed because it is too large


@@ -0,0 +1,170 @@
package sql
import (
"database/sql"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/caos/logging"
"github.com/cockroachdb/cockroach-go/v2/testserver"
)
var (
migrationsPath = os.ExpandEnv("${GOPATH}/src/github.com/caos/zitadel/migrations/cockroach")
testCRDBClient *sql.DB
)
func TestMain(m *testing.M) {
ts, err := testserver.NewTestServer()
if err != nil {
logging.LogWithFields("REPOS-RvjLG", "error", err).Fatal("unable to start db")
}
testCRDBClient, err = sql.Open("postgres", ts.PGURL().String())
if err != nil {
logging.LogWithFields("REPOS-CF6dQ", "error", err).Fatal("unable to connect to db")
}
if err = testCRDBClient.Ping(); err != nil {
logging.LogWithFields("REPOS-CF6dQ", "error", err).Fatal("unable to ping db")
}
defer func() {
testCRDBClient.Close()
ts.Stop()
}()
if err = executeMigrations(); err != nil {
logging.LogWithFields("REPOS-jehDD", "error", err).Fatal("migrations failed")
}
os.Exit(m.Run())
}
func executeMigrations() error {
files, err := migrationFilePaths()
if err != nil {
return err
}
sort.Sort(files)
if err = setPasswordNULL(); err != nil {
return err
}
if err = createFlywayHistory(); err != nil {
return err
}
for _, file := range files {
migrationData, err := ioutil.ReadFile(file)
if err != nil {
return err
}
migration := os.ExpandEnv(string(migrationData))
transactionInMigration := strings.Contains(migration, "BEGIN;")
exec := testCRDBClient.Exec
var tx *sql.Tx
if !transactionInMigration {
tx, err = testCRDBClient.Begin()
if err != nil {
return fmt.Errorf("begin file: %v || err: %w", file, err)
}
exec = tx.Exec
}
if _, err = exec(migration); err != nil {
return fmt.Errorf("exec file: %v || err: %w", file, err)
}
duration := 1 * time.Second
if !transactionInMigration {
if err = tx.Commit(); err != nil {
return fmt.Errorf("commit file: %v || err: %w", file, err)
}
duration = 0
}
time.Sleep(duration)
}
return nil
}
func setPasswordNULL() error {
passwordNames := []string{
"eventstorepassword",
"managementpassword",
"adminapipassword",
"authpassword",
"notificationpassword",
"authzpassword",
}
for _, name := range passwordNames {
if err := os.Setenv(name, "NULL"); err != nil {
return err
}
}
return nil
}
func createFlywayHistory() error {
_, err := testCRDBClient.Exec("CREATE TABLE defaultdb.flyway_schema_history(id TEXT, PRIMARY KEY(id));")
return err
}
func fillUniqueData(unique_type, field string) error {
_, err := testCRDBClient.Exec("INSERT INTO eventstore.unique_constraints (unique_type, unique_field) VALUES ($1, $2)", unique_type, field)
return err
}
type migrationPaths []string
type version struct {
major int
minor int
}
func versionFromPath(s string) version {
v := s[strings.Index(s, "/V")+2 : strings.Index(s, "__")]
splitted := strings.Split(v, ".")
res := version{}
var err error
if len(splitted) >= 1 {
res.major, err = strconv.Atoi(splitted[0])
if err != nil {
panic(err)
}
}
if len(splitted) >= 2 {
res.minor, err = strconv.Atoi(splitted[1])
if err != nil {
panic(err)
}
}
return res
}
func (a migrationPaths) Len() int { return len(a) }
func (a migrationPaths) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a migrationPaths) Less(i, j int) bool {
versionI := versionFromPath(a[i])
versionJ := versionFromPath(a[j])
return versionI.major < versionJ.major ||
(versionI.major == versionJ.major && versionI.minor < versionJ.minor)
}
func migrationFilePaths() (migrationPaths, error) {
files := make(migrationPaths, 0)
err := filepath.Walk(migrationsPath, func(path string, info os.FileInfo, err error) error {
if err != nil || info.IsDir() || !strings.HasSuffix(info.Name(), ".sql") {
return err
}
files = append(files, path)
return nil
})
return files, err
}


@@ -0,0 +1,164 @@
package sql
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/caos/logging"
caos_errs "github.com/caos/zitadel/internal/errors"
z_errors "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/repository"
"github.com/lib/pq"
)
type querier interface {
columnName(repository.Field) string
operation(repository.Operation) string
conditionFormat(repository.Operation) string
placeholder(query string) string
eventQuery() string
maxSequenceQuery() string
db() *sql.DB
orderByEventSequence(desc bool) string
}
type rowScan func(scan, interface{}) error
type scan func(dest ...interface{}) error
func query(ctx context.Context, criteria querier, searchQuery *repository.SearchQuery, dest interface{}) error {
query, rowScanner := prepareColumns(criteria, searchQuery.Columns)
where, values := prepareCondition(criteria, searchQuery.Filters)
if where == "" || query == "" {
return caos_errs.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}
query += where
if searchQuery.Columns != repository.ColumnsMaxSequence {
query += criteria.orderByEventSequence(searchQuery.Desc)
}
if searchQuery.Limit > 0 {
values = append(values, searchQuery.Limit)
query += " LIMIT ?"
}
query = criteria.placeholder(query)
rows, err := criteria.db().QueryContext(ctx, query, values...)
if err != nil {
logging.Log("SQL-HP3Uk").WithError(err).Info("query failed")
return caos_errs.ThrowInternal(err, "SQL-IJuyR", "unable to filter events")
}
defer rows.Close()
for rows.Next() {
err = rowScanner(rows.Scan, dest)
if err != nil {
return err
}
}
return nil
}
func prepareColumns(criteria querier, columns repository.Columns) (string, func(s scan, dest interface{}) error) {
switch columns {
case repository.ColumnsMaxSequence:
return criteria.maxSequenceQuery(), maxSequenceScanner
case repository.ColumnsEvent:
return criteria.eventQuery(), eventsScanner
default:
return "", nil
}
}
func maxSequenceScanner(row scan, dest interface{}) (err error) {
sequence, ok := dest.(*Sequence)
if !ok {
return z_errors.ThrowInvalidArgument(nil, "SQL-NBjA9", "type must be sequence")
}
err = row(sequence)
if err == nil || errors.Is(err, sql.ErrNoRows) {
return nil
}
return z_errors.ThrowInternal(err, "SQL-bN5xg", "something went wrong")
}
func eventsScanner(scanner scan, dest interface{}) (err error) {
events, ok := dest.(*[]*repository.Event)
if !ok {
return z_errors.ThrowInvalidArgument(nil, "SQL-4GP6F", "type must be event")
}
var previousSequence Sequence
data := make(Data, 0)
event := new(repository.Event)
err = scanner(
&event.CreationDate,
&event.Type,
&event.Sequence,
&previousSequence,
&data,
&event.EditorService,
&event.EditorUser,
&event.ResourceOwner,
&event.AggregateType,
&event.AggregateID,
&event.Version,
)
if err != nil {
logging.Log("SQL-3mofs").WithError(err).Warn("unable to scan row")
return z_errors.ThrowInternal(err, "SQL-M0dsf", "unable to scan row")
}
event.PreviousSequence = uint64(previousSequence)
event.Data = make([]byte, len(data))
copy(event.Data, data)
*events = append(*events, event)
return nil
}
func prepareCondition(criteria querier, filters []*repository.Filter) (clause string, values []interface{}) {
values = make([]interface{}, len(filters))
clauses := make([]string, len(filters))
if len(filters) == 0 {
return clause, values
}
for i, filter := range filters {
value := filter.Value
switch value.(type) {
case []bool, []float64, []int64, []string, []repository.AggregateType, []repository.EventType, *[]bool, *[]float64, *[]int64, *[]string, *[]repository.AggregateType, *[]repository.EventType:
value = pq.Array(value)
case map[string]interface{}:
var err error
value, err = json.Marshal(value)
logging.Log("SQL-BSsNy").OnError(err).Warn("unable to marshal search value")
}
clauses[i] = getCondition(criteria, filter)
if clauses[i] == "" {
return "", nil
}
values[i] = value
}
return " WHERE " + strings.Join(clauses, " AND "), values
}
func getCondition(cond querier, filter *repository.Filter) (condition string) {
field := cond.columnName(filter.Field)
operation := cond.operation(filter.Operation)
if field == "" || operation == "" {
return ""
}
format := cond.conditionFormat(filter.Operation)
return fmt.Sprintf(format, field, operation)
}
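
To make the composition above concrete, a hedged trace of what query() builds for a simple search (one aggregate_type filter, limit 10); the helper is illustrative only and mirrors the steps of query() without executing anything against the database.

// exampleBuiltQuery is a hypothetical walk-through of the helpers above.
func exampleBuiltQuery() (string, []interface{}) {
	crdb := &CRDB{}
	searchQuery := &repository.SearchQuery{
		Columns: repository.ColumnsEvent,
		Limit:   10,
		Filters: []*repository.Filter{
			repository.NewFilter(repository.FieldAggregateType, repository.AggregateType("user"), repository.OperationEquals),
		},
	}
	q, _ := prepareColumns(crdb, searchQuery.Columns)            // SELECT ... FROM eventstore.events
	where, values := prepareCondition(crdb, searchQuery.Filters) // " WHERE aggregate_type = ?"
	q += where + crdb.orderByEventSequence(searchQuery.Desc)
	if searchQuery.Limit > 0 {
		values = append(values, searchQuery.Limit)
		q += " LIMIT ?"
	}
	// placeholder rewrites every "?" to "$1", "$2", ... for the postgres wire protocol
	return crdb.placeholder(q), values
}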


@@ -0,0 +1,770 @@
package sql
import (
"context"
"database/sql"
"database/sql/driver"
"reflect"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/repository"
"github.com/lib/pq"
)
func Test_getCondition(t *testing.T) {
type args struct {
filter *repository.Filter
}
tests := []struct {
name string
args args
want string
}{
{
name: "equals",
args: args{filter: repository.NewFilter(repository.FieldAggregateID, "", repository.OperationEquals)},
want: "aggregate_id = ?",
},
{
name: "greater",
args: args{filter: repository.NewFilter(repository.FieldSequence, 0, repository.OperationGreater)},
want: "event_sequence > ?",
},
{
name: "less",
args: args{filter: repository.NewFilter(repository.FieldSequence, 5000, repository.OperationLess)},
want: "event_sequence < ?",
},
{
name: "in list",
args: args{filter: repository.NewFilter(repository.FieldAggregateType, []repository.AggregateType{"movies", "actors"}, repository.OperationIn)},
want: "aggregate_type = ANY(?)",
},
{
name: "invalid operation",
args: args{filter: repository.NewFilter(repository.FieldAggregateType, []repository.AggregateType{"movies", "actors"}, repository.Operation(-1))},
want: "",
},
{
name: "invalid field",
args: args{filter: repository.NewFilter(repository.Field(-1), []repository.AggregateType{"movies", "actors"}, repository.OperationEquals)},
want: "",
},
{
name: "invalid field and operation",
args: args{filter: repository.NewFilter(repository.Field(-1), []repository.AggregateType{"movies", "actors"}, repository.Operation(-1))},
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{}
if got := getCondition(db, tt.args.filter); got != tt.want {
t.Errorf("getCondition() = %v, want %v", got, tt.want)
}
})
}
}
func Test_prepareColumns(t *testing.T) {
type fields struct {
dbRow []interface{}
}
type args struct {
columns repository.Columns
dest interface{}
dbErr error
}
type res struct {
query string
expected interface{}
dbErr func(error) bool
}
tests := []struct {
name string
args args
res res
fields fields
}{
{
name: "invalid columns",
args: args{columns: repository.Columns(-1)},
res: res{
query: "",
dbErr: func(err error) bool { return err == nil },
},
},
{
name: "max column",
args: args{
columns: repository.ColumnsMaxSequence,
dest: new(Sequence),
},
res: res{
query: "SELECT MAX(event_sequence) FROM eventstore.events",
expected: Sequence(5),
},
fields: fields{
dbRow: []interface{}{Sequence(5)},
},
},
{
name: "max sequence wrong dest type",
args: args{
columns: repository.ColumnsMaxSequence,
dest: new(uint64),
},
res: res{
query: "SELECT MAX(event_sequence) FROM eventstore.events",
dbErr: errors.IsErrorInvalidArgument,
},
},
{
name: "events",
args: args{
columns: repository.ColumnsEvent,
dest: &[]*repository.Event{},
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
expected: []*repository.Event{
{AggregateID: "hodor", AggregateType: "user", Sequence: 5, Data: make(Data, 0)},
},
},
fields: fields{
dbRow: []interface{}{time.Time{}, repository.EventType(""), uint64(5), Sequence(0), Data(nil), "", "", "", repository.AggregateType("user"), "hodor", repository.Version("")},
},
},
{
name: "events wrong dest type",
args: args{
columns: repository.ColumnsEvent,
dest: []*repository.Event{},
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsErrorInvalidArgument,
},
},
{
name: "event query error",
args: args{
columns: repository.ColumnsEvent,
dest: &[]*repository.Event{},
dbErr: sql.ErrConnDone,
},
res: res{
query: "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
dbErr: errors.IsInternal,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
crdb := &CRDB{}
query, rowScanner := prepareColumns(crdb, tt.args.columns)
if query != tt.res.query {
t.Errorf("prepareColumns() got = %s, want %s", query, tt.res.query)
}
if tt.res.query == "" && rowScanner != nil {
t.Errorf("row scanner should be nil")
}
if rowScanner == nil {
return
}
err := rowScanner(prepareTestScan(tt.args.dbErr, tt.fields.dbRow), tt.args.dest)
if err != nil && tt.res.dbErr == nil || err != nil && !tt.res.dbErr(err) || err == nil && tt.res.dbErr != nil {
t.Errorf("wrong error type in rowScanner got: %v", err)
return
}
if tt.res.dbErr != nil && tt.res.dbErr(err) {
return
}
if !reflect.DeepEqual(reflect.Indirect(reflect.ValueOf(tt.args.dest)).Interface(), tt.res.expected) {
t.Errorf("unexpected result from rowScanner \nwant: %+v \ngot: %+v", tt.fields.dbRow, reflect.Indirect(reflect.ValueOf(tt.args.dest)).Interface())
}
})
}
}
func prepareTestScan(err error, res []interface{}) scan {
return func(dests ...interface{}) error {
if err != nil {
return err
}
if len(dests) != len(res) {
return errors.ThrowInvalidArgumentf(nil, "SQL-NML1q", "expected len %d got %d", len(res), len(dests))
}
for i, r := range res {
reflect.ValueOf(dests[i]).Elem().Set(reflect.ValueOf(r))
}
return nil
}
}
func Test_prepareCondition(t *testing.T) {
type args struct {
filters []*repository.Filter
}
type res struct {
clause string
values []interface{}
}
tests := []struct {
name string
args args
res res
}{
{
name: "nil filters",
args: args{
filters: nil,
},
res: res{
clause: "",
values: nil,
},
},
{
name: "empty filters",
args: args{
filters: []*repository.Filter{},
},
res: res{
clause: "",
values: nil,
},
},
{
name: "invalid condition",
args: args{
filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateID, "wrong", repository.Operation(-1)),
},
},
res: res{
clause: "",
values: nil,
},
},
{
name: "array as condition value",
args: args{
filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateType, []repository.AggregateType{"user", "org"}, repository.OperationIn),
},
},
res: res{
clause: " WHERE aggregate_type = ANY(?)",
values: []interface{}{pq.Array([]repository.AggregateType{"user", "org"})},
},
},
{
name: "multiple filters",
args: args{
filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateType, []repository.AggregateType{"user", "org"}, repository.OperationIn),
repository.NewFilter(repository.FieldAggregateID, "1234", repository.OperationEquals),
repository.NewFilter(repository.FieldEventType, []repository.EventType{"user.created", "org.created"}, repository.OperationIn),
},
},
res: res{
clause: " WHERE aggregate_type = ANY(?) AND aggregate_id = ? AND event_type = ANY(?)",
values: []interface{}{pq.Array([]repository.AggregateType{"user", "org"}), "1234", pq.Array([]repository.EventType{"user.created", "org.created"})},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
crdb := &CRDB{}
gotClause, gotValues := prepareCondition(crdb, tt.args.filters)
if gotClause != tt.res.clause {
t.Errorf("prepareCondition() gotClause = %v, want %v", gotClause, tt.res.clause)
}
if len(gotValues) != len(tt.res.values) {
t.Errorf("wrong length of gotten values got = %d, want %d", len(gotValues), len(tt.res.values))
return
}
for i, value := range gotValues {
if !reflect.DeepEqual(value, tt.res.values[i]) {
t.Errorf("prepareCondition() gotValues = %v, want %v", gotValues, tt.res.values)
}
}
})
}
}
func Test_query_events_with_crdb(t *testing.T) {
type args struct {
searchQuery *repository.SearchQuery
}
type fields struct {
existingEvents []*repository.Event
client *sql.DB
}
type res struct {
eventCount int
}
tests := []struct {
name string
fields fields
args args
res res
wantErr bool
}{
{
name: "aggregate type filter no events",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateType, "not found", repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "300"),
generateEvent(t, "300"),
generateEvent(t, "300"),
},
},
res: res{
eventCount: 0,
},
wantErr: false,
},
{
name: "aggregate type filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateType, t.Name(), repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "301"),
generateEvent(t, "302"),
generateEvent(t, "302"),
generateEvent(t, "303", func(e *repository.Event) { e.AggregateType = "not in list" }),
},
},
res: res{
eventCount: 3,
},
wantErr: false,
},
{
name: "aggregate type and id filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldAggregateType, t.Name(), repository.OperationEquals),
repository.NewFilter(repository.FieldAggregateID, "303", repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "303"),
generateEvent(t, "303"),
generateEvent(t, "303"),
generateEvent(t, "304", func(e *repository.Event) { e.AggregateType = "not in list" }),
generateEvent(t, "305"),
},
},
res: res{
eventCount: 3,
},
wantErr: false,
},
{
name: "resource owner filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldResourceOwner, "caos", repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "306", func(e *repository.Event) { e.ResourceOwner = "caos" }),
generateEvent(t, "307", func(e *repository.Event) { e.ResourceOwner = "caos" }),
generateEvent(t, "308", func(e *repository.Event) { e.ResourceOwner = "caos" }),
generateEvent(t, "309", func(e *repository.Event) { e.ResourceOwner = "orgID" }),
generateEvent(t, "309", func(e *repository.Event) { e.ResourceOwner = "orgID" }),
},
},
res: res{
eventCount: 3,
},
wantErr: false,
},
{
name: "editor service filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldEditorService, "MANAGEMENT-API", repository.OperationEquals),
repository.NewFilter(repository.FieldEditorService, "ADMIN-API", repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "307", func(e *repository.Event) { e.EditorService = "MANAGEMENT-API" }),
generateEvent(t, "307", func(e *repository.Event) { e.EditorService = "MANAGEMENT-API" }),
generateEvent(t, "308", func(e *repository.Event) { e.EditorService = "ADMIN-API" }),
generateEvent(t, "309", func(e *repository.Event) { e.EditorService = "AUTHAPI" }),
generateEvent(t, "309", func(e *repository.Event) { e.EditorService = "AUTHAPI" }),
},
},
res: res{
eventCount: 3,
},
wantErr: false,
},
{
name: "editor user filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldEditorUser, "adlerhurst", repository.OperationEquals),
repository.NewFilter(repository.FieldEditorUser, "nobody", repository.OperationEquals),
repository.NewFilter(repository.FieldEditorUser, "", repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "310", func(e *repository.Event) { e.EditorUser = "adlerhurst" }),
generateEvent(t, "310", func(e *repository.Event) { e.EditorUser = "adlerhurst" }),
generateEvent(t, "310", func(e *repository.Event) { e.EditorUser = "nobody" }),
generateEvent(t, "311", func(e *repository.Event) { e.EditorUser = "" }),
generateEvent(t, "311", func(e *repository.Event) { e.EditorUser = "" }),
generateEvent(t, "312", func(e *repository.Event) { e.EditorUser = "fforootd" }),
generateEvent(t, "312", func(e *repository.Event) { e.EditorUser = "fforootd" }),
},
},
res: res{
eventCount: 5,
},
wantErr: false,
},
{
name: "event type filter events found",
args: args{
searchQuery: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
repository.NewFilter(repository.FieldEventType, repository.EventType("user.created"), repository.OperationEquals),
repository.NewFilter(repository.FieldEventType, repository.EventType("user.updated"), repository.OperationEquals),
},
},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{
generateEvent(t, "311", func(e *repository.Event) { e.Type = "user.created" }),
generateEvent(t, "311", func(e *repository.Event) { e.Type = "user.updated" }),
generateEvent(t, "311", func(e *repository.Event) { e.Type = "user.deactivated" }),
generateEvent(t, "311", func(e *repository.Event) { e.Type = "user.locked" }),
generateEvent(t, "312", func(e *repository.Event) { e.Type = "user.created" }),
generateEvent(t, "312", func(e *repository.Event) { e.Type = "user.updated" }),
generateEvent(t, "312", func(e *repository.Event) { e.Type = "user.deactivated" }),
generateEvent(t, "312", func(e *repository.Event) { e.Type = "user.reactivated" }),
generateEvent(t, "313", func(e *repository.Event) { e.Type = "user.locked" }),
},
},
res: res{
eventCount: 7,
},
wantErr: false,
},
{
name: "fail because no filter",
args: args{
searchQuery: &repository.SearchQuery{},
},
fields: fields{
client: testCRDBClient,
existingEvents: []*repository.Event{},
},
res: res{
eventCount: 0,
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := &CRDB{
client: tt.fields.client,
}
// setup initial data for query
if err := db.Push(context.Background(), tt.fields.existingEvents); err != nil {
t.Errorf("error in setup = %v", err)
return
}
events := []*repository.Event{}
if err := query(context.Background(), db, tt.args.searchQuery, &events); (err != nil) != tt.wantErr {
t.Errorf("CRDB.query() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_query_events_mocked(t *testing.T) {
type args struct {
query *repository.SearchQuery
dest interface{}
}
type res struct {
wantErr bool
}
type fields struct {
mock *dbMock
}
tests := []struct {
name string
args args
fields fields
res res
}{
{
name: "with order by desc",
args: args{
dest: &[]*repository.Event{},
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: true,
Filters: []*repository.Filter{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
`SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = \$1 ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
),
},
res: res{
wantErr: false,
},
},
{
name: "with limit",
args: args{
dest: &[]*repository.Event{},
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: false,
Limit: 5,
Filters: []*repository.Filter{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
`SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = \$1 ORDER BY event_sequence LIMIT \$2`,
[]driver.Value{repository.AggregateType("user"), uint64(5)},
),
},
res: res{
wantErr: false,
},
},
{
name: "with limit and order by desc",
args: args{
dest: &[]*repository.Event{},
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: true,
Limit: 5,
Filters: []*repository.Filter{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
`SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = \$1 ORDER BY event_sequence DESC LIMIT \$2`,
[]driver.Value{repository.AggregateType("user"), uint64(5)},
),
},
res: res{
wantErr: false,
},
},
{
name: "error sql conn closed",
args: args{
dest: &[]*repository.Event{},
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: true,
Limit: 0,
Filters: []*repository.Filter{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQueryErr(t,
`SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = \$1 ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
sql.ErrConnDone),
},
res: res{
wantErr: true,
},
},
{
name: "error unexpected dest",
args: args{
dest: nil,
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Desc: true,
Limit: 0,
Filters: []*repository.Filter{
{
Field: repository.FieldAggregateType,
Value: repository.AggregateType("user"),
Operation: repository.OperationEquals,
},
},
},
},
fields: fields{
mock: newMockClient(t).expectQuery(t,
`SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = \$1 ORDER BY event_sequence DESC`,
[]driver.Value{repository.AggregateType("user")},
&repository.Event{Sequence: 100}),
},
res: res{
wantErr: true,
},
},
{
name: "error no columns",
args: args{
query: &repository.SearchQuery{
Columns: repository.Columns(-1),
},
},
res: res{
wantErr: true,
},
},
{
name: "invalid condition",
args: args{
query: &repository.SearchQuery{
Columns: repository.ColumnsEvent,
Filters: []*repository.Filter{
{},
},
},
},
res: res{
wantErr: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
crdb := &CRDB{}
if tt.fields.mock != nil {
crdb.client = tt.fields.mock.client
}
err := query(context.Background(), crdb, tt.args.query, tt.args.dest)
if (err != nil) != tt.res.wantErr {
t.Errorf("query() error = %v, wantErr %v", err, tt.res.wantErr)
}
if tt.fields.mock == nil {
return
}
if err := tt.fields.mock.mock.ExpectationsWereMet(); err != nil {
t.Errorf("not all expectaions met: %v", err)
}
})
}
}
type dbMock struct {
mock sqlmock.Sqlmock
client *sql.DB
}
func (m *dbMock) expectQuery(t *testing.T, expectedQuery string, args []driver.Value, events ...*repository.Event) *dbMock {
query := m.mock.ExpectQuery(expectedQuery).WithArgs(args...)
rows := sqlmock.NewRows([]string{"event_sequence"})
for _, event := range events {
rows = rows.AddRow(event.Sequence)
}
query.WillReturnRows(rows).RowsWillBeClosed()
return m
}
func (m *dbMock) expectQueryErr(t *testing.T, expectedQuery string, args []driver.Value, err error) *dbMock {
m.mock.ExpectQuery(expectedQuery).WithArgs(args...).WillReturnError(err)
return m
}
func newMockClient(t *testing.T) *dbMock {
t.Helper()
db, mock, err := sqlmock.New()
if err != nil {
t.Errorf("unable to create mock client: %v", err)
t.FailNow()
return nil
}
return &dbMock{
mock: mock,
client: db,
}
}


@@ -0,0 +1,49 @@
package sql
import (
"database/sql/driver"
)
// Data represents a byte array that may be null.
// Data implements the sql.Scanner interface
type Data []byte
// Scan implements the Scanner interface.
func (data *Data) Scan(value interface{}) error {
if value == nil {
*data = nil
return nil
}
*data = Data(value.([]byte))
return nil
}
// Value implements the driver Valuer interface.
func (data Data) Value() (driver.Value, error) {
if len(data) == 0 {
return nil, nil
}
return []byte(data), nil
}
// Sequence represents a number that may be null.
// Sequence implements the sql.Scanner interface
type Sequence uint64
// Scan implements the Scanner interface.
func (seq *Sequence) Scan(value interface{}) error {
if value == nil {
*seq = 0
return nil
}
*seq = Sequence(value.(int64))
return nil
}
// Value implements the driver Valuer interface.
func (seq Sequence) Value() (driver.Value, error) {
if seq == 0 {
return nil, nil
}
return int64(seq), nil
}
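
A hedged illustration of the NULL handling above; the helper and the values are assumptions, not part of this change.

// scanExample is a hypothetical illustration: a NULL previous_sequence scans
// to Sequence(0), and a zero Sequence is written back to the db as NULL.
func scanExample() error {
	var seq Sequence
	if err := seq.Scan(nil); err != nil { // NULL from the db becomes Sequence(0)
		return err
	}
	var data Data
	if err := data.Scan([]byte(`{"userName":"hodor"}`)); err != nil {
		return err
	}
	v, err := seq.Value() // Sequence(0) is stored as NULL (v == nil)
	_ = v
	_ = data
	return err
}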


@@ -0,0 +1,29 @@
package repository
//UniqueConstraint represents all information about a unique constraint
type UniqueConstraint struct {
//UniqueField is the field which should be unique
UniqueField string
//UniqueType is the type of the unique field
UniqueType string
//Action defines if unique constraint should be added or removed
Action UniqueConstraintAction
//ErrorMessage is the message key which should be returned if constraint is violated
ErrorMessage string
}
type UniqueConstraintAction int32
const (
UniqueConstraintAdd UniqueConstraintAction = iota
UniqueConstraintRemoved
uniqueConstraintActionCount
)
func (f UniqueConstraintAction) Valid() bool {
return f >= 0 && f < uniqueConstraintActionCount
}
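
A hedged example of the two constraint actions; the unique type, the error message key and the helper names are assumptions for illustration.

// newAddUsernameConstraint is a hypothetical constructor showing how a
// constraint is registered when a username is claimed.
func newAddUsernameConstraint(username string) *UniqueConstraint {
	return &UniqueConstraint{
		UniqueType:   "usernames",                 // assumed unique type
		UniqueField:  username,                    // the value that must be unique
		Action:       UniqueConstraintAdd,
		ErrorMessage: "Errors.User.AlreadyExists", // assumed message key
	}
}

// newRemoveUsernameConstraint is the hypothetical counterpart used when the
// username is released again.
func newRemoveUsernameConstraint(username string) *UniqueConstraint {
	return &UniqueConstraint{
		UniqueType:  "usernames",
		UniqueField: username,
		Action:      UniqueConstraintRemoved,
	}
}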


@@ -0,0 +1,20 @@
package repository
import (
"regexp"
"github.com/caos/zitadel/internal/errors"
)
var versionRegexp = regexp.MustCompile(`^v[0-9]+(\.[0-9]+){0,2}$`)
//Version represents the semver of an aggregate
type Version string
//Validate checks if v is valid semver
func (v Version) Validate() error {
if !versionRegexp.MatchString(string(v)) {
return errors.ThrowPreconditionFailed(nil, "MODEL-luDuS", "version is not semver")
}
return nil
}


@@ -0,0 +1,39 @@
package repository
import "testing"
func TestVersion_Validate(t *testing.T) {
tests := []struct {
name string
v Version
wantErr bool
}{
{
"correct version",
"v1.23.23",
false,
},
{
"no v prefix",
"1.2.2",
true,
},
{
"letters in version",
"v1.as.3",
true,
},
{
"no version",
"",
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := tt.v.Validate(); (err != nil) != tt.wantErr {
t.Errorf("Version.Validate() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}