Mirror of https://github.com/zitadel/zitadel.git (synced 2025-08-11 12:07:38 +00:00)
fix(spooler): correct workers (#508)
* one concurrent task
* disable spooler
* fix: improve concurrency in spooler
* fix: don't block lock
* fix: break if lock failed
* fix: check if handler is working
* fix: worker id
* fix: test
* fix: use limit for spoolers configured in startup.yaml
* fix test
* fix: factory
* fix(key): only reduce if not expired
* fix(searchQueryFactory): check for string-slice in aggregateID
* fix(migrations): combine migrations
* fix: allow saving multiple objects in one request
* fix(eventstore): logging
* fix(eventstore): rethink insert in locks table
* fix: ignore failed tests for the moment
* fix: tuubel
* fix: for tests in io
* fix: ignore tests for io
* fix: rename concurrent tasks to workers
* fix: uncomment tests and remove some tests
* fix: revert changes for io
* refactor(eventstore): combine types of sql in one file
* refactor(eventstore): logs, TODOs, tests
* fix(eventstore): sql package
* test(eventstore): add tests for search query factory
* chore: logs
* fix(spooler): optimize lock query
* chore(migrations): rename locks.object_type to view_name
* chore(migrations): refactor migrations
* test: uncomment tests
* fix: rename PrepareSaves to PrepareBulkSave
* chore: go dependencies
* fix(migrations): add id in events table
* refactor(lock): fewer magic numbers

Co-authored-by: Livio Amstutz <livio.a@gmail.com>
.gitignore (vendored): 1 line changed
@@ -21,6 +21,7 @@ debug

# IDE
.idea
.vscode
.DS_STORE

# credential
google-credentials
startup.yaml

@@ -42,7 +42,7 @@ AuthZ:
     Cert: $CR_AUTHZ_CERT
     Key: $CR_AUTHZ_KEY
   Spooler:
-    ConcurrentTasks: 1
+    ConcurrentWorkers: 1
     BulkLimit: 100
     FailureCountUntilSkip: 5
@@ -90,7 +90,7 @@ Auth:
     Cert: $CR_AUTH_CERT
     Key: $CR_AUTH_KEY
   Spooler:
-    ConcurrentTasks: 4
+    ConcurrentWorkers: 1
     BulkLimit: 100
     FailureCountUntilSkip: 5
   KeyConfig:
@@ -134,7 +134,7 @@ Admin:
     Cert: $CR_ADMINAPI_CERT
     Key: $CR_ADMINAPI_KEY
   Spooler:
-    ConcurrentTasks: 1
+    ConcurrentWorkers: 1
     BulkLimit: 100
     FailureCountUntilSkip: 5
@@ -171,7 +171,7 @@ Mgmt:
     Cert: $CR_MANAGEMENT_CERT
     Key: $CR_MANAGEMENT_KEY
   Spooler:
-    ConcurrentTasks: 4
+    ConcurrentWorkers: 1
     BulkLimit: 100
     FailureCountUntilSkip: 5
@@ -275,9 +275,11 @@ Notification:
     Cert: $CR_NOTIFICATION_CERT
     Key: $CR_NOTIFICATION_KEY
   Spooler:
-    ConcurrentTasks: 4
+    ConcurrentWorkers: 1
     BulkLimit: 100
     FailureCountUntilSkip: 5
     Handlers:
       Notification:
-        MinimumCycleDuration: 10s
+        MinimumCycleDuration: 5s
+      User:
+        MinimumCycleDuration: 5s
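After this commit every Spooler block in startup.yaml has the same shape. A minimal example of the resulting block; the inline comments are editorial, and in particular reading a low worker count as a way to throttle or effectively disable spooling is an inference from the "disable spooler" note in the commit message, not documented behavior:

    Spooler:
      ConcurrentWorkers: 1      # number of worker goroutines the spooler runs
      BulkLimit: 100            # max events fetched per query; per this commit the limit is now honored
      FailureCountUntilSkip: 5  # skip an event for a view after this many failed reductions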
go.mod: 29 lines changed
@@ -3,7 +3,7 @@ module github.com/caos/zitadel

 go 1.14

 require (
-	cloud.google.com/go v0.60.0 // indirect
+	cloud.google.com/go v0.61.0 // indirect
 	contrib.go.opencensus.io/exporter/stackdriver v0.13.2
 	github.com/BurntSushi/toml v0.3.1
 	github.com/DATA-DOG/go-sqlmock v1.4.1
@@ -11,18 +11,20 @@ require (
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Masterminds/sprig v2.22.0+incompatible
 	github.com/VictoriaMetrics/fastcache v1.5.7
-	github.com/ajstarks/svgo v0.0.0-20200320125537-f189e35d30ca
+	github.com/ajstarks/svgo v0.0.0-20200725142600-7a3c8b57fecb
 	github.com/allegro/bigcache v1.2.1
-	github.com/aws/aws-sdk-go v1.33.1 // indirect
+	github.com/aws/aws-sdk-go v1.33.12 // indirect
 	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc
 	github.com/caos/logging v0.0.2
 	github.com/caos/oidc v0.6.4
-	github.com/cockroachdb/cockroach-go/v2 v2.0.0
+	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
+	github.com/cockroachdb/cockroach-go/v2 v2.0.4
 	github.com/envoyproxy/protoc-gen-validate v0.4.0
 	github.com/ghodss/yaml v1.0.0
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/mock v1.4.3
 	github.com/golang/protobuf v1.4.2
+	github.com/google/go-cmp v0.5.1 // indirect
 	github.com/gorilla/csrf v1.7.0
 	github.com/gorilla/mux v1.7.4
 	github.com/gorilla/schema v1.1.0
@@ -30,14 +32,14 @@ require (
 	github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
 	github.com/grpc-ecosystem/grpc-gateway v1.14.6
 	github.com/huandu/xstrings v1.3.2 // indirect
-	github.com/imdario/mergo v0.3.9 // indirect
+	github.com/imdario/mergo v0.3.10 // indirect
 	github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1 // indirect
-	github.com/jinzhu/gorm v1.9.14
+	github.com/jinzhu/gorm v1.9.15
 	github.com/kevinburke/go-types v0.0.0-20200309064045-f2d4aea18a7a // indirect
 	github.com/kevinburke/go.uuid v1.2.0 // indirect
 	github.com/kevinburke/rest v0.0.0-20200429221318-0d2892b400f8 // indirect
-	github.com/kevinburke/twilio-go v0.0.0-20200630070730-ac4e42b98b94
-	github.com/lib/pq v1.7.0
+	github.com/kevinburke/twilio-go v0.0.0-20200713162607-ff84c3703a29
+	github.com/lib/pq v1.7.1
 	github.com/mattn/go-colorable v0.1.7 // indirect
 	github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
 	github.com/mitchellh/copystructure v1.0.0 // indirect
@@ -51,15 +53,12 @@ require (
 	github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2 // indirect
 	github.com/ttacon/libphonenumber v1.1.0
 	go.opencensus.io v0.22.4
-	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect
-	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
-	golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect
+	golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
+	golang.org/x/sys v0.0.0-20200727154430-2d971f7391a4 // indirect
 	golang.org/x/text v0.3.3
-	golang.org/x/tools v0.0.0-20200702044944-0cc1aa72b347
-	google.golang.org/genproto v0.0.0-20200702021140-07506425bd67
+	golang.org/x/tools v0.0.0-20200725200936-102e7d357031
+	google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d
 	google.golang.org/grpc v1.30.0
 	google.golang.org/protobuf v1.25.0
 	gopkg.in/square/go-jose.v2 v2.5.1
 	gopkg.in/yaml.v2 v2.3.0 // indirect
 )
go.sum: 75 lines changed
@@ -14,13 +14,14 @@ cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.60.0 h1:R+tDlceO7Ss+zyvtsdhTxacDyZ1k99xwskQ4FT7ruoM=
cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
cloud.google.com/go v0.61.0 h1:NLQf5e1OMspfNT1RAHOB3ublr1TW3YTXO8OiWwVjK2U=
cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -31,6 +32,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
contrib.go.opencensus.io/exporter/stackdriver v0.13.2 h1:5lKLBwUuq4S6pTbYaBtWmnay3eJfKNS3qL8M8HM5fM4=
contrib.go.opencensus.io/exporter/stackdriver v0.13.2/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -53,8 +55,8 @@ github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
github.com/ajstarks/svgo v0.0.0-20200320125537-f189e35d30ca h1:kWzLcty5V2rzOqJM7Tp/MfSX0RMSI1x4IOLApEefYxA=
github.com/ajstarks/svgo v0.0.0-20200320125537-f189e35d30ca/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/ajstarks/svgo v0.0.0-20200725142600-7a3c8b57fecb h1:EVl3FJLQCzSbgBezKo/1A4ADnJ4mtJZ0RvnNzDJ44nY=
github.com/ajstarks/svgo v0.0.0-20200725142600-7a3c8b57fecb/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -62,8 +64,8 @@ github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9Pq
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.33.1 h1:yz9XmNzPshz/lhfAZvLfMnIS9HPo8+boGRcWqDVX+T0=
github.com/aws/aws-sdk-go v1.33.1/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4=
github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/caos/logging v0.0.0-20191210002624-b3260f690a6a/go.mod h1:9LKiDE2ChuGv6CHYif/kiugrfEXu9AwDiFWSreX7Wp0=
@@ -74,6 +76,8 @@ github.com/caos/oidc v0.6.4/go.mod h1:f3bYdAHhN9WS3VxYgy5c9wgsiJ3qNrek1r7ktgHriD
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -82,8 +86,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go/v2 v2.0.0 h1:34FdeWSTsOANpqIGuvkEYF7mNjZnIbMrv1rHQcFefto=
github.com/cockroachdb/cockroach-go/v2 v2.0.0/go.mod h1:oReO3/FZVS/8KidC56m7CDiUZbfL278nwLq7gkuhxsc=
github.com/cockroachdb/cockroach-go/v2 v2.0.4 h1:XTQuJ7XHmqzLKjczTYWpiY1eLLKSNFvynAmMiCTHhxc=
github.com/cockroachdb/cockroach-go/v2 v2.0.4/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -110,6 +114,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -164,16 +169,19 @@ github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -202,8 +210,8 @@ github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1 h1:KUDFlmBg2buRWNzIcwLlKvfcnujcHQRQ1As1LoaCLAM=
github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
@@ -236,8 +244,8 @@ github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
github.com/jinzhu/gorm v1.9.14 h1:Kg3ShyTPcM6nzVo148fRrcMO6MNKuqtOUwnzqMgVniM=
github.com/jinzhu/gorm v1.9.14/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs=
github.com/jinzhu/gorm v1.9.15 h1:OdR1qFvtXktlxk73XFYMiYn9ywzTwytqe4QkuMRqc38=
github.com/jinzhu/gorm v1.9.15/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
@@ -246,6 +254,7 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kevinburke/go-types v0.0.0-20200309064045-f2d4aea18a7a h1:Z7+SSApKiwPjNic+NF9+j7h657Uyvdp/jA3iTKhpj4E=
@@ -254,8 +263,8 @@ github.com/kevinburke/go.uuid v1.2.0 h1:+1qP8NdkJfgOSTrrrUuA7h0djr1VY77HFXYjR+zU
github.com/kevinburke/go.uuid v1.2.0/go.mod h1:9gVngk1Hq1FjwewVAjsWEUT+xc6jP+p62CASaGmQ0NQ=
github.com/kevinburke/rest v0.0.0-20200429221318-0d2892b400f8 h1:KpuDJTaTPQAyWqETt70dHX3pMz65/XYTAZymrKKNvh8=
github.com/kevinburke/rest v0.0.0-20200429221318-0d2892b400f8/go.mod h1:pD+iEcdAGVXld5foVN4e24zb/6fnb60tgZPZ3P/3T/I=
github.com/kevinburke/twilio-go v0.0.0-20200630070730-ac4e42b98b94 h1:tPJDYIpi61JR+s7/dZBVRd0d4e0L9LuyxjXvvm6L2YM=
github.com/kevinburke/twilio-go v0.0.0-20200630070730-ac4e42b98b94/go.mod h1:Fm9alkN1/LPVY1eqD/psyMwPWE4VWl4P01/nTYZKzBk=
github.com/kevinburke/twilio-go v0.0.0-20200713162607-ff84c3703a29 h1:Wa+T+SIfcHXoRhZV6rv/COB5Sr9fKoQdfd/jlMuYIWI=
github.com/kevinburke/twilio-go v0.0.0-20200713162607-ff84c3703a29/go.mod h1:Fm9alkN1/LPVY1eqD/psyMwPWE4VWl4P01/nTYZKzBk=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -275,8 +284,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.7.1 h1:FvD5XTVTDt+KON6oIoOmHq6B6HzGuYEhuTMpEG0yuBQ=
github.com/lib/pq v1.7.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lyft/protoc-gen-star v0.4.10/go.mod h1:mE8fbna26u7aEA2QCVvvfBU/ZrPgocG1206xAFPcs94=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -288,6 +297,7 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
@@ -348,6 +358,7 @@ github.com/ttacon/libphonenumber v1.1.0 h1:tC6kE4t8UI4OqQVQjW5q8gSWhG2wnY5moEpSE
github.com/ttacon/libphonenumber v1.1.0/go.mod h1:E0TpmdVMq5dyVlQ7oenAkhsLu86OkUl+yR4OAxyEg/M=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -377,6 +388,8 @@ golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -422,6 +435,7 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -438,9 +452,12 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -493,10 +510,11 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ix
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200727154430-2d971f7391a4 h1:gtF+PUC1CD1a9ocwQHbVNXuTp6RQsAYt6tpi6zjT81Y=
golang.org/x/sys v0.0.0-20200727154430-2d971f7391a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -549,10 +567,12 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5X
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200702044944-0cc1aa72b347 h1:/e4fNMHdLn7SQSxTrRZTma2xjQW6ELdxcnpqMhpo9X4=
golang.org/x/tools v0.0.0-20200702044944-0cc1aa72b347/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200725200936-102e7d357031 h1:VtIxiVHWPhnny2ZTi4f9/2diZKqyLaq3FUTuud5+khA=
golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -578,6 +598,8 @@ google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -612,11 +634,13 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200702021140-07506425bd67 h1:4BC1C1i30F3MZeiIO6y6IIo4DxrtOwITK87bQl3lhFA=
google.golang.org/genproto v0.0.0-20200702021140-07506425bd67/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d h1:HJaAqDnKreMkv+AQyf1Mcw0jEmL9kKBNL07RDJu1N/k=
google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -669,6 +693,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -5,7 +5,7 @@ import (

 	"github.com/caos/zitadel/internal/admin/repository/eventsourcing/view"
 	"github.com/caos/zitadel/internal/config/types"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
+	"github.com/caos/zitadel/internal/eventstore/query"
 	usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
 )

@@ -26,8 +26,8 @@ type EventstoreRepos struct {
 	UserEvents *usr_event.UserEventstore
 }

-func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, repos EventstoreRepos) []spooler.Handler {
-	return []spooler.Handler{
+func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, repos EventstoreRepos) []query.Handler {
+	return []query.Handler{
 		&Org{handler: handler{view, bulkLimit, configs.cycleDuration("Org"), errorCount}},
 		&IamMember{handler: handler{view, bulkLimit, configs.cycleDuration("IamMember"), errorCount}, userEvents: repos.UserEvents},
 	}
@@ -40,3 +40,11 @@ func (configs Configs) cycleDuration(viewModel string) time.Duration {
 	}
 	return c.MinimumCycleDuration.Duration
 }
+
+func (h *handler) MinimumCycleDuration() time.Duration {
+	return h.cycleDuration
+}
+
+func (h *handler) QueryLimit() uint64 {
+	return h.bulkLimit
+}
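The query package that replaces the spooler import above is not included in this commit's diff. As a hedged sketch only, its Handler interface plausibly has the method set visible across these files (ViewModel, MinimumCycleDuration, QueryLimit, Reduce, OnError); the exact definition may differ:

    // Sketch of query.Handler; inferred from this diff, not copied from the repository.
    package query

    import (
        "time"

        "github.com/caos/zitadel/internal/eventstore/models"
    )

    type Handler interface {
        ViewModel() string                   // view table the handler maintains
        MinimumCycleDuration() time.Duration // pause between spooler runs for this view
        QueryLimit() uint64                  // maximum events fetched per run (BulkLimit)
        Reduce(event *models.Event) error    // apply a single event to the view
        OnError(event *models.Event, err error) error
    }

The per-handler MinimumCycleDuration methods removed in the hunks below become redundant because the shared handler base now provides them; its fields, guessed from the positional literals handler{view, bulkLimit, configs.cycleDuration(...), errorCount}, would look roughly like:

    // Sketch of the embedded base in package handler; field names are assumptions.
    type handler struct {
        view          *view.View
        bulkLimit     uint64
        cycleDuration time.Duration
        errorCount    uint64
    }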
@@ -2,14 +2,14 @@ package handler

 import (
 	"context"
-	"github.com/caos/zitadel/internal/iam/repository/eventsourcing/model"
-	iam_model "github.com/caos/zitadel/internal/iam/repository/view/model"
 	"time"

 	"github.com/caos/logging"

 	"github.com/caos/zitadel/internal/eventstore/models"
 	es_models "github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
+	"github.com/caos/zitadel/internal/iam/repository/eventsourcing/model"
+	iam_model "github.com/caos/zitadel/internal/iam/repository/view/model"
 	usr_model "github.com/caos/zitadel/internal/user/model"
 	usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
 	usr_es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
@@ -24,8 +24,6 @@ const (
 	iamMemberTable = "adminapi.iam_members"
 )

-func (m *IamMember) MinimumCycleDuration() time.Duration { return m.cycleDuration }
-
 func (m *IamMember) ViewModel() string {
 	return iamMemberTable
 }
@@ -1,14 +1,13 @@
 package handler

 import (
-	"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
-	org_model "github.com/caos/zitadel/internal/org/repository/view/model"
 	"time"

 	"github.com/caos/logging"

 	es_models "github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
 	"github.com/caos/zitadel/internal/org/repository/eventsourcing"
+	"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
+	org_model "github.com/caos/zitadel/internal/org/repository/view/model"
 )

 type Org struct {
@@ -19,8 +18,6 @@ const (
 	orgTable = "adminapi.orgs"
 )

-func (o *Org) MinimumCycleDuration() time.Duration { return o.cycleDuration }
-
 func (o *Org) ViewModel() string {
 	return orgTable
 }
@@ -1,127 +0,0 @@ (entire file removed)
package spooler

import (
	"database/sql"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
)

type dbMock struct {
	db   *sql.DB
	mock sqlmock.Sqlmock
}

func mockDB(t *testing.T) *dbMock {
	mockDB := dbMock{}
	var err error
	mockDB.db, mockDB.mock, err = sqlmock.New()
	if err != nil {
		t.Fatalf("error occured while creating stub db %v", err)
	}

	mockDB.mock.MatchExpectationsInOrder(true)

	return &mockDB
}

func (db *dbMock) expectCommit() *dbMock {
	db.mock.ExpectCommit()

	return db
}

func (db *dbMock) expectRollback() *dbMock {
	db.mock.ExpectRollback()

	return db
}

func (db *dbMock) expectBegin() *dbMock {
	db.mock.ExpectBegin()

	return db
}

func (db *dbMock) expectSavepoint() *dbMock {
	db.mock.ExpectExec("SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))
	return db
}

func (db *dbMock) expectReleaseSavepoint() *dbMock {
	db.mock.ExpectExec("RELEASE SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))

	return db
}

func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
	query := db.mock.
		ExpectExec(`INSERT INTO adminapi\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
		WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
		WillReturnResult(sqlmock.NewResult(1, 1))

	if affectedRows == 0 {
		query.WillReturnResult(sqlmock.NewResult(0, 0))
	} else {
		query.WillReturnResult(sqlmock.NewResult(1, affectedRows))
	}

	return db
}

func Test_locker_Renew(t *testing.T) {
	type fields struct {
		db *dbMock
	}
	type args struct {
		lockerID  string
		viewModel string
		waitTime  time.Duration
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "renew succeeded",
			fields: fields{
				db: mockDB(t).
					expectBegin().
					expectSavepoint().
					expectRenew("locker", "view", 1).
					expectReleaseSavepoint().
					expectCommit(),
			},
			args:    args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
			wantErr: false,
		},
		{
			name: "renew now rows updated",
			fields: fields{
				db: mockDB(t).
					expectBegin().
					expectSavepoint().
					expectRenew("locker", "view", 0).
					expectRollback(),
			},
			args:    args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			l := &locker{
				dbClient: tt.fields.db.db,
			}
			if err := l.Renew(tt.args.lockerID, tt.args.viewModel, tt.args.waitTime); (err != nil) != tt.wantErr {
				t.Errorf("locker.Renew() error = %v, wantErr %v", err, tt.wantErr)
			}
			if err := tt.fields.db.mock.ExpectationsWereMet(); err != nil {
				t.Errorf("not all database expectations met: %v", err)
			}
		})
	}
}
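The Renew implementation these deleted tests exercised is not shown in this commit. A hedged sketch consistent with the mocked statement above: crdb.ExecuteTx from cockroach-go/v2 (already a dependency in go.mod) produces exactly the BEGIN / SAVEPOINT / RELEASE SAVEPOINT / COMMIT sequence the mock expects, and a zero rows-affected result is turned into an error so the rollback case fails as the second test demands. The error value and the encoding of the wait time ($3/$4) are assumptions.

    package spooler

    import (
        "context"
        "database/sql"
        "fmt"
        "time"

        "github.com/cockroachdb/cockroach-go/v2/crdb"
    )

    type locker struct {
        dbClient *sql.DB
    }

    // renewLockStmt mirrors the statement asserted by the deleted tests above.
    const renewLockStmt = `INSERT INTO adminapi.locks (object_type, locker_id, locked_until)` +
        ` VALUES ($1, $2, now()+$3) ON CONFLICT (object_type)` +
        ` DO UPDATE SET locked_until = now()+$4, locker_id = $5` +
        ` WHERE (locks.locked_until < now() OR locks.locker_id = $6) AND locks.object_type = $7`

    func (l *locker) Renew(lockerID, viewModel string, waitTime time.Duration) error {
        return crdb.ExecuteTx(context.Background(), l.dbClient, nil, func(tx *sql.Tx) error {
            // Passing the wait time as seconds is an assumption; the mock accepts AnyArg().
            res, err := tx.Exec(renewLockStmt,
                viewModel, lockerID, waitTime.Seconds(),
                waitTime.Seconds(), lockerID, lockerID, viewModel)
            if err != nil {
                return err
            }
            if rows, _ := res.RowsAffected(); rows == 0 {
                // Some other worker still holds an unexpired lock on this view.
                return fmt.Errorf("view %s is locked by another worker", viewModel)
            }
            return nil
        })
    }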
@@ -12,16 +12,16 @@ import (
 type SpoolerConfig struct {
 	BulkLimit             uint64
 	FailureCountUntilSkip uint64
-	ConcurrentTasks       int
+	ConcurrentWorkers     int
 	Handlers              handler.Configs
 }

 func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, sql *sql.DB, repos handler.EventstoreRepos) *spooler.Spooler {
 	spoolerConfig := spooler.Config{
-		Eventstore:      es,
-		Locker:          &locker{dbClient: sql},
-		ConcurrentTasks: c.ConcurrentTasks,
-		ViewHandlers:    handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, repos),
+		Eventstore:        es,
+		Locker:            &locker{dbClient: sql},
+		ConcurrentWorkers: c.ConcurrentWorkers,
+		ViewHandlers:      handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, repos),
 	}
 	spool := spoolerConfig.New()
 	spool.Start()
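Neither spooler.Config nor the worker loop it feeds appears in this diff, so the following is only a sketch of the pattern the ConcurrentTasks to ConcurrentWorkers rename points at: a fixed pool of worker goroutines draining a shared handler queue rather than a per-task concurrency budget. Every name here except ConcurrentWorkers is an assumption.

    package spooler

    import (
        "log"
        "sync"
    )

    // Handler is a stand-in for query.Handler (see the sketch earlier).
    type Handler interface {
        ViewModel() string
    }

    // Spooler is a hedged sketch; only the ConcurrentWorkers name comes from this commit.
    type Spooler struct {
        ConcurrentWorkers int
        handlers          []Handler
    }

    func (s *Spooler) Start() {
        if s.ConcurrentWorkers < 1 {
            return // no workers means spooling never runs
        }
        queue := make(chan Handler)
        var wg sync.WaitGroup
        for i := 0; i < s.ConcurrentWorkers; i++ {
            wg.Add(1)
            go func(workerID int) {
                defer wg.Done()
                for h := range queue {
                    // The real worker would lock the view, fetch up to QueryLimit()
                    // events, reduce them, and requeue the handler after its
                    // MinimumCycleDuration().
                    log.Printf("worker %d spooled view %s", workerID, h.ViewModel())
                }
            }(i)
        }
        for _, h := range s.handlers {
            queue <- h
        }
        close(queue)
        wg.Wait()
    }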
@@ -2,13 +2,13 @@ package handler

 import (
 	"github.com/caos/logging"

 	"github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
-	"github.com/caos/zitadel/internal/project/repository/eventsourcing"
+	proj_event "github.com/caos/zitadel/internal/project/repository/eventsourcing"
 	es_model "github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
 	view_model "github.com/caos/zitadel/internal/project/repository/view/model"
 	"time"
 )

 type Application struct {
@@ -20,8 +20,6 @@ const (
 	applicationTable = "auth.applications"
 )

-func (p *Application) MinimumCycleDuration() time.Duration { return p.cycleDuration }
-
 func (p *Application) ViewModel() string {
 	return applicationTable
 }
@@ -1,16 +1,17 @@
 package handler

 import (
+	"time"
+
 	sd "github.com/caos/zitadel/internal/config/systemdefaults"
 	"github.com/caos/zitadel/internal/eventstore"
+	"github.com/caos/zitadel/internal/eventstore/query"
 	iam_events "github.com/caos/zitadel/internal/iam/repository/eventsourcing"
 	org_events "github.com/caos/zitadel/internal/org/repository/eventsourcing"
 	proj_event "github.com/caos/zitadel/internal/project/repository/eventsourcing"
-	"time"

 	"github.com/caos/zitadel/internal/auth/repository/eventsourcing/view"
 	"github.com/caos/zitadel/internal/config/types"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
 	usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
 )

@@ -34,8 +35,8 @@ type EventstoreRepos struct {
 	IamEvents *iam_events.IamEventstore
 }

-func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults) []spooler.Handler {
-	return []spooler.Handler{
+func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults) []query.Handler {
+	return []query.Handler{
 		&User{handler: handler{view, bulkLimit, configs.cycleDuration("User"), errorCount}, orgEvents: repos.OrgEvents},
 		&UserSession{handler: handler{view, bulkLimit, configs.cycleDuration("UserSession"), errorCount}, userEvents: repos.UserEvents},
 		&Token{handler: handler{view, bulkLimit, configs.cycleDuration("Token"), errorCount}, ProjectEvents: repos.ProjectEvents},
@@ -60,3 +61,11 @@ func (configs Configs) cycleDuration(viewModel string) time.Duration {
 	}
 	return c.MinimumCycleDuration.Duration
 }
+
+func (h *handler) MinimumCycleDuration() time.Duration {
+	return h.cycleDuration
+}
+
+func (h *handler) QueryLimit() uint64 {
+	return h.bulkLimit
+}
@@ -21,8 +21,6 @@ const (
 	keyTable = "auth.keys"
 )

-func (k *Key) MinimumCycleDuration() time.Duration { return k.cycleDuration }
-
 func (k *Key) ViewModel() string {
 	return keyTable
 }
@@ -42,11 +40,13 @@ func (k *Key) Reduce(event *models.Event) error {
 		if err != nil {
 			return err
 		}
+		if privateKey.Expiry.Before(time.Now()) && publicKey.Expiry.Before(time.Now()) {
+			return k.view.ProcessedKeySequence(event.Sequence)
+		}
 		return k.view.PutKeys(privateKey, publicKey, event.Sequence)
 	default:
 		return k.view.ProcessedKeySequence(event.Sequence)
 	}
-	return nil
 }

 func (k *Key) OnError(event *models.Event, err error) error {
@@ -2,12 +2,12 @@ package handler

 import (
 	"github.com/caos/logging"

 	es_models "github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
+	"github.com/caos/zitadel/internal/org/repository/eventsourcing"
 	"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
 	org_model "github.com/caos/zitadel/internal/org/repository/view/model"
 	"time"
 )

 type Org struct {
@@ -18,8 +18,6 @@ const (
 	orgTable = "auth.orgs"
 )

-func (o *Org) MinimumCycleDuration() time.Duration { return o.cycleDuration }
-
 func (o *Org) ViewModel() string {
 	return orgTable
 }
@@ -3,7 +3,6 @@ package handler
 import (
 	"context"
 	"encoding/json"
-	"time"

 	"github.com/caos/logging"

@@ -25,8 +24,6 @@ const (
 	tokenTable = "auth.tokens"
 )

-func (u *Token) MinimumCycleDuration() time.Duration { return u.cycleDuration }
-
 func (u *Token) ViewModel() string {
 	return tokenTable
 }
@@ -2,7 +2,6 @@ package handler

 import (
 	"context"
-	"time"

 	es_models "github.com/caos/zitadel/internal/eventstore/models"
 	org_model "github.com/caos/zitadel/internal/org/model"
@@ -28,8 +27,6 @@ const (
 	userTable = "auth.users"
 )

-func (p *User) MinimumCycleDuration() time.Duration { return p.cycleDuration }
-
 func (p *User) ViewModel() string {
 	return userTable
 }
@@ -3,9 +3,9 @@ package handler
 import (
 	"context"
 	"strings"
-	"time"

 	"github.com/caos/logging"

+	"github.com/caos/zitadel/internal/errors"
 	caos_errs "github.com/caos/zitadel/internal/errors"
 	"github.com/caos/zitadel/internal/eventstore"
@@ -42,8 +42,6 @@ const (
 	userGrantTable = "auth.user_grants"
 )

-func (u *UserGrant) MinimumCycleDuration() time.Duration { return u.cycleDuration }
-
 func (u *UserGrant) ViewModel() string {
 	return userGrantTable
 }
@@ -1,8 +1,6 @@
 package handler

 import (
-	"time"
-
 	req_model "github.com/caos/zitadel/internal/auth_request/model"
 	"github.com/caos/zitadel/internal/errors"
 	es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
@@ -25,8 +23,6 @@ const (
 	userSessionTable = "auth.user_sessions"
 )

-func (u *UserSession) MinimumCycleDuration() time.Duration { return u.cycleDuration }
-
 func (u *UserSession) ViewModel() string {
 	return userSessionTable
 }
@@ -1,127 +0,0 @@ (entire file removed; identical to the adminapi lock test above except that the statement targets auth.locks)
package spooler

import (
	"database/sql"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
)

type dbMock struct {
	db   *sql.DB
	mock sqlmock.Sqlmock
}

func mockDB(t *testing.T) *dbMock {
	mockDB := dbMock{}
	var err error
	mockDB.db, mockDB.mock, err = sqlmock.New()
	if err != nil {
		t.Fatalf("error occured while creating stub db %v", err)
	}

	mockDB.mock.MatchExpectationsInOrder(true)

	return &mockDB
}

func (db *dbMock) expectCommit() *dbMock {
	db.mock.ExpectCommit()

	return db
}

func (db *dbMock) expectRollback() *dbMock {
	db.mock.ExpectRollback()

	return db
}

func (db *dbMock) expectBegin() *dbMock {
	db.mock.ExpectBegin()

	return db
}

func (db *dbMock) expectSavepoint() *dbMock {
	db.mock.ExpectExec("SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))
	return db
}

func (db *dbMock) expectReleaseSavepoint() *dbMock {
	db.mock.ExpectExec("RELEASE SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))

	return db
}

func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
	query := db.mock.
		ExpectExec(`INSERT INTO auth\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
		WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
		WillReturnResult(sqlmock.NewResult(1, 1))

	if affectedRows == 0 {
		query.WillReturnResult(sqlmock.NewResult(0, 0))
	} else {
		query.WillReturnResult(sqlmock.NewResult(1, affectedRows))
	}

	return db
}

func Test_locker_Renew(t *testing.T) {
	type fields struct {
		db *dbMock
	}
	type args struct {
		lockerID  string
		viewModel string
		waitTime  time.Duration
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "renew succeeded",
			fields: fields{
				db: mockDB(t).
					expectBegin().
					expectSavepoint().
					expectRenew("locker", "view", 1).
					expectReleaseSavepoint().
					expectCommit(),
			},
			args:    args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
			wantErr: false,
		},
		{
			name: "renew now rows updated",
			fields: fields{
				db: mockDB(t).
					expectBegin().
					expectSavepoint().
					expectRenew("locker", "view", 0).
					expectRollback(),
			},
			args:    args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			l := &locker{
				dbClient: tt.fields.db.db,
			}
			if err := l.Renew(tt.args.lockerID, tt.args.viewModel, tt.args.waitTime); (err != nil) != tt.wantErr {
				t.Errorf("locker.Renew() error = %v, wantErr %v", err, tt.wantErr)
			}
			if err := tt.fields.db.mock.ExpectationsWereMet(); err != nil {
				t.Errorf("not all database expectations met: %v", err)
			}
		})
	}
}
@@ -2,6 +2,7 @@ package spooler

 import (
 	"database/sql"

 	sd "github.com/caos/zitadel/internal/config/systemdefaults"

 	"github.com/caos/zitadel/internal/auth/repository/eventsourcing/handler"
@@ -14,16 +15,16 @@ import (
 type SpoolerConfig struct {
 	BulkLimit             uint64
 	FailureCountUntilSkip uint64
-	ConcurrentTasks       int
+	ConcurrentWorkers     int
 	Handlers              handler.Configs
 }

-func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, sql *sql.DB, repos handler.EventstoreRepos, systemDefaults sd.SystemDefaults) *spooler.Spooler {
+func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, client *sql.DB, repos handler.EventstoreRepos, systemDefaults sd.SystemDefaults) *spooler.Spooler {
 	spoolerConfig := spooler.Config{
-		Eventstore:      es,
-		Locker:          &locker{dbClient: sql},
-		ConcurrentTasks: c.ConcurrentTasks,
-		ViewHandlers:    handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, repos, systemDefaults),
+		Eventstore:        es,
+		Locker:            &locker{dbClient: client},
+		ConcurrentWorkers: c.ConcurrentWorkers,
+		ViewHandlers:      handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, repos, systemDefaults),
 	}
 	spool := spoolerConfig.New()
 	spool.Start()
@@ -2,12 +2,12 @@ package handler

 import (
 	"github.com/caos/logging"

 	"github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
+	"github.com/caos/zitadel/internal/project/repository/eventsourcing"
 	es_model "github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
 	view_model "github.com/caos/zitadel/internal/project/repository/view/model"
 	"time"
 )

 type Application struct {
@@ -18,8 +18,6 @@ const (
 	applicationTable = "authz.applications"
 )

-func (p *Application) MinimumCycleDuration() time.Duration { return p.cycleDuration }
-
 func (p *Application) ViewModel() string {
 	return applicationTable
 }
@@ -1,14 +1,15 @@
 package handler

 import (
+	"time"
+
 	sd "github.com/caos/zitadel/internal/config/systemdefaults"
 	"github.com/caos/zitadel/internal/eventstore"
+	"github.com/caos/zitadel/internal/eventstore/query"
 	iam_events "github.com/caos/zitadel/internal/iam/repository/eventsourcing"
-	"time"

 	"github.com/caos/zitadel/internal/authz/repository/eventsourcing/view"
 	"github.com/caos/zitadel/internal/config/types"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
 )

 type Configs map[string]*Config
@@ -28,8 +29,8 @@ type EventstoreRepos struct {
 	IamEvents *iam_events.IamEventstore
 }

-func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults) []spooler.Handler {
-	return []spooler.Handler{
+func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults) []query.Handler {
+	return []query.Handler{
 		&UserGrant{
 			handler:    handler{view, bulkLimit, configs.cycleDuration("UserGrant"), errorCount},
 			eventstore: eventstore,
@@ -48,3 +49,11 @@ func (configs Configs) cycleDuration(viewModel string) time.Duration {
 	}
 	return c.MinimumCycleDuration.Duration
 }
+
+func (h *handler) MinimumCycleDuration() time.Duration {
+	return h.cycleDuration
+}
+
+func (h *handler) QueryLimit() uint64 {
+	return h.bulkLimit
+}
@@ -2,12 +2,12 @@ package handler

 import (
 	"github.com/caos/logging"

 	es_models "github.com/caos/zitadel/internal/eventstore/models"
-	"github.com/caos/zitadel/internal/eventstore/spooler"
+	"github.com/caos/zitadel/internal/org/repository/eventsourcing"
 	"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
 	org_model "github.com/caos/zitadel/internal/org/repository/view/model"
 	"time"
 )

 type Org struct {
@@ -18,8 +18,6 @@ const (
 	orgTable = "authz.orgs"
 )

-func (o *Org) MinimumCycleDuration() time.Duration { return o.cycleDuration }
-
 func (o *Org) ViewModel() string {
 	return orgTable
 }
@@ -3,9 +3,9 @@ package handler
import (
"context"
"strings"
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/errors"
caos_errs "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore"

@@ -31,8 +31,6 @@ const (
userGrantTable = "authz.user_grants"
)

func (u *UserGrant) MinimumCycleDuration() time.Duration { return u.cycleDuration }

func (u *UserGrant) ViewModel() string {
return userGrantTable
}
@@ -1,127 +0,0 @@
package spooler

import (
"database/sql"
"testing"
"time"

"github.com/DATA-DOG/go-sqlmock"
)

type dbMock struct {
db *sql.DB
mock sqlmock.Sqlmock
}

func mockDB(t *testing.T) *dbMock {
mockDB := dbMock{}
var err error
mockDB.db, mockDB.mock, err = sqlmock.New()
if err != nil {
t.Fatalf("error occurred while creating stub db %v", err)
}

mockDB.mock.MatchExpectationsInOrder(true)

return &mockDB
}

func (db *dbMock) expectCommit() *dbMock {
db.mock.ExpectCommit()

return db
}

func (db *dbMock) expectRollback() *dbMock {
db.mock.ExpectRollback()

return db
}

func (db *dbMock) expectBegin() *dbMock {
db.mock.ExpectBegin()

return db
}

func (db *dbMock) expectSavepoint() *dbMock {
db.mock.ExpectExec("SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))
return db
}

func (db *dbMock) expectReleaseSavepoint() *dbMock {
db.mock.ExpectExec("RELEASE SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))

return db
}

func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
query := db.mock.
ExpectExec(`INSERT INTO authz\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
WillReturnResult(sqlmock.NewResult(1, 1))

if affectedRows == 0 {
query.WillReturnResult(sqlmock.NewResult(0, 0))
} else {
query.WillReturnResult(sqlmock.NewResult(1, affectedRows))
}

return db
}

func Test_locker_Renew(t *testing.T) {
type fields struct {
db *dbMock
}
type args struct {
lockerID string
viewModel string
waitTime time.Duration
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "renew succeeded",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 1).
expectReleaseSavepoint().
expectCommit(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: false,
},
{
name: "renew no rows updated",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 0).
expectRollback(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
l := &locker{
dbClient: tt.fields.db.db,
}
if err := l.Renew(tt.args.lockerID, tt.args.viewModel, tt.args.waitTime); (err != nil) != tt.wantErr {
t.Errorf("locker.Renew() error = %v, wantErr %v", err, tt.wantErr)
}
if err := tt.fields.db.mock.ExpectationsWereMet(); err != nil {
t.Errorf("not all database expectations met: %v", err)
}
})
}
}
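The deleted test above pinned down the lock upsert that Renew issues against authz.locks. A hedged sketch of the production side it verified follows; the SQL matches the expectation string verbatim, while the Go wiring (a plain Exec instead of the savepoint-wrapped transaction the mock expects, and passing waitTime directly as the interval parameter) is an assumption of this sketch:

package spooler

import (
	"database/sql"
	"errors"
	"time"
)

const renewLockStmt = `INSERT INTO authz.locks (object_type, locker_id, locked_until) VALUES ($1, $2, now()+$3) ` +
	`ON CONFLICT (object_type) DO UPDATE SET locked_until = now()+$4, locker_id = $5 ` +
	`WHERE (locks.locked_until < now() OR locks.locker_id = $6) AND locks.object_type = $7`

type locker struct {
	dbClient *sql.DB
}

// Renew acquires or extends the lock for viewModel. Zero affected rows means
// another worker still holds an unexpired lock, which the caller treats as
// "do not spool this view now".
func (l *locker) Renew(lockerID, viewModel string, waitTime time.Duration) error {
	res, err := l.dbClient.Exec(renewLockStmt,
		viewModel, lockerID, waitTime, waitTime, lockerID, lockerID, viewModel)
	if err != nil {
		return err
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return errors.New("lock already held by another worker")
	}
	return nil
}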
@@ -2,6 +2,7 @@ package spooler

import (
"database/sql"

sd "github.com/caos/zitadel/internal/config/systemdefaults"

"github.com/caos/zitadel/internal/authz/repository/eventsourcing/handler"

@@ -14,16 +15,16 @@ import (
type SpoolerConfig struct {
BulkLimit uint64
FailureCountUntilSkip uint64
ConcurrentTasks int
ConcurrentWorkers int
Handlers handler.Configs
}

func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, sql *sql.DB, repos handler.EventstoreRepos, systemDefaults sd.SystemDefaults) *spooler.Spooler {
spoolerConfig := spooler.Config{
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentTasks: c.ConcurrentTasks,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, repos, systemDefaults),
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentWorkers: c.ConcurrentWorkers,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, repos, systemDefaults),
}
spool := spoolerConfig.New()
spool.Start()
@@ -38,6 +38,7 @@ func (s *SQL) connectionString() string {
"port=" + s.Port,
"user=" + s.User,
"dbname=" + s.Database,
"application_name=zitadel",
"sslmode=" + s.SSL.Mode,
}
if s.Password != "" {
@@ -13,6 +13,7 @@ type Eventstore interface {
Health(ctx context.Context) error
PushAggregates(ctx context.Context, aggregates ...*models.Aggregate) error
FilterEvents(ctx context.Context, searchQuery *models.SearchQuery) (events []*models.Event, err error)
LatestSequence(ctx context.Context, searchQuery *models.SearchQueryFactory) (uint64, error)
}

var _ Eventstore = (*eventstore)(nil)

@@ -48,7 +49,14 @@ func (es *eventstore) FilterEvents(ctx context.Context, searchQuery *models.Sear
if err := searchQuery.Validate(); err != nil {
return nil, err
}
return es.repo.Filter(ctx, searchQuery)
return es.repo.Filter(ctx, models.FactoryFromSearchQuery(searchQuery))
}

func (es *eventstore) LatestSequence(ctx context.Context, queryFactory *models.SearchQueryFactory) (uint64, error) {
sequenceFactory := *queryFactory
sequenceFactory = *(&sequenceFactory).Columns(models.Columns_Max_Sequence)
sequenceFactory = *(&sequenceFactory).SequenceGreater(0)
return es.repo.LatestSequence(ctx, &sequenceFactory)
}

func (es *eventstore) Health(ctx context.Context) error {
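Note that LatestSequence copies the factory before forcing the max-sequence column, so the caller's factory is never mutated. A hypothetical call site, not part of the change set:

package example

import (
	"context"

	"github.com/caos/zitadel/internal/eventstore"
	"github.com/caos/zitadel/internal/eventstore/models"
)

// latestUserSequence is an illustrative helper: the factory passed in is
// copied inside LatestSequence, so the caller can keep using it for
// further queries afterwards.
func latestUserSequence(ctx context.Context, es eventstore.Eventstore) (uint64, error) {
	factory := models.NewSearchQueryFactory("user")
	return es.LatestSequence(ctx, factory)
}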
@@ -13,5 +13,7 @@ type Repository interface {
// This call is transaction safe. The transaction will be rolled back if one event fails
PushAggregates(ctx context.Context, aggregates ...*models.Aggregate) error
// Filter returns all events matching the given search query
Filter(ctx context.Context, searchQuery *models.SearchQuery) (events []*models.Event, err error)
Filter(ctx context.Context, searchQuery *models.SearchQueryFactory) (events []*models.Event, err error)
// LatestSequence returns the latest sequence found by the search query
LatestSequence(ctx context.Context, queryFactory *models.SearchQueryFactory) (uint64, error)
}
@@ -2,26 +2,24 @@ package sql

import (
"database/sql"
"fmt"
"regexp"
"testing"
"time"

"github.com/DATA-DOG/go-sqlmock"
"github.com/caos/zitadel/internal/eventstore/models"
"github.com/lib/pq"
)

const (
selectEscaped = `SELECT id, creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events`
selectEscaped = `SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore\.events WHERE aggregate_type = \$1`
)

var (
eventColumns = []string{"id", "creation_date", "event_type", "event_sequence", "previous_sequence", "event_data", "editor_service", "editor_user", "resource_owner", "aggregate_type", "aggregate_id", "aggregate_version"}
expectedFilterEventsLimitFormat = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence LIMIT \$1`).String()
eventColumns = []string{"creation_date", "event_type", "event_sequence", "previous_sequence", "event_data", "editor_service", "editor_user", "resource_owner", "aggregate_type", "aggregate_id", "aggregate_version"}
expectedFilterEventsLimitFormat = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence LIMIT \$2`).String()
expectedFilterEventsDescFormat = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence DESC`).String()
expectedFilterEventsAggregateIDLimit = regexp.MustCompile(selectEscaped + ` WHERE aggregate_id = \$1 ORDER BY event_sequence LIMIT \$2`).String()
expectedFilterEventsAggregateIDTypeLimit = regexp.MustCompile(selectEscaped + ` WHERE aggregate_id = \$1 AND aggregate_type = ANY\(\$2\) ORDER BY event_sequence LIMIT \$3`).String()
expectedFilterEventsAggregateIDLimit = regexp.MustCompile(selectEscaped + ` AND aggregate_id = \$2 ORDER BY event_sequence LIMIT \$3`).String()
expectedFilterEventsAggregateIDTypeLimit = regexp.MustCompile(selectEscaped + ` AND aggregate_id = \$2 ORDER BY event_sequence LIMIT \$3`).String()
expectedGetAllEvents = regexp.MustCompile(selectEscaped + ` ORDER BY event_sequence`).String()

expectedInsertStatement = regexp.MustCompile(`INSERT INTO eventstore\.events ` +

@@ -29,7 +27,7 @@ var (
`SELECT \$1, \$2, \$3, \$4, COALESCE\(\$5, now\(\)\), \$6, \$7, \$8, \$9, \$10 ` +
`WHERE EXISTS \(` +
`SELECT 1 FROM eventstore\.events WHERE aggregate_type = \$11 AND aggregate_id = \$12 HAVING MAX\(event_sequence\) = \$13 OR \(\$14::BIGINT IS NULL AND COUNT\(\*\) = 0\)\) ` +
`RETURNING id, event_sequence, creation_date`).String()
`RETURNING event_sequence, creation_date`).String()
)

type dbMock struct {

@@ -98,15 +96,15 @@ func (db *dbMock) expectRollback(err error) *dbMock {
return db
}

func (db *dbMock) expectInsertEvent(e *models.Event, returnedID string, returnedSequence uint64) *dbMock {
func (db *dbMock) expectInsertEvent(e *models.Event, returnedSequence uint64) *dbMock {
db.mock.ExpectQuery(expectedInsertStatement).
WithArgs(
e.Type, e.AggregateType, e.AggregateID, e.AggregateVersion, sqlmock.AnyArg(), Data(e.Data), e.EditorUser, e.EditorService, e.ResourceOwner, Sequence(e.PreviousSequence),
e.AggregateType, e.AggregateID, Sequence(e.PreviousSequence), Sequence(e.PreviousSequence),
).
WillReturnRows(
sqlmock.NewRows([]string{"id", "event_sequence", "creation_date"}).
AddRow(returnedID, returnedSequence, time.Now().UTC()),
sqlmock.NewRows([]string{"event_sequence", "creation_date"}).
AddRow(returnedSequence, time.Now().UTC()),
)

return db

@@ -123,45 +121,45 @@ func (db *dbMock) expectInsertEventError(e *models.Event) *dbMock {
return db
}

func (db *dbMock) expectFilterEventsLimit(limit uint64, eventCount int) *dbMock {
func (db *dbMock) expectFilterEventsLimit(aggregateType string, limit uint64, eventCount int) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := 0; i < eventCount; i++ {
rows.AddRow(fmt.Sprint("event", i), time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsLimitFormat).
WithArgs(limit).
WithArgs(aggregateType, limit).
WillReturnRows(rows)
return db
}

func (db *dbMock) expectFilterEventsDesc(eventCount int) *dbMock {
func (db *dbMock) expectFilterEventsDesc(aggregateType string, eventCount int) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := eventCount; i > 0; i-- {
rows.AddRow(fmt.Sprint("event", i), time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsDescFormat).
WillReturnRows(rows)
return db
}

func (db *dbMock) expectFilterEventsAggregateIDLimit(aggregateID string, limit uint64) *dbMock {
func (db *dbMock) expectFilterEventsAggregateIDLimit(aggregateType, aggregateID string, limit uint64) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := limit; i > 0; i-- {
rows.AddRow(fmt.Sprint("event", i), time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsAggregateIDLimit).
WithArgs(aggregateID, limit).
WithArgs(aggregateType, aggregateID, limit).
WillReturnRows(rows)
return db
}

func (db *dbMock) expectFilterEventsAggregateIDTypeLimit(aggregateID, aggregateType string, limit uint64) *dbMock {
func (db *dbMock) expectFilterEventsAggregateIDTypeLimit(aggregateType, aggregateID string, limit uint64) *dbMock {
rows := sqlmock.NewRows(eventColumns)
for i := limit; i > 0; i-- {
rows.AddRow(fmt.Sprint("event", i), time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
rows.AddRow(time.Now(), "eventType", Sequence(i+1), Sequence(i), nil, "svc", "hodor", "org", "aggType", "aggID", "v1.0.0")
}
db.mock.ExpectQuery(expectedFilterEventsAggregateIDTypeLimit).
WithArgs(aggregateID, pq.Array([]string{aggregateType}), limit).
WithArgs(aggregateType, aggregateID, limit).
WillReturnRows(rows)
return db
}

@@ -172,8 +170,24 @@ func (db *dbMock) expectFilterEventsError(returnedErr error) *dbMock {
return db
}

func (db *dbMock) expectPrepareInsert() *dbMock {
db.mock.ExpectPrepare(expectedInsertStatement)
func (db *dbMock) expectLatestSequenceFilter(aggregateType string, sequence Sequence) *dbMock {
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events WHERE aggregate_type = \$1`).
WithArgs(aggregateType).
WillReturnRows(sqlmock.NewRows([]string{"max_sequence"}).AddRow(sequence))
return db
}

func (db *dbMock) expectLatestSequenceFilterError(aggregateType string, err error) *dbMock {
db.mock.ExpectQuery(`SELECT MAX\(event_sequence\) FROM eventstore\.events WHERE aggregate_type = \$1`).
WithArgs(aggregateType).WillReturnError(err)
return db
}

func (db *dbMock) expectPrepareInsert(err error) *dbMock {
prepare := db.mock.ExpectPrepare(expectedInsertStatement)
if err != nil {
prepare.WillReturnError(err)
}

return db
}
@@ -3,44 +3,26 @@ package sql
import (
"context"
"database/sql"
"fmt"
"strconv"
"strings"

"github.com/caos/logging"
"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/lib/pq"
)

const (
selectStmt = "SELECT" +
" id" +
", creation_date" +
", event_type" +
", event_sequence" +
", previous_sequence" +
", event_data" +
", editor_service" +
", editor_user" +
", resource_owner" +
", aggregate_type" +
", aggregate_id" +
", aggregate_version" +
" FROM eventstore.events"
)

type Querier interface {
Query(query string, args ...interface{}) (*sql.Rows, error)
}

func (db *SQL) Filter(ctx context.Context, searchQuery *es_models.SearchQuery) (events []*models.Event, err error) {
func (db *SQL) Filter(ctx context.Context, searchQuery *es_models.SearchQueryFactory) (events []*models.Event, err error) {
return filter(db.client, searchQuery)
}

func filter(querier Querier, searchQuery *es_models.SearchQuery) (events []*es_models.Event, err error) {
query, values := prepareQuery(searchQuery)
func filter(querier Querier, searchQuery *es_models.SearchQueryFactory) (events []*es_models.Event, err error) {
query, limit, values, rowScanner := buildQuery(searchQuery)
if query == "" {
return nil, errors.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}

rows, err := querier.Query(query, values...)
if err != nil {

@@ -49,136 +31,32 @@ func filter(querier Querier, searchQuery *es_models.SearchQuery) (events []*es_m
}
defer rows.Close()

events = make([]*es_models.Event, 0, searchQuery.Limit)
events = make([]*es_models.Event, 0, limit)

for rows.Next() {
event := new(models.Event)
var previousSequence Sequence
data := make(Data, 0)

err = rows.Scan(
&event.ID,
&event.CreationDate,
&event.Type,
&event.Sequence,
&previousSequence,
&data,
&event.EditorService,
&event.EditorUser,
&event.ResourceOwner,
&event.AggregateType,
&event.AggregateID,
&event.AggregateVersion,
)

err := rowScanner(rows.Scan, event)
if err != nil {
logging.Log("SQL-wHNPo").WithError(err).Warn("unable to scan row")
return nil, errors.ThrowInternal(err, "SQL-BfZwF", "unable to scan row")
return nil, err
}

event.PreviousSequence = uint64(previousSequence)

event.Data = make([]byte, len(data))
copy(event.Data, data)

events = append(events, event)
}

return events, nil
}

func prepareQuery(searchQuery *es_models.SearchQuery) (query string, values []interface{}) {
where, values := prepareWhere(searchQuery)
query = selectStmt + where

query += " ORDER BY event_sequence"
if searchQuery.Desc {
query += " DESC"
func (db *SQL) LatestSequence(ctx context.Context, queryFactory *es_models.SearchQueryFactory) (uint64, error) {
query, _, values, rowScanner := buildQuery(queryFactory)
if query == "" {
return 0, errors.ThrowInvalidArgument(nil, "SQL-rWeBw", "invalid query factory")
}

if searchQuery.Limit > 0 {
values = append(values, searchQuery.Limit)
query += " LIMIT ?"
row := db.client.QueryRow(query, values...)
sequence := new(Sequence)
err := rowScanner(row.Scan, sequence)
if err != nil {
logging.Log("SQL-WsxTg").WithError(err).Info("query failed")
return 0, errors.ThrowInternal(err, "SQL-Yczyx", "unable to filter latest sequence")
}

query = numberPlaceholder(query, "?", "$")

return query, values
}

func numberPlaceholder(query, old, new string) string {
for i, hasChanged := 1, true; hasChanged; i++ {
newQuery := strings.Replace(query, old, new+strconv.Itoa(i), 1)
hasChanged = query != newQuery
query = newQuery
}
return query
}

func prepareWhere(searchQuery *es_models.SearchQuery) (clause string, values []interface{}) {
values = make([]interface{}, len(searchQuery.Filters))
clauses := make([]string, len(searchQuery.Filters))

if len(values) == 0 {
return clause, values
}

for i, filter := range searchQuery.Filters {
value := filter.GetValue()
switch value.(type) {
case []bool, []float64, []int64, []string, []models.AggregateType, []models.EventType, *[]bool, *[]float64, *[]int64, *[]string, *[]models.AggregateType, *[]models.EventType:
value = pq.Array(value)
}

clauses[i] = getCondition(filter)
values[i] = value
}
return " WHERE " + strings.Join(clauses, " AND "), values
}

func getCondition(filter *es_models.Filter) string {
field := getField(filter.GetField())
operation := getOperation(filter.GetOperation())
format := prepareConditionFormat(filter.GetOperation())

return fmt.Sprintf(format, field, operation)
}

func prepareConditionFormat(operation es_models.Operation) string {
if operation == es_models.Operation_In {
return "%s %s ANY(?)"
}
return "%s %s ?"
}

func getField(field es_models.Field) string {
switch field {
case es_models.Field_AggregateID:
return "aggregate_id"
case es_models.Field_AggregateType:
return "aggregate_type"
case es_models.Field_LatestSequence:
return "event_sequence"
case es_models.Field_ResourceOwner:
return "resource_owner"
case es_models.Field_EditorService:
return "editor_service"
case es_models.Field_EditorUser:
return "editor_user"
case es_models.Field_EventType:
return "event_type"
}
return ""
}

func getOperation(operation es_models.Operation) string {
switch operation {
case es_models.Operation_Equals, es_models.Operation_In:
return "="
case es_models.Operation_Greater:
return ">"
case es_models.Operation_Less:
return "<"
}
return ""
return uint64(*sequence), nil
}
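Read together with the mock expectations earlier in the diff, buildQuery turns a factory into a numbered-placeholder statement. An illustrative check, not part of the change set; the expected output mirrors expectedFilterEventsLimitFormat above:

package sql

import "github.com/caos/zitadel/internal/eventstore/models"

// exampleBuildQuery is illustrative only. For a factory with aggregate type
// "user" and limit 34, the query should come back roughly as
//   SELECT ... FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence LIMIT $2
// with the aggregate type and the limit as the two bound values.
func exampleBuildQuery() (query string, values []interface{}) {
	query, _, values, _ = buildQuery(models.NewSearchQueryFactory("user").Limit(34))
	return query, values
}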
@@ -3,6 +3,7 @@ package sql
import (
"context"
"database/sql"
"math"
"testing"

"github.com/caos/zitadel/internal/errors"

@@ -15,39 +16,46 @@ func TestSQL_Filter(t *testing.T) {
}
type args struct {
events *mockEvents
searchQuery *es_models.SearchQuery
searchQuery *es_models.SearchQueryFactory
}
tests := []struct {
name string
fields fields
args args
eventsLen int
type res struct {
wantErr bool
isErrFunc func(error) bool
eventsLen int
}
tests := []struct {
name string
fields fields
args args
res res
}{
{
name: "only limit filter",
fields: fields{
client: mockDB(t).expectFilterEventsLimit(34, 3),
client: mockDB(t).expectFilterEventsLimit("user", 34, 3),
},
args: args{
events: &mockEvents{t: t},
searchQuery: es_models.NewSearchQuery().SetLimit(34),
searchQuery: es_models.NewSearchQueryFactory("user").Limit(34),
},
res: res{
eventsLen: 3,
wantErr: false,
},
eventsLen: 3,
wantErr: false,
},
{
name: "only desc filter",
fields: fields{
client: mockDB(t).expectFilterEventsDesc(34),
client: mockDB(t).expectFilterEventsDesc("user", 34),
},
args: args{
events: &mockEvents{t: t},
searchQuery: es_models.NewSearchQuery().OrderDesc(),
searchQuery: es_models.NewSearchQueryFactory("user").OrderDesc(),
},
res: res{
eventsLen: 34,
wantErr: false,
},
eventsLen: 34,
wantErr: false,
},
{
name: "no events found",

@@ -56,10 +64,12 @@ func TestSQL_Filter(t *testing.T) {
},
args: args{
events: &mockEvents{t: t},
searchQuery: &es_models.SearchQuery{},
searchQuery: es_models.NewSearchQueryFactory("nonAggregate"),
},
res: res{
wantErr: true,
isErrFunc: errors.IsInternal,
},
wantErr: true,
isErrFunc: errors.IsInternal,
},
{
name: "filter fails because sql internal error",

@@ -68,34 +78,40 @@ func TestSQL_Filter(t *testing.T) {
},
args: args{
events: &mockEvents{t: t},
searchQuery: &es_models.SearchQuery{},
searchQuery: es_models.NewSearchQueryFactory("user"),
},
res: res{
wantErr: true,
isErrFunc: errors.IsInternal,
},
wantErr: true,
isErrFunc: errors.IsInternal,
},
{
name: "filter by aggregate id",
fields: fields{
client: mockDB(t).expectFilterEventsAggregateIDLimit("hop", 5),
client: mockDB(t).expectFilterEventsAggregateIDLimit("user", "hop", 5),
},
args: args{
events: &mockEvents{t: t},
searchQuery: es_models.NewSearchQuery().SetLimit(5).AggregateIDFilter("hop"),
searchQuery: es_models.NewSearchQueryFactory("user").Limit(5).AggregateIDs("hop"),
},
res: res{
wantErr: false,
isErrFunc: nil,
},
wantErr: false,
isErrFunc: nil,
},
{
name: "filter by aggregate id and aggregate type",
fields: fields{
client: mockDB(t).expectFilterEventsAggregateIDTypeLimit("hop", "user", 5),
client: mockDB(t).expectFilterEventsAggregateIDTypeLimit("user", "hop", 5),
},
args: args{
events: &mockEvents{t: t},
searchQuery: es_models.NewSearchQuery().SetLimit(5).AggregateIDFilter("hop").AggregateTypeFilter("user"),
searchQuery: es_models.NewSearchQueryFactory("user").Limit(5).AggregateIDs("hop"),
},
res: res{
wantErr: false,
isErrFunc: nil,
},
wantErr: false,
isErrFunc: nil,
},
}
for _, tt := range tests {

@@ -104,13 +120,13 @@ func TestSQL_Filter(t *testing.T) {
client: tt.fields.client.sqlClient,
}
events, err := sql.Filter(context.Background(), tt.args.searchQuery)
if (err != nil) != tt.wantErr {
t.Errorf("SQL.Filter() error = %v, wantErr %v", err, tt.wantErr)
if (err != nil) != tt.res.wantErr {
t.Errorf("SQL.Filter() error = %v, wantErr %v", err, tt.res.wantErr)
}
if tt.eventsLen != 0 && len(events) != tt.eventsLen {
t.Errorf("events has wrong length got: %d want %d", len(events), tt.eventsLen)
if tt.res.eventsLen != 0 && len(events) != tt.res.eventsLen {
t.Errorf("events has wrong length got: %d want %d", len(events), tt.res.eventsLen)
}
if tt.wantErr && !tt.isErrFunc(err) {
if tt.res.wantErr && !tt.res.isErrFunc(err) {
t.Errorf("got wrong error %v", err)
}
if err := tt.fields.client.mock.ExpectationsWereMet(); err != nil {

@@ -121,35 +137,97 @@ func TestSQL_Filter(t *testing.T) {
}
}

func Test_getCondition(t *testing.T) {
func TestSQL_LatestSequence(t *testing.T) {
type fields struct {
client *dbMock
}
type args struct {
filter *es_models.Filter
searchQuery *es_models.SearchQueryFactory
}
type res struct {
wantErr bool
isErrFunc func(error) bool
sequence uint64
}
tests := []struct {
name string
args args
want string
name string
fields fields
args args
res res
}{
{
name: "single value",
name: "invalid query factory",
args: args{
filter: es_models.NewFilter(es_models.Field_LatestSequence, 34, es_models.Operation_Greater),
searchQuery: nil,
},
fields: fields{
client: mockDB(t),
},
res: res{
wantErr: true,
isErrFunc: errors.IsErrorInvalidArgument,
},
want: "event_sequence > ?",
},
{
name: "list value",
name: "no events for aggregate",
args: args{
filter: es_models.NewFilter(es_models.Field_AggregateType, []string{"a", "b"}, es_models.Operation_In),
searchQuery: es_models.NewSearchQueryFactory("idiot").Columns(es_models.Columns_Max_Sequence),
},
fields: fields{
client: mockDB(t).expectLatestSequenceFilterError("idiot", sql.ErrNoRows),
},
res: res{
wantErr: false,
sequence: 0,
},
},
{
name: "sql query error",
args: args{
searchQuery: es_models.NewSearchQueryFactory("idiot").Columns(es_models.Columns_Max_Sequence),
},
fields: fields{
client: mockDB(t).expectLatestSequenceFilterError("idiot", sql.ErrConnDone),
},
res: res{
wantErr: true,
isErrFunc: errors.IsInternal,
sequence: 0,
},
},
{
name: "events for aggregate found",
args: args{
searchQuery: es_models.NewSearchQueryFactory("user").Columns(es_models.Columns_Max_Sequence),
},
fields: fields{
client: mockDB(t).expectLatestSequenceFilter("user", math.MaxUint64),
},
res: res{
wantErr: false,
sequence: math.MaxUint64,
},
want: "aggregate_type = ANY(?)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getCondition(tt.args.filter); got != tt.want {
t.Errorf("getCondition() = %v, want %v", got, tt.want)
sql := &SQL{
client: tt.fields.client.sqlClient,
}
sequence, err := sql.LatestSequence(context.Background(), tt.args.searchQuery)
if (err != nil) != tt.res.wantErr {
t.Errorf("SQL.LatestSequence() error = %v, wantErr %v", err, tt.res.wantErr)
}
if tt.res.sequence != sequence {
t.Errorf("unexpected sequence got: %d want %d", sequence, tt.res.sequence)
}
if tt.res.wantErr && !tt.res.isErrFunc(err) {
t.Errorf("got wrong error %v", err)
}
if err := tt.fields.client.mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
tt.fields.client.close()
})
}
}
@@ -17,7 +17,7 @@ const (
"SELECT $1, $2, $3, $4, COALESCE($5, now()), $6, $7, $8, $9, $10 " +
"WHERE EXISTS (" +
"SELECT 1 FROM eventstore.events WHERE aggregate_type = $11 AND aggregate_id = $12 HAVING MAX(event_sequence) = $13 OR ($14::BIGINT IS NULL AND COUNT(*) = 0)) " +
"RETURNING id, event_sequence, creation_date"
"RETURNING event_sequence, creation_date"
)

func (db *SQL) PushAggregates(ctx context.Context, aggregates ...*models.Aggregate) (err error) {

@@ -55,7 +55,7 @@ func precondtion(tx *sql.Tx, aggregate *models.Aggregate) error {
if aggregate.Precondition == nil {
return nil
}
events, err := filter(tx, aggregate.Precondition.Query)
events, err := filter(tx, models.FactoryFromSearchQuery(aggregate.Precondition.Query))
if err != nil {
return caos_errs.ThrowPreconditionFailed(err, "SQL-oBPxB", "filter failed")
}

@@ -69,12 +69,11 @@ func precondtion(tx *sql.Tx, aggregate *models.Aggregate) error {
func insertEvents(stmt *sql.Stmt, previousSequence Sequence, events []*models.Event) error {
for _, event := range events {
err := stmt.QueryRow(event.Type, event.AggregateType, event.AggregateID, event.AggregateVersion, event.CreationDate, Data(event.Data), event.EditorUser, event.EditorService, event.ResourceOwner, previousSequence,
event.AggregateType, event.AggregateID, previousSequence, previousSequence).Scan(&event.ID, &previousSequence, &event.CreationDate)
event.AggregateType, event.AggregateID, previousSequence, previousSequence).Scan(&previousSequence, &event.CreationDate)

if err != nil {
logging.LogWithFields("SQL-IP3js",
"aggregate", event.AggregateType,
"id", event.AggregateID,
"previousSequence", previousSequence,
"aggregateId", event.AggregateID,
"aggregateType", event.AggregateType,
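The conditional INSERT above is the optimistic-concurrency guard: the WHERE EXISTS subquery only yields a row when MAX(event_sequence) still equals the expected previous sequence, or when the aggregate has no events yet and no sequence was expected. When a concurrent writer has advanced the aggregate, no row comes back and Scan reports sql.ErrNoRows. A sketch of how a caller could classify that outcome; the helper itself is hypothetical, not part of this commit:

package sql

import (
	"database/sql"
	"errors"
)

// scanPushedEvent is an illustrative helper: when the optimistic check in
// the insert statement fails, no row is returned and Scan yields
// sql.ErrNoRows, which callers can surface as a concurrency conflict
// rather than a database outage.
func scanPushedEvent(row *sql.Row, sequence *Sequence, creationDate *sql.NullTime) error {
	err := row.Scan(sequence, creationDate)
	if errors.Is(err, sql.ErrNoRows) {
		return errors.New("optimistic concurrency check failed: previous sequence changed")
	}
	return err
}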
@@ -3,11 +3,12 @@ package sql
import (
"context"
"database/sql"
"errors"
"reflect"
"runtime"
"testing"

"github.com/caos/zitadel/internal/errors"
z_errors "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/models"
)

@@ -36,7 +37,7 @@ func TestSQL_PushAggregates(t *testing.T) {
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert().
expectPrepareInsert(nil).
expectReleaseSavepoint(nil).
expectCommit(nil),
},

@@ -44,19 +45,33 @@ func TestSQL_PushAggregates(t *testing.T) {
shouldCheckEvents: false,
isError: noErr,
},
{
name: "prepare fails",
fields: fields{
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert(sql.ErrConnDone).
expectReleaseSavepoint(nil).
expectCommit(nil),
},
args: args{aggregates: []*models.Aggregate{}},
shouldCheckEvents: false,
isError: func(err error) bool { return errors.Is(err, sql.ErrConnDone) },
},
{
name: "no aggregates release fails",
fields: fields{
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert().
expectPrepareInsert(nil).
expectReleaseSavepoint(sql.ErrConnDone).
expectCommit(nil),
},

args: args{aggregates: []*models.Aggregate{}},
isError: errors.IsInternal,
isError: z_errors.IsInternal,
shouldCheckEvents: false,
},
{

@@ -65,13 +80,13 @@ func TestSQL_PushAggregates(t *testing.T) {
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert().
expectFilterEventsError(errors.CreateCaosError(nil, "SQL-IzJOf", "err")).
expectPrepareInsert(nil).
expectFilterEventsError(z_errors.CreateCaosError(nil, "SQL-IzJOf", "err")).
expectRollback(nil),
},

args: args{aggregates: []*models.Aggregate{aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(1), nil)}},
isError: errors.IsPreconditionFailed,
isError: z_errors.IsPreconditionFailed,
shouldCheckEvents: false,
},
{

@@ -80,7 +95,7 @@ func TestSQL_PushAggregates(t *testing.T) {
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert().
expectPrepareInsert(nil).
expectInsertEvent(&models.Event{
AggregateID: "aggID",
AggregateType: "aggType",

@@ -90,8 +105,7 @@ func TestSQL_PushAggregates(t *testing.T) {
PreviousSequence: 34,
Type: "eventTyp",
AggregateVersion: "v0.0.1",
},
"asdfölk-234", 45).
}, 45).
expectInsertEvent(&models.Event{
AggregateID: "aggID",
AggregateType: "aggType",

@@ -101,7 +115,7 @@ func TestSQL_PushAggregates(t *testing.T) {
PreviousSequence: 45,
Type: "eventTyp",
AggregateVersion: "v0.0.1",
}, "asdfölk-233", 46).
}, 46).
expectReleaseSavepoint(nil).
expectCommit(nil),
},

@@ -141,7 +155,7 @@ func TestSQL_PushAggregates(t *testing.T) {
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert().
expectPrepareInsert(nil).
expectInsertEvent(&models.Event{
AggregateID: "aggID",
AggregateType: "aggType",

@@ -151,7 +165,7 @@ func TestSQL_PushAggregates(t *testing.T) {
PreviousSequence: 34,
Type: "eventTyp",
AggregateVersion: "v0.0.1",
}, "asdfölk-233", 47).
}, 47).
expectInsertEvent(&models.Event{
AggregateID: "aggID2",
AggregateType: "aggType2",

@@ -161,7 +175,7 @@ func TestSQL_PushAggregates(t *testing.T) {
PreviousSequence: 40,
Type: "eventTyp",
AggregateVersion: "v0.0.1",
}, "asdfölk-233", 48).
}, 48).
expectReleaseSavepoint(nil).
expectCommit(nil),
},

@@ -206,6 +220,7 @@ func TestSQL_PushAggregates(t *testing.T) {
client: mockDB(t).
expectBegin(nil).
expectSavepoint().
expectPrepareInsert(nil).
expectInsertEventError(&models.Event{
AggregateID: "aggID",
AggregateType: "aggType",

@@ -248,7 +263,7 @@ func TestSQL_PushAggregates(t *testing.T) {
},
},
},
isError: errors.IsInternal,
isError: z_errors.IsInternal,
shouldCheckEvents: false,
},
{

@@ -256,7 +271,7 @@ func TestSQL_PushAggregates(t *testing.T) {
fields: fields{
client: mockDB(t).
expectBegin(nil).
expectPrepareInsert().
expectPrepareInsert(nil).
expectSavepoint().
expectInsertEvent(&models.Event{
AggregateID: "aggID",

@@ -268,7 +283,7 @@ func TestSQL_PushAggregates(t *testing.T) {
Type: "eventTyp",
Data: []byte("{}"),
AggregateVersion: "v0.0.1",
}, "asdfölk-233", 47).
}, 47).
expectReleaseSavepoint(sql.ErrConnDone).
expectCommit(nil).
expectRollback(nil),

@@ -291,7 +306,7 @@ func TestSQL_PushAggregates(t *testing.T) {
},
},
},
isError: errors.IsInternal,
isError: z_errors.IsInternal,
shouldCheckEvents: false,
},
}

@@ -359,42 +374,42 @@ func Test_precondtion(t *testing.T) {
name: "precondition fails",
fields: fields{
client: mockDB(t).
expectBegin(nil).expectFilterEventsLimit(5, 0),
expectBegin(nil).expectFilterEventsLimit("test", 5, 0),
},
args: args{
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5), validationFunc(errors.ThrowPreconditionFailed(nil, "SQL-LBIKm", "err"))),
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5).AggregateTypeFilter("test"), validationFunc(z_errors.ThrowPreconditionFailed(nil, "SQL-LBIKm", "err"))),
},
isErr: errors.IsPreconditionFailed,
isErr: z_errors.IsPreconditionFailed,
},
{
name: "precondition with filter error",
fields: fields{
client: mockDB(t).
expectBegin(nil).expectFilterEventsError(errors.ThrowInternal(nil, "SQL-ac9EW", "err")),
expectBegin(nil).expectFilterEventsError(z_errors.ThrowInternal(nil, "SQL-ac9EW", "err")),
},
args: args{
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5), validationFunc(errors.CreateCaosError(nil, "SQL-LBIKm", "err"))),
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5).AggregateTypeFilter("test"), validationFunc(z_errors.CreateCaosError(nil, "SQL-LBIKm", "err"))),
},
isErr: errors.IsPreconditionFailed,
isErr: z_errors.IsPreconditionFailed,
},
{
name: "precondition no events",
fields: fields{
client: mockDB(t).
expectBegin(nil).expectFilterEventsLimit(5, 0),
expectBegin(nil).expectFilterEventsLimit("test", 5, 0),
},
args: args{
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5), validationFunc(nil)),
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5).AggregateTypeFilter("test"), validationFunc(nil)),
},
},
{
name: "precondition with events",
fields: fields{
client: mockDB(t).
expectBegin(nil).expectFilterEventsLimit(5, 3),
expectBegin(nil).expectFilterEventsLimit("test", 5, 3),
},
args: args{
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5), validationFunc(nil)),
aggregate: aggregateWithPrecondition(&models.Aggregate{}, models.NewSearchQuery().SetLimit(5).AggregateTypeFilter("test"), validationFunc(nil)),
},
},
}
199
internal/eventstore/internal/repository/sql/query.go
Normal file
@@ -0,0 +1,199 @@
package sql

import (
"database/sql"
"errors"
"fmt"
"strconv"
"strings"

"github.com/caos/logging"
z_errors "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/lib/pq"
)

const (
selectStmt = "SELECT" +
" creation_date" +
", event_type" +
", event_sequence" +
", previous_sequence" +
", event_data" +
", editor_service" +
", editor_user" +
", resource_owner" +
", aggregate_type" +
", aggregate_id" +
", aggregate_version" +
" FROM eventstore.events"
)

func buildQuery(queryFactory *models.SearchQueryFactory) (query string, limit uint64, values []interface{}, rowScanner func(s scan, dest interface{}) error) {
searchQuery, err := queryFactory.Build()
if err != nil {
logging.Log("SQL-cshKu").WithError(err).Warn("search query factory invalid")
return "", 0, nil, nil
}
query, rowScanner = prepareColumns(searchQuery.Columns)
where, values := prepareCondition(searchQuery.Filters)
if where == "" || query == "" {
return "", 0, nil, nil
}
query += where

if searchQuery.Columns != models.Columns_Max_Sequence {
query += " ORDER BY event_sequence"
if searchQuery.Desc {
query += " DESC"
}
}

if searchQuery.Limit > 0 {
values = append(values, searchQuery.Limit)
query += " LIMIT ?"
}

query = numberPlaceholder(query, "?", "$")

return query, searchQuery.Limit, values, rowScanner
}

func prepareCondition(filters []*models.Filter) (clause string, values []interface{}) {
values = make([]interface{}, len(filters))
clauses := make([]string, len(filters))

if len(filters) == 0 {
return clause, values
}
for i, filter := range filters {
value := filter.GetValue()
switch value.(type) {
case []bool, []float64, []int64, []string, []models.AggregateType, []models.EventType, *[]bool, *[]float64, *[]int64, *[]string, *[]models.AggregateType, *[]models.EventType:
value = pq.Array(value)
}

clauses[i] = getCondition(filter)
if clauses[i] == "" {
return "", nil
}
values[i] = value
}
return " WHERE " + strings.Join(clauses, " AND "), values
}

type scan func(dest ...interface{}) error

func prepareColumns(columns models.Columns) (string, func(s scan, dest interface{}) error) {
switch columns {
case models.Columns_Max_Sequence:
return "SELECT MAX(event_sequence) FROM eventstore.events", func(row scan, dest interface{}) (err error) {
sequence, ok := dest.(*Sequence)
if !ok {
return z_errors.ThrowInvalidArgument(nil, "SQL-NBjA9", "type must be sequence")
}
err = row(sequence)
if err == nil || errors.Is(err, sql.ErrNoRows) {
return nil
}
return z_errors.ThrowInternal(err, "SQL-bN5xg", "something went wrong")
}
case models.Columns_Event:
return selectStmt, func(row scan, dest interface{}) (err error) {
event, ok := dest.(*models.Event)
if !ok {
return z_errors.ThrowInvalidArgument(nil, "SQL-4GP6F", "type must be event")
}
var previousSequence Sequence
data := make(Data, 0)

err = row(
&event.CreationDate,
&event.Type,
&event.Sequence,
&previousSequence,
&data,
&event.EditorService,
&event.EditorUser,
&event.ResourceOwner,
&event.AggregateType,
&event.AggregateID,
&event.AggregateVersion,
)

if err != nil {
logging.Log("SQL-kn1Sw").WithError(err).Warn("unable to scan row")
return z_errors.ThrowInternal(err, "SQL-J0hFS", "unable to scan row")
}

event.PreviousSequence = uint64(previousSequence)

event.Data = make([]byte, len(data))
copy(event.Data, data)

return nil
}
default:
return "", nil
}
}

func numberPlaceholder(query, old, new string) string {
for i, hasChanged := 1, true; hasChanged; i++ {
newQuery := strings.Replace(query, old, new+strconv.Itoa(i), 1)
hasChanged = query != newQuery
query = newQuery
}
return query
}

func getCondition(filter *es_models.Filter) (condition string) {
field := getField(filter.GetField())
operation := getOperation(filter.GetOperation())
if field == "" || operation == "" {
return ""
}
format := getConditionFormat(filter.GetOperation())

return fmt.Sprintf(format, field, operation)
}

func getConditionFormat(operation es_models.Operation) string {
if operation == es_models.Operation_In {
return "%s %s ANY(?)"
}
return "%s %s ?"
}

func getField(field es_models.Field) string {
switch field {
case es_models.Field_AggregateID:
return "aggregate_id"
case es_models.Field_AggregateType:
return "aggregate_type"
case es_models.Field_LatestSequence:
return "event_sequence"
case es_models.Field_ResourceOwner:
return "resource_owner"
case es_models.Field_EditorService:
return "editor_service"
case es_models.Field_EditorUser:
return "editor_user"
case es_models.Field_EventType:
return "event_type"
}
return ""
}

func getOperation(operation es_models.Operation) string {
switch operation {
case es_models.Operation_Equals, es_models.Operation_In:
return "="
case es_models.Operation_Greater:
return ">"
case es_models.Operation_Less:
return "<"
}
return ""
}
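One design note on the new scan indirection: both *sql.Row.Scan and *sql.Rows.Scan satisfy the scan function type, so the same rowScanner returned by prepareColumns serves the single-row LatestSequence path and the iterating Filter path. A small illustrative adapter, not part of the commit:

package sql

import "database/sql"

// Illustrative only: the same rowScanner works for one row or many,
// because both Scan methods match the scan signature defined above.
func scanSingle(row *sql.Row, rowScanner func(s scan, dest interface{}) error, dest interface{}) error {
	return rowScanner(row.Scan, dest)
}

func scanMany(rows *sql.Rows, rowScanner func(s scan, dest interface{}) error, makeDest func() interface{}) ([]interface{}, error) {
	defer rows.Close()
	var results []interface{}
	for rows.Next() {
		dest := makeDest()
		if err := rowScanner(rows.Scan, dest); err != nil {
			return nil, err
		}
		results = append(results, dest)
	}
	return results, rows.Err()
}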
486
internal/eventstore/internal/repository/sql/query_test.go
Normal file
@@ -0,0 +1,486 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/caos/zitadel/internal/errors"
|
||||
"github.com/caos/zitadel/internal/eventstore/models"
|
||||
es_models "github.com/caos/zitadel/internal/eventstore/models"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
func Test_numberPlaceholder(t *testing.T) {
|
||||
type args struct {
|
||||
query string
|
||||
old string
|
||||
new string
|
||||
}
|
||||
type res struct {
|
||||
query string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
res res
|
||||
}{
|
||||
{
|
||||
name: "no replaces",
|
||||
args: args{
|
||||
new: "$",
|
||||
old: "?",
|
||||
query: "SELECT * FROM eventstore.events",
|
||||
},
|
||||
res: res{
|
||||
query: "SELECT * FROM eventstore.events",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "two replaces",
|
||||
args: args{
|
||||
new: "$",
|
||||
old: "?",
|
||||
query: "SELECT * FROM eventstore.events WHERE aggregate_type = ? AND LIMIT = ?",
|
||||
},
|
||||
res: res{
|
||||
query: "SELECT * FROM eventstore.events WHERE aggregate_type = $1 AND LIMIT = $2",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := numberPlaceholder(tt.args.query, tt.args.old, tt.args.new); got != tt.res.query {
|
||||
t.Errorf("numberPlaceholder() = %v, want %v", got, tt.res.query)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getOperation(t *testing.T) {
|
||||
t.Run("all ops", func(t *testing.T) {
|
||||
for op, expected := range map[es_models.Operation]string{
|
||||
es_models.Operation_Equals: "=",
|
||||
es_models.Operation_In: "=",
|
||||
es_models.Operation_Greater: ">",
|
||||
es_models.Operation_Less: "<",
|
||||
es_models.Operation(-1): "",
|
||||
} {
|
||||
if got := getOperation(op); got != expected {
|
||||
t.Errorf("getOperation() = %v, want %v", got, expected)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getField(t *testing.T) {
|
||||
t.Run("all fields", func(t *testing.T) {
|
||||
for field, expected := range map[es_models.Field]string{
|
||||
es_models.Field_AggregateType: "aggregate_type",
|
||||
es_models.Field_AggregateID: "aggregate_id",
|
||||
es_models.Field_LatestSequence: "event_sequence",
|
||||
es_models.Field_ResourceOwner: "resource_owner",
|
||||
es_models.Field_EditorService: "editor_service",
|
||||
es_models.Field_EditorUser: "editor_user",
|
||||
es_models.Field_EventType: "event_type",
|
||||
es_models.Field(-1): "",
|
||||
} {
|
||||
if got := getField(field); got != expected {
|
||||
t.Errorf("getField() = %v, want %v", got, expected)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getConditionFormat(t *testing.T) {
|
||||
type args struct {
|
||||
operation es_models.Operation
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "no in operation",
|
||||
args: args{
|
||||
operation: es_models.Operation_Equals,
|
||||
},
|
||||
want: "%s %s ?",
|
||||
},
|
||||
{
|
||||
name: "in operation",
|
||||
args: args{
|
||||
operation: es_models.Operation_In,
|
||||
},
|
||||
want: "%s %s ANY(?)",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := getConditionFormat(tt.args.operation); got != tt.want {
|
||||
t.Errorf("prepareConditionFormat() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getCondition(t *testing.T) {
|
||||
type args struct {
|
||||
filter *es_models.Filter
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "equals",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field_AggregateID, "", es_models.Operation_Equals)},
|
||||
want: "aggregate_id = ?",
|
||||
},
|
||||
{
|
||||
name: "greater",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field_LatestSequence, 0, es_models.Operation_Greater)},
|
||||
want: "event_sequence > ?",
|
||||
},
|
||||
{
|
||||
name: "less",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field_LatestSequence, 5000, es_models.Operation_Less)},
|
||||
want: "event_sequence < ?",
|
||||
},
|
||||
{
|
||||
name: "in list",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field_AggregateType, []es_models.AggregateType{"movies", "actors"}, es_models.Operation_In)},
|
||||
want: "aggregate_type = ANY(?)",
|
||||
},
|
||||
{
|
||||
name: "invalid operation",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field_AggregateType, []es_models.AggregateType{"movies", "actors"}, es_models.Operation(-1))},
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "invalid field",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field(-1), []es_models.AggregateType{"movies", "actors"}, es_models.Operation_Equals)},
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "invalid field and operation",
|
||||
args: args{filter: es_models.NewFilter(es_models.Field(-1), []es_models.AggregateType{"movies", "actors"}, es_models.Operation(-1))},
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := getCondition(tt.args.filter); got != tt.want {
|
||||
t.Errorf("getCondition() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_prepareColumns(t *testing.T) {
	type args struct {
		columns models.Columns
		dest    interface{}
		dbErr   error
	}
	type res struct {
		query    string
		dbRow    []interface{}
		expected interface{}
		dbErr    func(error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "invalid columns",
			args: args{columns: es_models.Columns(-1)},
			res: res{
				query: "",
				dbErr: func(err error) bool { return err == nil },
			},
		},
		{
			name: "max column",
			args: args{
				columns: es_models.Columns_Max_Sequence,
				dest:    new(Sequence),
			},
			res: res{
				query:    "SELECT MAX(event_sequence) FROM eventstore.events",
				dbRow:    []interface{}{Sequence(5)},
				expected: Sequence(5),
			},
		},
		{
			name: "max sequence wrong dest type",
			args: args{
				columns: es_models.Columns_Max_Sequence,
				dest:    new(uint64),
			},
			res: res{
				query: "SELECT MAX(event_sequence) FROM eventstore.events",
				dbErr: errors.IsErrorInvalidArgument,
			},
		},
		{
			name: "event",
			args: args{
				columns: es_models.Columns_Event,
				dest:    new(models.Event),
			},
			res: res{
				query:    "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
				dbRow:    []interface{}{time.Time{}, models.EventType(""), uint64(5), Sequence(0), Data(nil), "", "", "", models.AggregateType("user"), "hodor", models.Version("")},
				expected: models.Event{AggregateID: "hodor", AggregateType: "user", Sequence: 5, Data: make(Data, 0)},
			},
		},
		{
			name: "event wrong dest type",
			args: args{
				columns: es_models.Columns_Event,
				dest:    new(uint64),
			},
			res: res{
				query: "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
				dbErr: errors.IsErrorInvalidArgument,
			},
		},
		{
			name: "event query error",
			args: args{
				columns: es_models.Columns_Event,
				dest:    new(models.Event),
				dbErr:   sql.ErrConnDone,
			},
			res: res{
				query: "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events",
				dbErr: errors.IsInternal,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			query, rowScanner := prepareColumns(tt.args.columns)
			if query != tt.res.query {
				t.Errorf("prepareColumns() got = %v, want %v", query, tt.res.query)
			}
			if tt.res.query == "" && rowScanner != nil {
				t.Errorf("row scanner should be nil")
			}
			if rowScanner == nil {
				return
			}
			err := rowScanner(prepareTestScan(tt.args.dbErr, tt.res.dbRow), tt.args.dest)
			if tt.res.dbErr != nil {
				if !tt.res.dbErr(err) {
					t.Errorf("wrong error type in rowScanner got: %v", err)
				}
			} else {
				if !reflect.DeepEqual(reflect.Indirect(reflect.ValueOf(tt.args.dest)).Interface(), tt.res.expected) {
					t.Errorf("unexpected result from rowScanner want: %v got: %v", tt.res.dbRow, tt.args.dest)
				}
			}
		})
	}
}

func prepareTestScan(err error, res []interface{}) scan {
	return func(dests ...interface{}) error {
		if err != nil {
			return err
		}
		if len(dests) != len(res) {
			return errors.ThrowInvalidArgumentf(nil, "SQL-NML1q", "expected len %d got %d", len(res), len(dests))
		}
		for i, r := range res {
			reflect.ValueOf(dests[i]).Elem().Set(reflect.ValueOf(r))
		}

		return nil
	}
}

func Test_prepareCondition(t *testing.T) {
	type args struct {
		filters []*models.Filter
	}
	type res struct {
		clause string
		values []interface{}
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "nil filters",
			args: args{
				filters: nil,
			},
			res: res{
				clause: "",
				values: nil,
			},
		},
		{
			name: "empty filters",
			args: args{
				filters: []*es_models.Filter{},
			},
			res: res{
				clause: "",
				values: nil,
			},
		},
		{
			name: "invalid condition",
			args: args{
				filters: []*es_models.Filter{
					es_models.NewFilter(es_models.Field_AggregateID, "wrong", es_models.Operation(-1)),
				},
			},
			res: res{
				clause: "",
				values: nil,
			},
		},
		{
			name: "array as condition value",
			args: args{
				filters: []*es_models.Filter{
					es_models.NewFilter(es_models.Field_AggregateType, []es_models.AggregateType{"user", "org"}, es_models.Operation_In),
				},
			},
			res: res{
				clause: " WHERE aggregate_type = ANY(?)",
				values: []interface{}{pq.Array([]es_models.AggregateType{"user", "org"})},
			},
		},
		{
			name: "multiple filters",
			args: args{
				filters: []*es_models.Filter{
					es_models.NewFilter(es_models.Field_AggregateType, []es_models.AggregateType{"user", "org"}, es_models.Operation_In),
					es_models.NewFilter(es_models.Field_AggregateID, "1234", es_models.Operation_Equals),
					es_models.NewFilter(es_models.Field_EventType, []es_models.EventType{"user.created", "org.created"}, es_models.Operation_In),
				},
			},
			res: res{
				clause: " WHERE aggregate_type = ANY(?) AND aggregate_id = ? AND event_type = ANY(?)",
				values: []interface{}{pq.Array([]es_models.AggregateType{"user", "org"}), "1234", pq.Array([]es_models.EventType{"user.created", "org.created"})},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotClause, gotValues := prepareCondition(tt.args.filters)
			if gotClause != tt.res.clause {
				t.Errorf("prepareCondition() gotClause = %v, want %v", gotClause, tt.res.clause)
			}
			if len(gotValues) != len(tt.res.values) {
				t.Errorf("wrong length of gotten values got = %d, want %d", len(gotValues), len(tt.res.values))
				return
			}
			for i, value := range gotValues {
				if !reflect.DeepEqual(value, tt.res.values[i]) {
					t.Errorf("prepareCondition() gotValues = %v, want %v", gotValues, tt.res.values)
				}
			}
		})
	}
}

func Test_buildQuery(t *testing.T) {
	type args struct {
		queryFactory *models.SearchQueryFactory
	}
	type res struct {
		query      string
		limit      uint64
		values     []interface{}
		rowScanner bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "invalid query factory",
			args: args{
				queryFactory: nil,
			},
			res: res{
				query:      "",
				limit:      0,
				rowScanner: false,
				values:     nil,
			},
		},
		{
			name: "with order by desc",
			args: args{
				queryFactory: es_models.NewSearchQueryFactory("user").OrderDesc(),
			},
			res: res{
				query:      "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC",
				rowScanner: true,
				values:     []interface{}{es_models.AggregateType("user")},
			},
		},
		{
			name: "with limit",
			args: args{
				queryFactory: es_models.NewSearchQueryFactory("user").Limit(5),
			},
			res: res{
				query:      "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence LIMIT $2",
				rowScanner: true,
				values:     []interface{}{es_models.AggregateType("user"), uint64(5)},
				limit:      5,
			},
		},
		{
			name: "with limit and order by desc",
			args: args{
				queryFactory: es_models.NewSearchQueryFactory("user").Limit(5).OrderDesc(),
			},
			res: res{
				query:      "SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC LIMIT $2",
				rowScanner: true,
				values:     []interface{}{es_models.AggregateType("user"), uint64(5)},
				limit:      5,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotQuery, gotLimit, gotValues, gotRowScanner := buildQuery(tt.args.queryFactory)
			if gotQuery != tt.res.query {
				t.Errorf("buildQuery() gotQuery = %v, want %v", gotQuery, tt.res.query)
			}
			if gotLimit != tt.res.limit {
				t.Errorf("buildQuery() gotLimit = %v, want %v", gotLimit, tt.res.limit)
			}
			if len(gotValues) != len(tt.res.values) {
				t.Errorf("wrong length of gotten values got = %d, want %d", len(gotValues), len(tt.res.values))
				return
			}
			for i, value := range gotValues {
				if !reflect.DeepEqual(value, tt.res.values[i]) {
					t.Errorf("prepareCondition() gotValues = %v, want %v", gotValues, tt.res.values)
				}
			}
			if (tt.res.rowScanner && gotRowScanner == nil) || (!tt.res.rowScanner && gotRowScanner != nil) {
				t.Errorf("rowScanner should be nil==%v got nil==%v", tt.res.rowScanner, gotRowScanner == nil)
			}
		})
	}
}
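
The cases above fix the exact SQL string and bind values that buildQuery emits. As a rough illustration (not part of this commit; the db handle and wrapper function are placeholders), such a query/values pair would be consumed with database/sql like this:

func illustrateBuildQueryUsage(db *sql.DB) error {
	// query and values as buildQuery produces them for
	// es_models.NewSearchQueryFactory("user").Limit(5).OrderDesc()
	rows, err := db.Query(
		"SELECT creation_date, event_type, event_sequence, previous_sequence, event_data, editor_service, editor_user, resource_owner, aggregate_type, aggregate_id, aggregate_version FROM eventstore.events WHERE aggregate_type = $1 ORDER BY event_sequence DESC LIMIT $2",
		"user", 5, // bind values in the order the builder collected them
	)
	if err != nil {
		return err
	}
	defer rows.Close()
	return rows.Err()
}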
@@ -1,27 +0,0 @@
-package sql
-
-import (
-	"database/sql/driver"
-)
-
-// Sequence represents a number that may be null.
-// Sequence implements the sql.Scanner interface
-type Sequence uint64
-
-// Scan implements the Scanner interface.
-func (seq *Sequence) Scan(value interface{}) error {
-	if value == nil {
-		*seq = 0
-		return nil
-	}
-	*seq = Sequence(value.(int64))
-	return nil
-}
-
-// Value implements the driver Valuer interface.
-func (seq Sequence) Value() (driver.Value, error) {
-	if seq == 0 {
-		return nil, nil
-	}
-	return int64(seq), nil
-}
@@ -23,3 +23,25 @@ func (data Data) Value() (driver.Value, error) {
 	}
 	return []byte(data), nil
 }
+
+// Sequence represents a number that may be null.
+// Sequence implements the sql.Scanner interface
+type Sequence uint64
+
+// Scan implements the Scanner interface.
+func (seq *Sequence) Scan(value interface{}) error {
+	if value == nil {
+		*seq = 0
+		return nil
+	}
+	*seq = Sequence(value.(int64))
+	return nil
+}
+
+// Value implements the driver Valuer interface.
+func (seq Sequence) Value() (driver.Value, error) {
+	if seq == 0 {
+		return nil, nil
+	}
+	return int64(seq), nil
+}
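
Sequence moves unchanged from its own file into the shared types file. The point of the wrapper is the NULL round-trip: a NULL column scans to the zero value, and a zero value is written back as NULL. A small sketch of that behaviour (illustrative usage, not part of the diff):

func illustrateSequenceNull() {
	var seq Sequence
	_ = seq.Scan(nil)      // NULL scans to the zero value: seq == 0
	_ = seq.Scan(int64(7)) // a non-null column: seq == 7

	v, _ := Sequence(0).Value() // zero is persisted as NULL: v == nil
	v, _ = Sequence(7).Value()  // non-zero round-trips as int64(7)
	_ = v
}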
@@ -4,37 +4,45 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
+	"time"
 
 	"github.com/caos/logging"
 	caos_errs "github.com/caos/zitadel/internal/errors"
 	"github.com/cockroachdb/cockroach-go/v2/crdb"
-	"time"
 )
 
 const (
-	lockedUntilKey = "locked_until"
-	lockerIDKey    = "locker_id"
-	objectTypeKey  = "object_type"
+	insertStmtFormat = "INSERT INTO %s" +
+		" (locker_id, locked_until, view_name) VALUES ($1, now()+$2::INTERVAL, $3)" +
+		" ON CONFLICT (view_name)" +
+		" DO UPDATE SET locker_id = $4, locked_until = now()+$5::INTERVAL" +
+		" WHERE locks.view_name = $6 AND (locks.locker_id = $7 OR locks.locked_until < now())"
+	millisecondsAsSeconds = int64(time.Second / time.Millisecond)
 )
 
 type lock struct {
 	LockerID    string    `gorm:"column:locker_id;primary_key"`
 	LockedUntil time.Time `gorm:"column:locked_until"`
-	ViewName    string    `gorm:"column:object_type;primary_key"`
+	ViewName    string    `gorm:"column:view_name;primary_key"`
 }
 
 func Renew(dbClient *sql.DB, lockTable, lockerID, viewModel string, waitTime time.Duration) error {
	return crdb.ExecuteTx(context.Background(), dbClient, nil, func(tx *sql.Tx) error {
-		query := fmt.Sprintf("INSERT INTO %s (%s, %s, %s) VALUES ($1, $2, now()+$3) ON CONFLICT (%s) DO UPDATE SET %s = now()+$4, %s = $5 WHERE (locks.%s < now() OR locks.%s = $6) AND locks.%s = $7",
-			lockTable, objectTypeKey, lockerIDKey, lockedUntilKey, objectTypeKey, lockedUntilKey, lockerIDKey, lockedUntilKey, lockerIDKey, objectTypeKey)
+		insert := fmt.Sprintf(insertStmtFormat, lockTable)
+		result, err := tx.Exec(insert,
+			lockerID, waitTime.Milliseconds()/millisecondsAsSeconds, viewModel,
+			lockerID, waitTime.Milliseconds()/millisecondsAsSeconds,
+			viewModel, lockerID)
 
-		rs, err := tx.Exec(query, viewModel, lockerID, waitTime.Seconds(), waitTime.Seconds(), lockerID, lockerID, viewModel)
 		if err != nil {
-			tx.Rollback()
 			return err
 		}
-		if rows, _ := rs.RowsAffected(); rows == 0 {
-			tx.Rollback()
+
+		if rows, _ := result.RowsAffected(); rows == 0 {
 			return caos_errs.ThrowAlreadyExists(nil, "SPOOL-lso0e", "view already locked")
 		}
+		logging.LogWithFields("LOCKE-lOgbg", "view", viewModel, "locker", lockerID).Debug("locker changed")
 		return nil
 	})
 }
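
The rewritten statement takes the lock in a single upsert: insert the row if no one holds the view, otherwise update it only when the lease has expired or already belongs to the same locker, so a zero rows-affected result means another worker holds a live lease. A call-site sketch (table name, worker ID, and view name are made-up examples):

// Renew returns caos_errs.ThrowAlreadyExists when another worker holds the view.
err := Renew(dbClient, "auth.locks", "myhost--0", "management.applications", 10*time.Second)
if err != nil {
	// skip this cycle instead of processing the view concurrently
}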
@@ -57,8 +57,8 @@ func (db *dbMock) expectReleaseSavepoint() *dbMock {
 
 func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
 	query := db.mock.
-		ExpectExec(`INSERT INTO table\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
-		WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
+		ExpectExec(`INSERT INTO table\.locks \(locker_id, locked_until, view_name\) VALUES \(\$1, now\(\)\+\$2::INTERVAL, \$3\) ON CONFLICT \(view_name\) DO UPDATE SET locker_id = \$4, locked_until = now\(\)\+\$5::INTERVAL WHERE locks\.view_name = \$6 AND \(locks\.locker_id = \$7 OR locks\.locked_until < now\(\)\)`).
+		WithArgs(lockerID, sqlmock.AnyArg(), view, lockerID, sqlmock.AnyArg(), view, lockerID).
 		WillReturnResult(sqlmock.NewResult(1, 1))
 
 	if affectedRows == 0 {
@@ -77,6 +77,21 @@ func (mr *MockEventstoreMockRecorder) Health(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockEventstore)(nil).Health), arg0)
 }
 
+// LatestSequence mocks base method
+func (m *MockEventstore) LatestSequence(arg0 context.Context, arg1 *models.SearchQueryFactory) (uint64, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "LatestSequence", arg0, arg1)
+	ret0, _ := ret[0].(uint64)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// LatestSequence indicates an expected call of LatestSequence
+func (mr *MockEventstoreMockRecorder) LatestSequence(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestSequence", reflect.TypeOf((*MockEventstore)(nil).LatestSequence), arg0, arg1)
+}
+
 // PushAggregates mocks base method
 func (m *MockEventstore) PushAggregates(arg0 context.Context, arg1 ...*models.Aggregate) error {
 	m.ctrl.T.Helper()
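
With the generated methods in place, a test can stub the new LatestSequence call in the usual gomock style. A sketch, assuming the package exposes the standard generated constructor NewMockEventstore:

ctrl := gomock.NewController(t) // t is the *testing.T of the calling test
defer ctrl.Finish()
es := mock.NewMockEventstore(ctrl) // assumed generated constructor name
es.EXPECT().LatestSequence(gomock.Any(), gomock.Any()).Return(uint64(42), nil)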
@@ -1,90 +1,191 @@
 package models
 
 import (
+	"github.com/caos/logging"
 	"github.com/caos/zitadel/internal/errors"
 )
 
-type SearchQuery struct {
+type SearchQueryFactory struct {
+	columns        Columns
+	limit          uint64
+	desc           bool
+	aggregateTypes []AggregateType
+	aggregateIDs   []string
+	eventSequence  uint64
+	eventTypes     []EventType
+	resourceOwner  string
+}
+
+type searchQuery struct {
+	Columns Columns
 	Limit   uint64
 	Desc    bool
 	Filters []*Filter
 }
 
-func NewSearchQuery() *SearchQuery {
-	return &SearchQuery{
-		Filters: make([]*Filter, 0, 4),
+type Columns int32
+
+const (
+	Columns_Event = iota
+	Columns_Max_Sequence
+	//insert new columns-types before this columnsCount because count is needed for validation
+	columnsCount
+)
+
+//FactoryFromSearchQuery is deprecated because it's for migration purposes. use NewSearchQueryFactory
+func FactoryFromSearchQuery(query *SearchQuery) *SearchQueryFactory {
+	factory := &SearchQueryFactory{
+		columns: Columns_Event,
+		desc:    query.Desc,
+		limit:   query.Limit,
 	}
+
+	for _, filter := range query.Filters {
+		switch filter.field {
+		case Field_AggregateType:
+			factory = factory.aggregateTypesMig(filter.value.([]AggregateType)...)
+		case Field_AggregateID:
+			if aggregateID, ok := filter.value.(string); ok {
+				factory = factory.AggregateIDs(aggregateID)
+			} else if aggregateIDs, ok := filter.value.([]string); ok {
+				factory = factory.AggregateIDs(aggregateIDs...)
+			}
+		case Field_LatestSequence:
+			factory = factory.SequenceGreater(filter.value.(uint64))
+		case Field_ResourceOwner:
+			factory = factory.ResourceOwner(filter.value.(string))
+		case Field_EventType:
+			factory = factory.EventTypes(filter.value.([]EventType)...)
+		case Field_EditorService, Field_EditorUser:
+			logging.Log("MODEL-Mr0VN").WithField("value", filter.value).Panic("field not converted to factory")
+		}
+	}
+
+	return factory
+}
+
+func NewSearchQueryFactory(aggregateTypes ...AggregateType) *SearchQueryFactory {
+	return &SearchQueryFactory{
+		aggregateTypes: aggregateTypes,
+	}
 }
 
-func (q *SearchQuery) SetLimit(limit uint64) *SearchQuery {
-	q.Limit = limit
-	return q
+func (factory *SearchQueryFactory) Columns(columns Columns) *SearchQueryFactory {
+	factory.columns = columns
+	return factory
 }
 
-func (q *SearchQuery) OrderDesc() *SearchQuery {
-	q.Desc = true
-	return q
+func (factory *SearchQueryFactory) Limit(limit uint64) *SearchQueryFactory {
+	factory.limit = limit
+	return factory
 }
 
-func (q *SearchQuery) OrderAsc() *SearchQuery {
-	q.Desc = false
-	return q
+func (factory *SearchQueryFactory) SequenceGreater(sequence uint64) *SearchQueryFactory {
+	factory.eventSequence = sequence
+	return factory
 }
 
-func (q *SearchQuery) AggregateIDFilter(id string) *SearchQuery {
-	return q.setFilter(NewFilter(Field_AggregateID, id, Operation_Equals))
+func (factory *SearchQueryFactory) AggregateIDs(ids ...string) *SearchQueryFactory {
+	factory.aggregateIDs = ids
+	return factory
 }
 
-func (q *SearchQuery) AggregateIDsFilter(ids ...string) *SearchQuery {
-	return q.setFilter(NewFilter(Field_AggregateID, ids, Operation_In))
+func (factory *SearchQueryFactory) aggregateTypesMig(types ...AggregateType) *SearchQueryFactory {
+	factory.aggregateTypes = types
+	return factory
 }
 
-func (q *SearchQuery) AggregateTypeFilter(types ...AggregateType) *SearchQuery {
-	return q.setFilter(NewFilter(Field_AggregateType, types, Operation_In))
+func (factory *SearchQueryFactory) EventTypes(types ...EventType) *SearchQueryFactory {
+	factory.eventTypes = types
+	return factory
 }
 
-func (q *SearchQuery) EventTypesFilter(types ...EventType) *SearchQuery {
-	return q.setFilter(NewFilter(Field_EventType, types, Operation_In))
+func (factory *SearchQueryFactory) ResourceOwner(resourceOwner string) *SearchQueryFactory {
+	factory.resourceOwner = resourceOwner
+	return factory
 }
 
-func (q *SearchQuery) LatestSequenceFilter(sequence uint64) *SearchQuery {
-	if sequence == 0 {
-		return q
+func (factory *SearchQueryFactory) OrderDesc() *SearchQueryFactory {
+	factory.desc = true
+	return factory
+}
+
+func (factory *SearchQueryFactory) OrderAsc() *SearchQueryFactory {
+	factory.desc = false
+	return factory
+}
+
+func (factory *SearchQueryFactory) Build() (*searchQuery, error) {
+	if factory == nil ||
+		len(factory.aggregateTypes) < 1 ||
+		(factory.columns < 0 || factory.columns >= columnsCount) {
+		return nil, errors.ThrowPreconditionFailed(nil, "MODEL-tGAD3", "factory invalid")
+	}
+	filters := []*Filter{
+		factory.aggregateTypeFilter(),
+	}
+
+	for _, f := range []func() *Filter{
+		factory.aggregateIDFilter,
+		factory.eventSequenceFilter,
+		factory.eventTypeFilter,
+		factory.resourceOwnerFilter,
+	} {
+		if filter := f(); filter != nil {
+			filters = append(filters, filter)
+		}
+	}
+
+	return &searchQuery{
+		Columns: factory.columns,
+		Limit:   factory.limit,
+		Desc:    factory.desc,
+		Filters: filters,
+	}, nil
+}
+
+func (factory *SearchQueryFactory) aggregateIDFilter() *Filter {
+	if len(factory.aggregateIDs) < 1 {
+		return nil
+	}
+	if len(factory.aggregateIDs) == 1 {
+		return NewFilter(Field_AggregateID, factory.aggregateIDs[0], Operation_Equals)
+	}
+	return NewFilter(Field_AggregateID, factory.aggregateIDs, Operation_In)
+}
+
+func (factory *SearchQueryFactory) eventTypeFilter() *Filter {
+	if len(factory.eventTypes) < 1 {
+		return nil
+	}
+	if len(factory.eventTypes) == 1 {
+		return NewFilter(Field_EventType, factory.eventTypes[0], Operation_Equals)
+	}
+	return NewFilter(Field_EventType, factory.eventTypes, Operation_In)
+}
+
+func (factory *SearchQueryFactory) aggregateTypeFilter() *Filter {
+	if len(factory.aggregateTypes) == 1 {
+		return NewFilter(Field_AggregateType, factory.aggregateTypes[0], Operation_Equals)
+	}
+	return NewFilter(Field_AggregateType, factory.aggregateTypes, Operation_In)
+}
+
+func (factory *SearchQueryFactory) eventSequenceFilter() *Filter {
+	if factory.eventSequence == 0 {
+		return nil
 	}
 	sortOrder := Operation_Greater
-	if q.Desc {
+	if factory.desc {
 		sortOrder = Operation_Less
 	}
-	return q.setFilter(NewFilter(Field_LatestSequence, sequence, sortOrder))
+	return NewFilter(Field_LatestSequence, factory.eventSequence, sortOrder)
 }
 
-func (q *SearchQuery) ResourceOwnerFilter(resourceOwner string) *SearchQuery {
-	return q.setFilter(NewFilter(Field_ResourceOwner, resourceOwner, Operation_Equals))
-}
-
-func (q *SearchQuery) setFilter(filter *Filter) *SearchQuery {
-	for i, f := range q.Filters {
-		if f.field == filter.field {
-			q.Filters[i] = filter
-			return q
-		}
-	}
-	q.Filters = append(q.Filters, filter)
-	return q
-}
-
-func (q *SearchQuery) Validate() error {
-	if q == nil {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-J5xQi", "search query is nil")
-	}
-	if len(q.Filters) == 0 {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-pF3DR", "no filters set")
-	}
-	for _, filter := range q.Filters {
-		if err := filter.Validate(); err != nil {
-			return err
-		}
-	}
-
-	return nil
+func (factory *SearchQueryFactory) resourceOwnerFilter() *Filter {
+	if factory.resourceOwner == "" {
+		return nil
+	}
+	return NewFilter(Field_ResourceOwner, factory.resourceOwner, Operation_Equals)
 }
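
Taken together, the factory replaces the mutable SearchQuery with a chainable builder whose validation happens once, in Build. A minimal usage sketch with the identifiers introduced above:

query, err := NewSearchQueryFactory("user", "org").
	AggregateIDs("1234").
	SequenceGreater(8).
	Limit(100).
	OrderAsc().
	Build()
if err != nil {
	// precondition failed: no aggregate type, or Columns out of range
	return
}
_ = query // handed to the sql layer, which renders the SELECT covered by the tests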
internal/eventstore/models/search_query_old.go (new file, 90 lines)
@@ -0,0 +1,90 @@
+package models
+
+import "github.com/caos/zitadel/internal/errors"
+
+//SearchQuery is deprecated. Use SearchQueryFactory
+type SearchQuery struct {
+	Limit   uint64
+	Desc    bool
+	Filters []*Filter
+}
+
+//NewSearchQuery is deprecated. Use SearchQueryFactory
+func NewSearchQuery() *SearchQuery {
+	return &SearchQuery{
+		Filters: make([]*Filter, 0, 4),
+	}
+}
+
+func (q *SearchQuery) SetLimit(limit uint64) *SearchQuery {
+	q.Limit = limit
+	return q
+}
+
+func (q *SearchQuery) OrderDesc() *SearchQuery {
+	q.Desc = true
+	return q
+}
+
+func (q *SearchQuery) OrderAsc() *SearchQuery {
+	q.Desc = false
+	return q
+}
+
+func (q *SearchQuery) AggregateIDFilter(id string) *SearchQuery {
+	return q.setFilter(NewFilter(Field_AggregateID, id, Operation_Equals))
+}
+
+func (q *SearchQuery) AggregateIDsFilter(ids ...string) *SearchQuery {
+	return q.setFilter(NewFilter(Field_AggregateID, ids, Operation_In))
+}
+
+func (q *SearchQuery) AggregateTypeFilter(types ...AggregateType) *SearchQuery {
+	return q.setFilter(NewFilter(Field_AggregateType, types, Operation_In))
+}
+
+func (q *SearchQuery) EventTypesFilter(types ...EventType) *SearchQuery {
+	return q.setFilter(NewFilter(Field_EventType, types, Operation_In))
+}
+
+func (q *SearchQuery) LatestSequenceFilter(sequence uint64) *SearchQuery {
+	if sequence == 0 {
+		return q
+	}
+	sortOrder := Operation_Greater
+	if q.Desc {
+		sortOrder = Operation_Less
+	}
+	return q.setFilter(NewFilter(Field_LatestSequence, sequence, sortOrder))
+}
+
+func (q *SearchQuery) ResourceOwnerFilter(resourceOwner string) *SearchQuery {
+	return q.setFilter(NewFilter(Field_ResourceOwner, resourceOwner, Operation_Equals))
+}
+
+func (q *SearchQuery) setFilter(filter *Filter) *SearchQuery {
+	for i, f := range q.Filters {
+		if f.field == filter.field {
+			q.Filters[i] = filter
+			return q
+		}
+	}
+	q.Filters = append(q.Filters, filter)
+	return q
+}
+
+func (q *SearchQuery) Validate() error {
+	if q == nil {
+		return errors.ThrowPreconditionFailed(nil, "MODEL-J5xQi", "search query is nil")
+	}
+	if len(q.Filters) == 0 {
+		return errors.ThrowPreconditionFailed(nil, "MODEL-pF3DR", "no filters set")
+	}
+	for _, filter := range q.Filters {
+		if err := filter.Validate(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
internal/eventstore/models/search_query_old_test.go (new file, 65 lines)
@@ -0,0 +1,65 @@
+package models
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestSearchQuery_setFilter(t *testing.T) {
+	type fields struct {
+		query *SearchQuery
+	}
+	type args struct {
+		filters []*Filter
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   *SearchQuery
+	}{
+		{
+			name:   "set idFilter",
+			fields: fields{query: NewSearchQuery()},
+			args: args{filters: []*Filter{
+				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
+			}},
+			want: &SearchQuery{Filters: []*Filter{
+				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
+			}},
+		},
+		{
+			name:   "overwrite idFilter",
+			fields: fields{query: NewSearchQuery()},
+			args: args{filters: []*Filter{
+				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
+				{field: Field_AggregateID, operation: Operation_Equals, value: "ursli"},
+			}},
+			want: &SearchQuery{Filters: []*Filter{
+				{field: Field_AggregateID, operation: Operation_Equals, value: "ursli"},
+			}},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.fields.query
+			for _, filter := range tt.args.filters {
+				got = got.setFilter(filter)
+			}
+			for _, wantFilter := range tt.want.Filters {
+				found := false
+				for _, gotFilter := range got.Filters {
+					if gotFilter.field == wantFilter.field {
+						found = true
+						if !reflect.DeepEqual(wantFilter, gotFilter) {
+							t.Errorf("filter not as expected: want: %v got %v", wantFilter, gotFilter)
+						}
+					}
+				}
+				if !found {
+					t.Errorf("filter field %v not found", wantFilter.field)
+				}
+			}
+		})
+	}
+}
@@ -3,62 +3,462 @@ package models
 import (
 	"reflect"
 	"testing"
+
+	"github.com/caos/zitadel/internal/errors"
 )
 
-func TestSearchQuery_setFilter(t *testing.T) {
-	type fields struct {
-		query *SearchQuery
+func testSetColumns(columns Columns) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.Columns(columns)
+		return factory
 	}
+}
+
+func testSetLimit(limit uint64) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.Limit(limit)
+		return factory
+	}
+}
+
+func testSetSequence(sequence uint64) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.SequenceGreater(sequence)
+		return factory
+	}
+}
+
+func testSetAggregateIDs(aggregateIDs ...string) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.AggregateIDs(aggregateIDs...)
+		return factory
+	}
+}
+
+func testSetEventTypes(eventTypes ...EventType) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.EventTypes(eventTypes...)
+		return factory
+	}
+}
+
+func testSetResourceOwner(resourceOwner string) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		factory = factory.ResourceOwner(resourceOwner)
+		return factory
+	}
+}
+
+func testSetSortOrder(asc bool) func(factory *SearchQueryFactory) *SearchQueryFactory {
+	return func(factory *SearchQueryFactory) *SearchQueryFactory {
+		if asc {
+			factory = factory.OrderAsc()
+		} else {
+			factory = factory.OrderDesc()
+		}
+		return factory
+	}
+}
+
+func TestSearchQueryFactorySetters(t *testing.T) {
 	type args struct {
-		filters []*Filter
+		aggregateTypes []AggregateType
+		setters        []func(*SearchQueryFactory) *SearchQueryFactory
 	}
 	tests := []struct {
-		name   string
-		fields fields
-		args   args
-		want   *SearchQuery
+		name string
+		args args
+		res  *SearchQueryFactory
 	}{
 		{
-			name:   "set idFilter",
-			fields: fields{query: NewSearchQuery()},
-			args: args{filters: []*Filter{
-				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
-			}},
-			want: &SearchQuery{Filters: []*Filter{
-				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
-			}},
+			name: "New factory",
+			args: args{
+				aggregateTypes: []AggregateType{"user", "org"},
+			},
+			res: &SearchQueryFactory{
+				aggregateTypes: []AggregateType{"user", "org"},
+			},
 		},
 		{
-			name:   "overwrite idFilter",
-			fields: fields{query: NewSearchQuery()},
-			args: args{filters: []*Filter{
-				{field: Field_AggregateID, operation: Operation_Equals, value: "hodor"},
-				{field: Field_AggregateID, operation: Operation_Equals, value: "ursli"},
-			}},
-			want: &SearchQuery{Filters: []*Filter{
-				{field: Field_AggregateID, operation: Operation_Equals, value: "ursli"},
-			}},
+			name: "set columns",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetColumns(Columns_Max_Sequence)},
+			},
+			res: &SearchQueryFactory{
+				columns: Columns_Max_Sequence,
+			},
 		},
+		{
+			name: "set limit",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetLimit(100)},
+			},
+			res: &SearchQueryFactory{
+				limit: 100,
+			},
+		},
+		{
+			name: "set sequence",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetSequence(90)},
+			},
+			res: &SearchQueryFactory{
+				eventSequence: 90,
+			},
+		},
+		{
+			name: "set aggregateIDs",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetAggregateIDs("1235", "09824")},
+			},
+			res: &SearchQueryFactory{
+				aggregateIDs: []string{"1235", "09824"},
+			},
+		},
+		{
+			name: "set eventTypes",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetEventTypes("user.created", "user.updated")},
+			},
+			res: &SearchQueryFactory{
+				eventTypes: []EventType{"user.created", "user.updated"},
+			},
+		},
+		{
+			name: "set resource owner",
+			args: args{
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetResourceOwner("hodor")},
+			},
+			res: &SearchQueryFactory{
+				resourceOwner: "hodor",
+			},
+		},
+		{
+			name: "default search query",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters:        []func(*SearchQueryFactory) *SearchQueryFactory{testSetAggregateIDs("1235", "024"), testSetSortOrder(false)},
+			},
+			res: &SearchQueryFactory{
+				aggregateTypes: []AggregateType{"user"},
+				aggregateIDs:   []string{"1235", "024"},
+				desc:           true,
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := tt.fields.query
-			for _, filter := range tt.args.filters {
-				got = got.setFilter(filter)
+			factory := NewSearchQueryFactory(tt.args.aggregateTypes...)
+			for _, setter := range tt.args.setters {
+				factory = setter(factory)
 			}
-			for _, wantFilter := range tt.want.Filters {
-				found := false
-				for _, gotFilter := range got.Filters {
-					if gotFilter.field == wantFilter.field {
-						found = true
-						if !reflect.DeepEqual(wantFilter, gotFilter) {
-							t.Errorf("filter not as expected: want: %v got %v", wantFilter, gotFilter)
-						}
-					}
-				}
-				if !found {
-					t.Errorf("filter field %v not found", wantFilter.field)
-				}
+			if !reflect.DeepEqual(factory, tt.res) {
+				t.Errorf("NewSearchQueryFactory() = %v, want %v", factory, tt.res)
 			}
 		})
 	}
 }
+
+func TestSearchQueryFactoryBuild(t *testing.T) {
+	type args struct {
+		aggregateTypes []AggregateType
+		setters        []func(*SearchQueryFactory) *SearchQueryFactory
+	}
+	type res struct {
+		isErr func(err error) bool
+		query *searchQuery
+	}
+	tests := []struct {
+		name string
+		args args
+		res  res
+	}{
+		{
+			name: "no aggregate types",
+			args: args{
+				aggregateTypes: []AggregateType{},
+				setters:        []func(*SearchQueryFactory) *SearchQueryFactory{},
+			},
+			res: res{
+				isErr: errors.IsPreconditionFailed,
+				query: nil,
+			},
+		},
+		{
+			name: "invalid column (too low)",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetColumns(Columns(-1)),
+				},
+			},
+			res: res{
+				isErr: errors.IsPreconditionFailed,
+			},
+		},
+		{
+			name: "invalid column (too high)",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetColumns(columnsCount),
+				},
+			},
+			res: res{
+				isErr: errors.IsPreconditionFailed,
+			},
+		},
+		{
+			name: "filter aggregate type",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters:        []func(*SearchQueryFactory) *SearchQueryFactory{},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate types",
+			args: args{
+				aggregateTypes: []AggregateType{"user", "org"},
+				setters:        []func(*SearchQueryFactory) *SearchQueryFactory{},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, []AggregateType{"user", "org"}, Operation_In),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type, limit, desc",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetLimit(5),
+					testSetSortOrder(false),
+					testSetSequence(100),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    true,
+					Limit:   5,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_LatestSequence, uint64(100), Operation_Less),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type, limit, asc",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetLimit(5),
+					testSetSortOrder(true),
+					testSetSequence(100),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   5,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_LatestSequence, uint64(100), Operation_Greater),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type, limit, desc, max event sequence cols",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetLimit(5),
+					testSetSortOrder(false),
+					testSetSequence(100),
+					testSetColumns(Columns_Max_Sequence),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: Columns_Max_Sequence,
+					Desc:    true,
+					Limit:   5,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_LatestSequence, uint64(100), Operation_Less),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type and aggregate id",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetAggregateIDs("1234"),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_AggregateID, "1234", Operation_Equals),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type and aggregate ids",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetAggregateIDs("1234", "0815"),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_AggregateID, []string{"1234", "0815"}, Operation_In),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type and sequence greater",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetSequence(8),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_LatestSequence, uint64(8), Operation_Greater),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type and event type",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetEventTypes("user.created"),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_EventType, EventType("user.created"), Operation_Equals),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type and event types",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetEventTypes("user.created", "user.changed"),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_EventType, []EventType{"user.created", "user.changed"}, Operation_In),
+					},
+				},
+			},
+		},
+		{
+			name: "filter aggregate type resource owner",
+			args: args{
+				aggregateTypes: []AggregateType{"user"},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
+					testSetResourceOwner("hodor"),
+				},
+			},
+			res: res{
+				isErr: nil,
+				query: &searchQuery{
+					Columns: 0,
+					Desc:    false,
+					Limit:   0,
+					Filters: []*Filter{
+						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+						NewFilter(Field_ResourceOwner, "hodor", Operation_Equals),
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			factory := NewSearchQueryFactory(tt.args.aggregateTypes...)
+			for _, f := range tt.args.setters {
+				factory = f(factory)
+			}
+			query, err := factory.Build()
+			if tt.res.isErr != nil && !tt.res.isErr(err) {
+				t.Errorf("wrong error: %v", err)
+				return
+			}
+			if err != nil && tt.res.isErr == nil {
+				t.Errorf("no error expected: %v", err)
+				return
+			}
+
+			if !reflect.DeepEqual(query, tt.res.query) {
+				t.Errorf("NewSearchQueryFactory() = %v, want %v", factory, tt.res)
+			}
+		})
+	}
+}
@@ -1,6 +1,8 @@
 package query
 
 import (
+	"time"
+
 	"github.com/caos/zitadel/internal/eventstore/models"
 )
 
@@ -9,4 +11,6 @@ type Handler interface {
 	EventQuery() (*models.SearchQuery, error)
 	Reduce(*models.Event) error
 	OnError(event *models.Event, err error) error
+	MinimumCycleDuration() time.Duration
+	QueryLimit() uint64
 }
@@ -1,28 +1,34 @@
 package spooler
 
 import (
+	"os"
+
 	"github.com/caos/logging"
 	"github.com/caos/zitadel/internal/eventstore"
+	"github.com/caos/zitadel/internal/eventstore/query"
 	"github.com/caos/zitadel/internal/id"
 )
 
 type Config struct {
-	Eventstore      eventstore.Eventstore
-	Locker          Locker
-	ViewHandlers    []Handler
-	ConcurrentTasks int
+	Eventstore        eventstore.Eventstore
+	Locker            Locker
+	ViewHandlers      []query.Handler
+	ConcurrentWorkers int
 }
 
 func (c *Config) New() *Spooler {
-	lockID, err := id.SonyFlakeGenerator.Next()
-	logging.Log("SPOOL-bdO56").OnError(err).Panic("unable to generate lockID")
+	lockID, err := os.Hostname()
+	if err != nil || lockID == "" {
+		lockID, err = id.SonyFlakeGenerator.Next()
+		logging.Log("SPOOL-bdO56").OnError(err).Panic("unable to generate lockID")
+	}
 
 	return &Spooler{
-		handlers:        c.ViewHandlers,
-		lockID:          lockID,
-		eventstore:      c.Eventstore,
-		locker:          c.Locker,
-		queue:           make(chan *spooledHandler),
-		concurrentTasks: c.ConcurrentTasks,
+		handlers:   c.ViewHandlers,
+		lockID:     lockID,
+		eventstore: c.Eventstore,
+		locker:     c.Locker,
+		queue:      make(chan *spooledHandler),
+		workers:    c.ConcurrentWorkers,
 	}
 }
@@ -2,6 +2,7 @@ package spooler
 
 import (
 	"context"
+	"strconv"
 
 	"github.com/caos/logging"
 	"github.com/caos/zitadel/internal/eventstore"
@@ -13,17 +14,12 @@ import (
 )
 
 type Spooler struct {
-	handlers        []Handler
-	locker          Locker
-	lockID          string
-	eventstore      eventstore.Eventstore
-	concurrentTasks int
-	queue           chan *spooledHandler
-}
-
-type Handler interface {
-	query.Handler
-	MinimumCycleDuration() time.Duration
+	handlers   []query.Handler
+	locker     Locker
+	lockID     string
+	eventstore eventstore.Eventstore
+	workers    int
+	queue      chan *spooledHandler
 }
 
 type Locker interface {
@@ -31,69 +27,78 @@ type Locker interface {
 }
 
 type spooledHandler struct {
-	Handler
+	query.Handler
 	locker     Locker
-	lockID     string
 	queuedAt   time.Time
 	eventstore eventstore.Eventstore
 }
 
 func (s *Spooler) Start() {
-	defer logging.LogWithFields("SPOOL-N0V1g", "lockerID", s.lockID, "workers", s.concurrentTasks).Info("spooler started")
-	if s.concurrentTasks < 1 {
+	defer logging.LogWithFields("SPOOL-N0V1g", "lockerID", s.lockID, "workers", s.workers).Info("spooler started")
+	if s.workers < 1 {
 		return
 	}
-	for i := 0; i < s.concurrentTasks; i++ {
-		go func() {
-			for handler := range s.queue {
+
+	for i := 0; i < s.workers; i++ {
+		go func(workerIdx int) {
+			workerID := s.lockID + "--" + strconv.Itoa(workerIdx)
+			for task := range s.queue {
 				go func(handler *spooledHandler, queue chan<- *spooledHandler) {
 					time.Sleep(handler.MinimumCycleDuration() - time.Since(handler.queuedAt))
 					handler.queuedAt = time.Now()
 					queue <- handler
-				}(handler, s.queue)
+				}(task, s.queue)
 
-				handler.load()
+				task.load(workerID)
 			}
-		}()
+		}(i)
 	}
 	for _, handler := range s.handlers {
-		handler := &spooledHandler{handler, s.locker, s.lockID, time.Now(), s.eventstore}
+		handler := &spooledHandler{Handler: handler, locker: s.locker, queuedAt: time.Now(), eventstore: s.eventstore}
 		s.queue <- handler
 	}
 }
 
-func (s *spooledHandler) load() {
+func (s *spooledHandler) load(workerID string) {
 	errs := make(chan error)
-	ctx, cancel := context.WithCancel(context.Background())
-	go s.awaitError(cancel, errs)
-	hasLocked := s.lock(ctx, errs)
-
 	defer close(errs)
+	ctx, cancel := context.WithCancel(context.Background())
+	go s.awaitError(cancel, errs, workerID)
+	hasLocked := s.lock(ctx, errs, workerID)
+
 	if <-hasLocked {
+		go func() {
+			for l := range hasLocked {
+				if !l {
+					// we only need to break. An error is already written by the lock-routine to the errs channel
+					break
+				}
+			}
+		}()
 		events, err := s.query(ctx)
 		if err != nil {
 			errs <- err
 		} else {
-			errs <- s.process(ctx, events)
+			errs <- s.process(ctx, events, workerID)
+			logging.Log("SPOOL-0pV8o").WithField("view", s.ViewModel()).WithField("worker", workerID).Debug("process done")
 		}
 	}
 	<-ctx.Done()
 }
 
-func (s *spooledHandler) awaitError(cancel func(), errs chan error) {
+func (s *spooledHandler) awaitError(cancel func(), errs chan error, workerID string) {
 	select {
 	case err := <-errs:
 		cancel()
-		logging.Log("SPOOL-K2lst").OnError(err).WithField("view", s.ViewModel()).Debug("load canceled")
+		logging.Log("SPOOL-K2lst").OnError(err).WithField("view", s.ViewModel()).WithField("worker", workerID).Debug("load canceled")
 	}
 }
 
-func (s *spooledHandler) process(ctx context.Context, events []*models.Event) error {
+func (s *spooledHandler) process(ctx context.Context, events []*models.Event, workerID string) error {
 	for _, event := range events {
 		select {
 		case <-ctx.Done():
-			logging.Log("SPOOL-FTKwH").WithField("view", s.ViewModel()).Debug("context canceled")
+			logging.LogWithFields("SPOOL-FTKwH", "view", s.ViewModel(), "worker", workerID).Debug("context canceled")
 			return nil
 		default:
 			if err := s.Reduce(event); err != nil {
@@ -129,13 +134,27 @@ func (s *spooledHandler) query(ctx context.Context) ([]*models.Event, error) {
 	if err != nil {
 		return nil, err
 	}
+	factory := models.FactoryFromSearchQuery(query)
+	sequence, err := s.eventstore.LatestSequence(ctx, factory)
+	logging.Log("SPOOL-7SciK").OnError(err).Debug("unable to query latest sequence")
+	var processedSequence uint64
+	for _, filter := range query.Filters {
+		if filter.GetField() == models.Field_LatestSequence {
+			processedSequence = filter.GetValue().(uint64)
+		}
+	}
+	if sequence != 0 && processedSequence == sequence {
+		return nil, nil
+	}
+
 	query.Limit = s.QueryLimit()
 	return s.eventstore.FilterEvents(ctx, query)
 }
 
-func (s *spooledHandler) lock(ctx context.Context, errs chan<- error) chan bool {
+func (s *spooledHandler) lock(ctx context.Context, errs chan<- error, workerID string) chan bool {
 	renewTimer := time.After(0)
-	renewDuration := s.MinimumCycleDuration() - 50*time.Millisecond
-	locked := make(chan bool, 1)
+	renewDuration := s.MinimumCycleDuration()
+	locked := make(chan bool)
 
 	go func(locked chan bool) {
 		for {
@@ -143,7 +162,9 @@ func (s *spooledHandler) lock(ctx context.Context, errs chan<- error) chan bool
 			select {
 			case <-ctx.Done():
 				return
 			case <-renewTimer:
-				err := s.locker.Renew(s.lockID, s.ViewModel(), s.MinimumCycleDuration()*2)
+				logging.Log("SPOOL-K2lst").WithField("view", s.ViewModel()).WithField("worker", workerID).Debug("renew")
+				err := s.locker.Renew(workerID, s.ViewModel(), s.MinimumCycleDuration()*2)
+				logging.Log("SPOOL-K2lst").WithField("view", s.ViewModel()).WithField("worker", workerID).WithError(err).Debug("renew done")
 				if err == nil {
 					locked <- true
 					renewTimer = time.After(renewDuration)
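
The worker loop derives a stable per-goroutine ID from the spooler's lockID (the hostname where available) plus the worker index, and that ID, not the bare lockID, now owns the database lease. A wiring sketch with placeholder values:

cfg := &Config{
	Eventstore:        es,       // placeholder eventstore.Eventstore
	Locker:            locker,   // placeholder Locker implementation
	ViewHandlers:      handlers, // the query.Handler views to spool
	ConcurrentWorkers: 1,
}
cfg.New().Start() // on host "myhost", workers lock as "myhost--0", "myhost--1", ...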
@@ -9,6 +9,7 @@ import (
 	"github.com/caos/zitadel/internal/errors"
 	"github.com/caos/zitadel/internal/eventstore"
 	"github.com/caos/zitadel/internal/eventstore/models"
+	"github.com/caos/zitadel/internal/eventstore/query"
 	"github.com/caos/zitadel/internal/eventstore/spooler/mock"
 	"github.com/caos/zitadel/internal/view/repository"
 	"github.com/golang/mock/gomock"
@@ -20,13 +21,17 @@ type testHandler struct {
 	processError error
 	queryError   error
 	viewModel    string
+	bulkLimit    uint64
 }
 
 func (h *testHandler) ViewModel() string {
 	return h.viewModel
 }
 func (h *testHandler) EventQuery() (*models.SearchQuery, error) {
-	return nil, h.queryError
+	if h.queryError != nil {
+		return nil, h.queryError
+	}
+	return &models.SearchQuery{}, nil
 }
 func (h *testHandler) Reduce(*models.Event) error {
 	<-time.After(h.processSleep)
@@ -35,7 +40,12 @@ func (h *testHandler) Reduce(*models.Event) error {
 func (h *testHandler) OnError(event *models.Event, err error) error {
 	return err
 }
-func (h *testHandler) MinimumCycleDuration() time.Duration { return h.cycleDuration }
+func (h *testHandler) MinimumCycleDuration() time.Duration {
+	return h.cycleDuration
+}
+func (h *testHandler) QueryLimit() uint64 {
+	return h.bulkLimit
+}
 
 type eventstoreStub struct {
 	events []*models.Event
@@ -60,9 +70,13 @@ func (es *eventstoreStub) PushAggregates(ctx context.Context, in ...*models.Aggregate) error {
 	return nil
 }
 
+func (es *eventstoreStub) LatestSequence(ctx context.Context, in *models.SearchQueryFactory) (uint64, error) {
+	return 0, nil
+}
+
 func TestSpooler_process(t *testing.T) {
 	type fields struct {
-		currentHandler Handler
+		currentHandler query.Handler
 	}
 	type args struct {
 		timeout time.Duration
@@ -81,7 +95,7 @@ func TestSpooler_process(t *testing.T) {
 			},
 			args: args{
 				timeout: 0,
-				events:  []*models.Event{&models.Event{}, &models.Event{}},
+				events:  []*models.Event{{}, {}},
 			},
 			wantErr: false,
 		},
@@ -92,7 +106,7 @@ func TestSpooler_process(t *testing.T) {
 			},
 			args: args{
 				timeout: 1 * time.Second,
-				events:  []*models.Event{&models.Event{}, &models.Event{}, &models.Event{}, &models.Event{}},
+				events:  []*models.Event{{}, {}, {}, {}},
 			},
 			wantErr: false,
 		},
@@ -102,7 +116,7 @@ func TestSpooler_process(t *testing.T) {
 				currentHandler: &testHandler{processSleep: 1 * time.Second, processError: fmt.Errorf("i am an error")},
 			},
 			args: args{
-				events: []*models.Event{&models.Event{}, &models.Event{}},
+				events: []*models.Event{{}, {}},
 			},
 			wantErr: true,
 		},
@@ -120,7 +134,7 @@ func TestSpooler_process(t *testing.T) {
 				start = time.Now()
 			}
 
-			if err := s.process(ctx, tt.args.events); (err != nil) != tt.wantErr {
+			if err := s.process(ctx, tt.args.events, "test"); (err != nil) != tt.wantErr {
 				t.Errorf("Spooler.process() error = %v, wantErr %v", err, tt.wantErr)
 			}
 
@@ -134,7 +148,7 @@ func TestSpooler_process(t *testing.T) {
 
 func TestSpooler_awaitError(t *testing.T) {
 	type fields struct {
-		currentHandler Handler
+		currentHandler query.Handler
 		err            error
 		canceled       bool
 	}
@@ -167,7 +181,7 @@ func TestSpooler_awaitError(t *testing.T) {
 			errs := make(chan error)
 			ctx, cancel := context.WithCancel(context.Background())
 
-			go s.awaitError(cancel, errs)
+			go s.awaitError(cancel, errs, "test")
 			errs <- tt.fields.err
 
 			if ctx.Err() == nil {
@@ -180,9 +194,8 @@ func TestSpooler_awaitError(t *testing.T) {
 // TestSpooler_load checks if load terminates
 func TestSpooler_load(t *testing.T) {
 	type fields struct {
-		currentHandler Handler
+		currentHandler query.Handler
 		locker         *testLocker
-		lockID         string
 		eventstore     eventstore.Eventstore
 	}
 	tests := []struct {
@@ -193,7 +206,6 @@ func TestSpooler_load(t *testing.T) {
 			"lock exists",
 			fields{
 				currentHandler: &testHandler{processSleep: 500 * time.Millisecond, viewModel: "testView", cycleDuration: 1 * time.Second},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, fmt.Errorf("lock already exists"), 2000*time.Millisecond),
 			},
 		},
@@ -201,16 +213,14 @@ func TestSpooler_load(t *testing.T) {
 			"lock fails",
 			fields{
 				currentHandler: &testHandler{processSleep: 100 * time.Millisecond, viewModel: "testView", cycleDuration: 1 * time.Second},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, fmt.Errorf("fail"), 2000*time.Millisecond),
-				eventstore:     &eventstoreStub{events: []*models.Event{&models.Event{}}},
+				eventstore:     &eventstoreStub{events: []*models.Event{{}}},
 			},
 		},
 		{
 			"query fails",
 			fields{
 				currentHandler: &testHandler{processSleep: 100 * time.Millisecond, viewModel: "testView", queryError: fmt.Errorf("query fail"), cycleDuration: 1 * time.Second},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, nil, 2000*time.Millisecond),
 				eventstore:     &eventstoreStub{err: fmt.Errorf("fail")},
 			},
@@ -219,9 +229,8 @@ func TestSpooler_load(t *testing.T) {
 			"process event fails",
 			fields{
 				currentHandler: &testHandler{processError: fmt.Errorf("oups"), processSleep: 100 * time.Millisecond, viewModel: "testView", cycleDuration: 500 * time.Millisecond},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, nil, 1000*time.Millisecond),
-				eventstore:     &eventstoreStub{events: []*models.Event{&models.Event{}}},
+				eventstore:     &eventstoreStub{events: []*models.Event{{}}},
 			},
 		},
 	}
@@ -231,19 +240,17 @@ func TestSpooler_load(t *testing.T) {
 			s := &spooledHandler{
 				Handler:    tt.fields.currentHandler,
 				locker:     tt.fields.locker.mock,
-				lockID:     tt.fields.lockID,
 				eventstore: tt.fields.eventstore,
 			}
-			s.load()
+			s.load("test-worker")
 		})
 	}
 }
 
 func TestSpooler_lock(t *testing.T) {
 	type fields struct {
-		currentHandler Handler
+		currentHandler query.Handler
 		locker         *testLocker
-		lockID         string
 		expectsErr     bool
 	}
 	type args struct {
@@ -258,7 +265,6 @@ func TestSpooler_lock(t *testing.T) {
 			"renew correct",
 			fields{
 				currentHandler: &testHandler{cycleDuration: 1 * time.Second, viewModel: "testView"},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, nil, 2000*time.Millisecond),
 				expectsErr:     false,
 			},
@@ -270,7 +276,6 @@ func TestSpooler_lock(t *testing.T) {
 			"renew fails",
 			fields{
 				currentHandler: &testHandler{cycleDuration: 900 * time.Millisecond, viewModel: "testView"},
-				lockID:         "testID",
 				locker:         newTestLocker(t, "testID", "testView").expectRenew(t, fmt.Errorf("renew failed"), 1800*time.Millisecond),
 				expectsErr:     true,
 			},
@@ -285,13 +290,12 @@ func TestSpooler_lock(t *testing.T) {
|
||||
s := &spooledHandler{
|
||||
Handler: tt.fields.currentHandler,
|
||||
locker: tt.fields.locker.mock,
|
||||
lockID: tt.fields.lockID,
|
||||
}
|
||||
|
||||
errs := make(chan error, 1)
|
||||
ctx, _ := context.WithDeadline(context.Background(), tt.args.deadline)
|
||||
|
||||
locked := s.lock(ctx, errs)
|
||||
locked := s.lock(ctx, errs, "test-worker")
|
||||
|
||||
if tt.fields.expectsErr {
|
||||
err := <-errs
|
||||
@@ -321,7 +325,7 @@ func newTestLocker(t *testing.T, lockerID, viewName string) *testLocker {
|
||||
}
|
||||
|
||||
func (l *testLocker) expectRenew(t *testing.T, err error, waitTime time.Duration) *testLocker {
|
||||
l.mock.EXPECT().Renew(l.lockerID, l.viewName, gomock.Any()).DoAndReturn(
|
||||
l.mock.EXPECT().Renew(gomock.Any(), l.viewName, gomock.Any()).DoAndReturn(
|
||||
func(_, _ string, gotten time.Duration) error {
|
||||
if waitTime-gotten != 0 {
|
||||
t.Errorf("expected waittime %v got %v", waitTime, gotten)
|
||||
|
@@ -2,6 +2,7 @@ package eventsourcing
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
mock_cache "github.com/caos/zitadel/internal/cache/mock"
|
||||
"github.com/caos/zitadel/internal/eventstore/mock"
|
||||
es_models "github.com/caos/zitadel/internal/eventstore/models"
|
||||
|
@@ -1,9 +1,10 @@
|
||||
package view
|
||||
|
||||
import (
|
||||
"github.com/caos/zitadel/internal/view/repository"
|
||||
"time"
|
||||
|
||||
"github.com/caos/zitadel/internal/view/repository"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
|
||||
key_model "github.com/caos/zitadel/internal/key/model"
|
||||
@@ -48,12 +49,8 @@ func GetActivePublicKeys(db *gorm.DB, table string) ([]*model.KeyView, error) {
|
||||
}
|
||||
|
||||
func PutKeys(db *gorm.DB, table string, privateKey, publicKey *model.KeyView) error {
|
||||
save := repository.PrepareSave(table)
|
||||
err := save(db, privateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return save(db, publicKey)
|
||||
save := repository.PrepareBulkSave(table)
|
||||
return save(db, privateKey, publicKey)
|
||||
}
|
||||
|
||||
func DeleteKey(db *gorm.DB, table, keyID string, private bool) error {
|
||||
|
@@ -2,13 +2,13 @@ package handler
|
||||
|
||||
import (
|
||||
"github.com/caos/logging"
|
||||
|
||||
"github.com/caos/zitadel/internal/eventstore/models"
|
||||
"github.com/caos/zitadel/internal/eventstore/spooler"
|
||||
"github.com/caos/zitadel/internal/project/repository/eventsourcing"
|
||||
proj_event "github.com/caos/zitadel/internal/project/repository/eventsourcing"
|
||||
es_model "github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
|
||||
view_model "github.com/caos/zitadel/internal/project/repository/view/model"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
@@ -20,8 +20,6 @@ const (
|
||||
applicationTable = "management.applications"
|
||||
)
|
||||
|
||||
func (p *Application) MinimumCycleDuration() time.Duration { return p.cycleDuration }
|
||||
|
||||
func (p *Application) ViewModel() string {
|
||||
return applicationTable
|
||||
}
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/caos/zitadel/internal/config/types"
|
||||
"github.com/caos/zitadel/internal/eventstore"
|
||||
"github.com/caos/zitadel/internal/eventstore/spooler"
|
||||
"github.com/caos/zitadel/internal/eventstore/query"
|
||||
"github.com/caos/zitadel/internal/management/repository/eventsourcing/view"
|
||||
org_event "github.com/caos/zitadel/internal/org/repository/eventsourcing"
|
||||
proj_event "github.com/caos/zitadel/internal/project/repository/eventsourcing"
|
||||
@@ -31,8 +31,8 @@ type EventstoreRepos struct {
|
||||
OrgEvents *org_event.OrgEventstore
|
||||
}
|
||||
|
||||
func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos) []spooler.Handler {
|
||||
return []spooler.Handler{
|
||||
func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos) []query.Handler {
|
||||
return []query.Handler{
|
||||
&Project{handler: handler{view, bulkLimit, configs.cycleDuration("Project"), errorCount}, eventstore: eventstore},
|
||||
&ProjectGrant{handler: handler{view, bulkLimit, configs.cycleDuration("ProjectGrant"), errorCount}, eventstore: eventstore, projectEvents: repos.ProjectEvents, orgEvents: repos.OrgEvents},
|
||||
&ProjectRole{handler: handler{view, bulkLimit, configs.cycleDuration("ProjectRole"), errorCount}, projectEvents: repos.ProjectEvents},
|
||||
@@ -54,3 +54,11 @@ func (configs Configs) cycleDuration(viewModel string) time.Duration {
|
||||
}
|
||||
return c.MinimumCycleDuration.Duration
|
||||
}
|
||||
|
||||
func (h *handler) MinimumCycleDuration() time.Duration {
|
||||
return h.cycleDuration
|
||||
}
|
||||
|
||||
func (h *handler) QueryLimit() uint64 {
|
||||
return h.bulkLimit
|
||||
}
|
||||
|
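Taken together, the handler changes in this diff replace spooler.Handler with query.Handler and move MinimumCycleDuration and QueryLimit onto the shared handler struct. A hedged sketch of the contract this implies, in Go; only the three methods visible in the diff are listed, and the actual definition in internal/eventstore/query may contain more:

// Sketch only: inferred from the methods this diff adds and keeps,
// not copied from internal/eventstore/query.
type Handler interface {
	ViewModel() string                   // name of the view table the handler fills
	MinimumCycleDuration() time.Duration // how often the spooler re-runs the handler
	QueryLimit() uint64                  // bulk limit used when querying the eventstore
	// ...plus the event query/reduce/error methods this diff does not show
}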
@@ -1,14 +1,13 @@
package handler

import (
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
"time"

"github.com/caos/logging"

es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/org/repository/eventsourcing"
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
)

type Org struct {
@@ -19,8 +18,6 @@ const (
orgTable = "management.orgs"
)

func (o *Org) MinimumCycleDuration() time.Duration { return o.cycleDuration }

func (o *Org) ViewModel() string {
return orgTable
}

@@ -1,14 +1,13 @@
package handler

import (
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
)

type OrgDomain struct {
@@ -19,8 +18,6 @@ const (
orgDomainTable = "management.org_domains"
)

func (d *OrgDomain) MinimumCycleDuration() time.Duration { return d.cycleDuration }

func (d *OrgDomain) ViewModel() string {
return orgDomainTable
}

@@ -2,14 +2,14 @@ package handler

import (
"context"
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
org_model "github.com/caos/zitadel/internal/org/repository/view/model"
usr_model "github.com/caos/zitadel/internal/user/model"
usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
usr_es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
@@ -24,8 +24,6 @@ const (
orgMemberTable = "management.org_members"
)

func (m *OrgMember) MinimumCycleDuration() time.Duration { return m.cycleDuration }

func (m *OrgMember) ViewModel() string {
return orgMemberTable
}

@@ -1,8 +1,6 @@
package handler

import (
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore"
@@ -22,8 +20,6 @@ const (
projectTable = "management.projects"
)

func (p *Project) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *Project) ViewModel() string {
return projectTable
}

@@ -2,7 +2,6 @@ package handler

import (
"context"
"time"

"github.com/caos/logging"

@@ -28,8 +27,6 @@ const (
grantedProjectTable = "management.project_grants"
)

func (p *ProjectGrant) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *ProjectGrant) ViewModel() string {
return grantedProjectTable
}

@@ -2,7 +2,9 @@ package handler

import (
"context"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
@@ -11,7 +13,6 @@ import (
usr_model "github.com/caos/zitadel/internal/user/model"
usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
usr_es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
"time"
)

type ProjectGrantMember struct {
@@ -23,8 +24,6 @@ const (
projectGrantMemberTable = "management.project_grant_members"
)

func (p *ProjectGrantMember) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *ProjectGrantMember) ViewModel() string {
return projectGrantMemberTable
}

@@ -2,7 +2,9 @@ package handler

import (
"context"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
@@ -11,7 +13,6 @@ import (
usr_model "github.com/caos/zitadel/internal/user/model"
usr_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
usr_es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
"time"
)

type ProjectMember struct {
@@ -23,8 +24,6 @@ const (
projectMemberTable = "management.project_members"
)

func (p *ProjectMember) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *ProjectMember) ViewModel() string {
return projectMemberTable
}

@@ -2,13 +2,13 @@ package handler

import (
"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/project/repository/eventsourcing"
proj_event "github.com/caos/zitadel/internal/project/repository/eventsourcing"
es_model "github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
view_model "github.com/caos/zitadel/internal/project/repository/view/model"
"time"
)

type ProjectRole struct {
@@ -20,8 +20,6 @@ const (
projectRoleTable = "management.project_roles"
)

func (p *ProjectRole) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *ProjectRole) ViewModel() string {
return projectRoleTable
}

@@ -2,19 +2,17 @@ package handler

import (
"context"
"time"

es_models "github.com/caos/zitadel/internal/eventstore/models"
org_model "github.com/caos/zitadel/internal/org/model"
org_events "github.com/caos/zitadel/internal/org/repository/eventsourcing"
org_es_model "github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
org_model "github.com/caos/zitadel/internal/org/model"
org_events "github.com/caos/zitadel/internal/org/repository/eventsourcing"
org_es_model "github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
view_model "github.com/caos/zitadel/internal/user/repository/view/model"
)

@@ -28,8 +26,6 @@ const (
userTable = "management.users"
)

func (p *User) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *User) ViewModel() string {
return userTable
}
@@ -122,12 +118,8 @@ func (u *User) fillLoginNamesOnOrgUsers(event *models.Event) error {
}
for _, user := range users {
user.SetLoginNames(policy, org.Domains)
err := u.view.PutUser(user, 0)
if err != nil {
return err
}
}
return nil
return u.view.PutUsers(users, event.Sequence)
}

func (u *User) fillPreferredLoginNamesOnOrgUsers(event *models.Event) error {
@@ -148,12 +140,8 @@ func (u *User) fillPreferredLoginNamesOnOrgUsers(event *models.Event) error {
}
for _, user := range users {
user.PreferredLoginName = user.GenerateLoginName(org.GetPrimaryDomain().Domain, policy.UserLoginMustBeDomain)
err := u.view.PutUser(user, 0)
if err != nil {
return err
}
}
return nil
return u.view.PutUsers(users, event.Sequence)
}

func (u *User) fillLoginNames(user *view_model.UserView) (err error) {

@@ -2,7 +2,6 @@ package handler

import (
"context"
"time"

es_models "github.com/caos/zitadel/internal/eventstore/models"
org_model "github.com/caos/zitadel/internal/org/model"
@@ -35,8 +34,6 @@ const (
userGrantTable = "management.user_grants"
)

func (u *UserGrant) MinimumCycleDuration() time.Duration { return u.cycleDuration }

func (u *UserGrant) ViewModel() string {
return userGrantTable
}

@@ -1,127 +0,0 @@
package spooler

import (
"database/sql"
"testing"
"time"

"github.com/DATA-DOG/go-sqlmock"
)

type dbMock struct {
db *sql.DB
mock sqlmock.Sqlmock
}

func mockDB(t *testing.T) *dbMock {
mockDB := dbMock{}
var err error
mockDB.db, mockDB.mock, err = sqlmock.New()
if err != nil {
t.Fatalf("error occured while creating stub db %v", err)
}

mockDB.mock.MatchExpectationsInOrder(true)

return &mockDB
}

func (db *dbMock) expectCommit() *dbMock {
db.mock.ExpectCommit()

return db
}

func (db *dbMock) expectRollback() *dbMock {
db.mock.ExpectRollback()

return db
}

func (db *dbMock) expectBegin() *dbMock {
db.mock.ExpectBegin()

return db
}

func (db *dbMock) expectSavepoint() *dbMock {
db.mock.ExpectExec("SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))
return db
}

func (db *dbMock) expectReleaseSavepoint() *dbMock {
db.mock.ExpectExec("RELEASE SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))

return db
}

func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
query := db.mock.
ExpectExec(`INSERT INTO management\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
WillReturnResult(sqlmock.NewResult(1, 1))

if affectedRows == 0 {
query.WillReturnResult(sqlmock.NewResult(0, 0))
} else {
query.WillReturnResult(sqlmock.NewResult(1, affectedRows))
}

return db
}

func Test_locker_Renew(t *testing.T) {
type fields struct {
db *dbMock
}
type args struct {
lockerID string
viewModel string
waitTime time.Duration
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "renew succeeded",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 1).
expectReleaseSavepoint().
expectCommit(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: false,
},
{
name: "renew now rows updated",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 0).
expectRollback(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
l := &locker{
dbClient: tt.fields.db.db,
}
if err := l.Renew(tt.args.lockerID, tt.args.viewModel, tt.args.waitTime); (err != nil) != tt.wantErr {
t.Errorf("locker.Renew() error = %v, wantErr %v", err, tt.wantErr)
}
if err := tt.fields.db.mock.ExpectationsWereMet(); err != nil {
t.Errorf("not all database expectations met: %v", err)
}
})
}
}
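The deleted test above pinned the lock statement to the old locks.object_type column, which this commit renames to view_name (see the migrations below). A sketch of the equivalent upsert after the rename, as a Go constant; the actual statement in the new lock implementation is not part of this diff, so parameter numbering and clause order are assumptions:

// Assumed shape of the lock upsert after the object_type -> view_name
// rename; the real query lives in the spooler's lock code, which this
// diff does not show.
const renewLockStmt = `INSERT INTO management.locks (view_name, locker_id, locked_until) ` +
	`VALUES ($1, $2, now()+$3) ` +
	`ON CONFLICT (view_name) DO UPDATE SET locked_until = now()+$4, locker_id = $5 ` +
	`WHERE (locks.locked_until < now() OR locks.locker_id = $6) AND locks.view_name = $7`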
@@ -2,6 +2,7 @@ package spooler

import (
"database/sql"

"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/management/repository/eventsourcing/handler"
@@ -11,16 +12,16 @@ import (
type SpoolerConfig struct {
BulkLimit uint64
FailureCountUntilSkip uint64
ConcurrentTasks int
ConcurrentWorkers int
Handlers handler.Configs
}

func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, sql *sql.DB, eventstoreRepos handler.EventstoreRepos) *spooler.Spooler {
spoolerConfig := spooler.Config{
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentTasks: c.ConcurrentTasks,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, eventstoreRepos),
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentWorkers: c.ConcurrentWorkers,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, eventstoreRepos),
}
spool := spoolerConfig.New()
spool.Start()
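For reference, the wiring above reduces to a small amount of setup. A minimal sketch, assuming es, sqlClient, and handlers are already constructed; it shows the renamed ConcurrentWorkers knob, which now bounds how many worker goroutines process view handlers at once:

// Sketch: condensed from StartSpooler above; all values are placeholders.
spoolerConfig := spooler.Config{
	Eventstore:        es,
	Locker:            &locker{dbClient: sqlClient},
	ConcurrentWorkers: 1, // number of concurrent spooler workers
	ViewHandlers:      handlers,
}
spool := spoolerConfig.New()
spool.Start()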
@@ -35,6 +35,14 @@ func (v *View) UserMfas(userID string) ([]*usr_model.MultiFactor, error) {
return view.UserMfas(v.Db, userTable, userID)
}

func (v *View) PutUsers(user []*model.UserView, sequence uint64) error {
err := view.PutUsers(v.Db, userTable, user...)
if err != nil {
return err
}
return v.ProcessedUserSequence(sequence)
}

func (v *View) PutUser(user *model.UserView, sequence uint64) error {
err := view.PutUser(v.Db, userTable, user)
if err != nil {

@@ -6,7 +6,7 @@ import (

"github.com/caos/zitadel/internal/config/types"
"github.com/caos/zitadel/internal/crypto"
"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/eventstore/query"
"github.com/caos/zitadel/internal/i18n"
"github.com/caos/zitadel/internal/notification/repository/eventsourcing/view"
org_event "github.com/caos/zitadel/internal/org/repository/eventsourcing"
@@ -33,12 +33,12 @@ type EventstoreRepos struct {
OrgEvents *org_event.OrgEventstore
}

func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults, i18n *i18n.Translator, dir http.FileSystem) []spooler.Handler {
func Register(configs Configs, bulkLimit, errorCount uint64, view *view.View, eventstore eventstore.Eventstore, repos EventstoreRepos, systemDefaults sd.SystemDefaults, i18n *i18n.Translator, dir http.FileSystem) []query.Handler {
aesCrypto, err := crypto.NewAESCrypto(systemDefaults.UserVerificationKey)
if err != nil {
logging.Log("HANDL-s90ew").WithError(err).Debug("error create new aes crypto")
}
return []spooler.Handler{
return []query.Handler{
&NotifyUser{
handler: handler{view, bulkLimit, configs.cycleDuration("User"), errorCount},
orgEvents: repos.OrgEvents,
@@ -62,3 +62,11 @@ func (configs Configs) cycleDuration(viewModel string) time.Duration {
}
return c.MinimumCycleDuration.Duration
}

func (h *handler) MinimumCycleDuration() time.Duration {
return h.cycleDuration
}

func (h *handler) QueryLimit() uint64 {
return h.bulkLimit
}

@@ -3,7 +3,6 @@ package handler

import (
"context"
"net/http"
"time"

"github.com/caos/logging"

@@ -35,8 +34,6 @@ const (
NotifyUserID = "NOTIFICATION"
)

func (n *Notification) MinimumCycleDuration() time.Duration { return n.cycleDuration }

func (n *Notification) ViewModel() string {
return notificationTable
}

@@ -2,18 +2,17 @@ package handler

import (
"context"
es_models "github.com/caos/zitadel/internal/eventstore/models"
org_model "github.com/caos/zitadel/internal/org/model"
org_events "github.com/caos/zitadel/internal/org/repository/eventsourcing"
org_es_model "github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
"time"

"github.com/caos/logging"

"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/models"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/eventstore/spooler"
org_model "github.com/caos/zitadel/internal/org/model"
org_events "github.com/caos/zitadel/internal/org/repository/eventsourcing"
org_es_model "github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
view_model "github.com/caos/zitadel/internal/user/repository/view/model"
)

@@ -27,8 +26,6 @@ const (
userTable = "notification.notify_users"
)

func (p *NotifyUser) MinimumCycleDuration() time.Duration { return p.cycleDuration }

func (p *NotifyUser) ViewModel() string {
return userTable
}

@@ -1,127 +0,0 @@
package spooler

import (
"database/sql"
"testing"
"time"

"github.com/DATA-DOG/go-sqlmock"
)

type dbMock struct {
db *sql.DB
mock sqlmock.Sqlmock
}

func mockDB(t *testing.T) *dbMock {
mockDB := dbMock{}
var err error
mockDB.db, mockDB.mock, err = sqlmock.New()
if err != nil {
t.Fatalf("error occured while creating stub db %v", err)
}

mockDB.mock.MatchExpectationsInOrder(true)

return &mockDB
}

func (db *dbMock) expectCommit() *dbMock {
db.mock.ExpectCommit()

return db
}

func (db *dbMock) expectRollback() *dbMock {
db.mock.ExpectRollback()

return db
}

func (db *dbMock) expectBegin() *dbMock {
db.mock.ExpectBegin()

return db
}

func (db *dbMock) expectSavepoint() *dbMock {
db.mock.ExpectExec("SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))
return db
}

func (db *dbMock) expectReleaseSavepoint() *dbMock {
db.mock.ExpectExec("RELEASE SAVEPOINT").WillReturnResult(sqlmock.NewResult(1, 1))

return db
}

func (db *dbMock) expectRenew(lockerID, view string, affectedRows int64) *dbMock {
query := db.mock.
ExpectExec(`INSERT INTO notification\.locks \(object_type, locker_id, locked_until\) VALUES \(\$1, \$2, now\(\)\+\$3\) ON CONFLICT \(object_type\) DO UPDATE SET locked_until = now\(\)\+\$4, locker_id = \$5 WHERE \(locks\.locked_until < now\(\) OR locks\.locker_id = \$6\) AND locks\.object_type = \$7`).
WithArgs(view, lockerID, sqlmock.AnyArg(), sqlmock.AnyArg(), lockerID, lockerID, view).
WillReturnResult(sqlmock.NewResult(1, 1))

if affectedRows == 0 {
query.WillReturnResult(sqlmock.NewResult(0, 0))
} else {
query.WillReturnResult(sqlmock.NewResult(1, affectedRows))
}

return db
}

func Test_locker_Renew(t *testing.T) {
type fields struct {
db *dbMock
}
type args struct {
lockerID string
viewModel string
waitTime time.Duration
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "renew succeeded",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 1).
expectReleaseSavepoint().
expectCommit(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: false,
},
{
name: "renew now rows updated",
fields: fields{
db: mockDB(t).
expectBegin().
expectSavepoint().
expectRenew("locker", "view", 0).
expectRollback(),
},
args: args{lockerID: "locker", viewModel: "view", waitTime: 1 * time.Second},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
l := &locker{
dbClient: tt.fields.db.db,
}
if err := l.Renew(tt.args.lockerID, tt.args.viewModel, tt.args.waitTime); (err != nil) != tt.wantErr {
t.Errorf("locker.Renew() error = %v, wantErr %v", err, tt.wantErr)
}
if err := tt.fields.db.mock.ExpectationsWereMet(); err != nil {
t.Errorf("not all database expectations met: %v", err)
}
})
}
}

@@ -2,28 +2,29 @@ package spooler

import (
"database/sql"
"net/http"

sd "github.com/caos/zitadel/internal/config/systemdefaults"
"github.com/caos/zitadel/internal/eventstore"
"github.com/caos/zitadel/internal/eventstore/spooler"
"github.com/caos/zitadel/internal/i18n"
"github.com/caos/zitadel/internal/notification/repository/eventsourcing/handler"
"github.com/caos/zitadel/internal/notification/repository/eventsourcing/view"
"net/http"
)

type SpoolerConfig struct {
BulkLimit uint64
FailureCountUntilSkip uint64
ConcurrentTasks int
ConcurrentWorkers int
Handlers handler.Configs
}

func StartSpooler(c SpoolerConfig, es eventstore.Eventstore, view *view.View, sql *sql.DB, eventstoreRepos handler.EventstoreRepos, systemDefaults sd.SystemDefaults, i18n *i18n.Translator, dir http.FileSystem) *spooler.Spooler {
spoolerConfig := spooler.Config{
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentTasks: c.ConcurrentTasks,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, eventstoreRepos, systemDefaults, i18n, dir),
Eventstore: es,
Locker: &locker{dbClient: sql},
ConcurrentWorkers: c.ConcurrentWorkers,
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, eventstoreRepos, systemDefaults, i18n, dir),
}
spool := spoolerConfig.New()
spool.Start()

@@ -246,7 +246,7 @@ func ChangesQuery(orgID string, latestSequence, limit uint64, sortAscending bool
AggregateTypeFilter(model.OrgAggregate)

if !sortAscending {
query.OrderDesc() //TODO: configure from param
query.OrderDesc()
}

query.LatestSequenceFilter(latestSequence).

@@ -2,6 +2,7 @@ package eventsourcing

import (
"encoding/json"

"github.com/caos/zitadel/internal/id"

mock_cache "github.com/caos/zitadel/internal/cache/mock"
@@ -15,7 +16,7 @@ func GetMockedEventstoreAge(ctrl *gomock.Controller, mockEs *mock.MockEventstore
return &PolicyEventstore{
Eventstore: mockEs,
policyCache: GetMockCacheAge(ctrl),
idGenerator: GetSonyFlacke(),
idGenerator: GetSonyFlake(),
}
}

@@ -26,7 +27,7 @@ func GetMockCacheAge(ctrl *gomock.Controller) *PolicyCache {
return &PolicyCache{policyCache: mockCache}
}

func GetSonyFlacke() id.Generator {
func GetSonyFlake() id.Generator {
return id.SonyFlakeGenerator
}

@@ -14,7 +14,7 @@ func GetMockedEventstoreComplexity(ctrl *gomock.Controller, mockEs *mock.MockEve
return &PolicyEventstore{
Eventstore: mockEs,
policyCache: GetMockCacheComplexity(ctrl),
idGenerator: GetSonyFlacke(),
idGenerator: GetSonyFlake(),
}
}

@@ -14,7 +14,7 @@ func GetMockedEventstoreLockout(ctrl *gomock.Controller, mockEs *mock.MockEvents
return &PolicyEventstore{
Eventstore: mockEs,
policyCache: GetMockCacheLockout(ctrl),
idGenerator: GetSonyFlacke(),
idGenerator: GetSonyFlake(),
}
}

@@ -325,7 +325,7 @@ func ChangesQuery(userID string, latestSequence, limit uint64, sortAscending boo
query := es_models.NewSearchQuery().
AggregateTypeFilter(model.UserAggregate)
if !sortAscending {
query.OrderDesc() //TODO: configure from param
query.OrderDesc()
}

query.LatestSequenceFilter(latestSequence).

@@ -2,12 +2,13 @@ package eventsourcing

import (
"context"
"strings"

"github.com/caos/zitadel/internal/errors"
es_models "github.com/caos/zitadel/internal/eventstore/models"
es_sdk "github.com/caos/zitadel/internal/eventstore/sdk"
org_es_model "github.com/caos/zitadel/internal/org/repository/eventsourcing/model"
"github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
"strings"
)

func UserByIDQuery(id string, latestSequence uint64) (*es_models.SearchQuery, error) {

@@ -2,10 +2,11 @@ package eventsourcing

import (
"context"
"github.com/caos/zitadel/internal/crypto"
"testing"
"time"

"github.com/caos/zitadel/internal/crypto"

"github.com/caos/zitadel/internal/api/authz"
caos_errs "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/models"

@@ -105,12 +105,21 @@ func UserMfas(db *gorm.DB, table, userID string) ([]*usr_model.MultiFactor, erro
if user.OTPState == int32(usr_model.MfaStateUnspecified) {
return []*usr_model.MultiFactor{}, nil
}
return []*usr_model.MultiFactor{&usr_model.MultiFactor{Type: usr_model.MfaTypeOTP, State: usr_model.MfaState(user.OTPState)}}, nil
return []*usr_model.MultiFactor{{Type: usr_model.MfaTypeOTP, State: usr_model.MfaState(user.OTPState)}}, nil
}

func PutUser(db *gorm.DB, table string, project *model.UserView) error {
func PutUsers(db *gorm.DB, table string, users ...*model.UserView) error {
save := repository.PrepareBulkSave(table)
u := make([]interface{}, len(users))
for i, user := range users {
u[i] = user
}
return save(db, u...)
}

func PutUser(db *gorm.DB, table string, user *model.UserView) error {
save := repository.PrepareSave(table)
return save(db, project)
return save(db, user)
}

func DeleteUser(db *gorm.DB, table, userID string) error {
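The handler-side effect of PutUsers (used in user.go above) is that the whole batch is written before the view's processed sequence advances once, instead of saving each row with sequence 0. A minimal sketch of the call site, names as in the diff:

// Sketch: one call persists every updated user row, then records
// event.Sequence as processed; on error the sequence is not advanced,
// so the whole event is retried.
if err := u.view.PutUsers(users, event.Sequence); err != nil {
	return err
}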
@@ -2,6 +2,7 @@ package eventsourcing

import (
"encoding/json"

mock_cache "github.com/caos/zitadel/internal/cache/mock"
"github.com/caos/zitadel/internal/eventstore/mock"
es_models "github.com/caos/zitadel/internal/eventstore/models"

@@ -1,11 +1,12 @@
package repository

import (
"strings"

"github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/model"
view_model "github.com/caos/zitadel/internal/view/model"
"github.com/jinzhu/gorm"
"strings"
)

const (

@@ -50,6 +50,26 @@ func PrepareGetByQuery(table string, queries ...SearchQuery) func(db *gorm.DB, r
}
}

func PrepareBulkSave(table string) func(db *gorm.DB, objects ...interface{}) error {
return func(db *gorm.DB, objects ...interface{}) error {
db = db.Table(table)
db = db.Begin()
if err := db.Error; err != nil {
return caos_errs.ThrowInternal(err, "REPOS-Fl0Is", "unable to begin")
}
for _, object := range objects {
err := db.Save(object).Error
if err != nil {
return caos_errs.ThrowInternal(err, "VIEW-oJJSm", "unable to put object to view")
}
}
if err := db.Commit().Error; err != nil {
return caos_errs.ThrowInternal(err, "REPOS-IfhUE", "unable to commit")
}
return nil
}
}

func PrepareSave(table string) func(db *gorm.DB, object interface{}) error {
return func(db *gorm.DB, object interface{}) error {
err := db.Table(table).Save(object).Error
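PrepareBulkSave above wraps all writes in one transaction: it begins, saves each object against the given table, and only commits after the last save succeeds; on the first error it returns before the commit, so no partial batch becomes visible. A short usage sketch (the table name and variables are illustrative):

// Sketch: save two related view objects atomically, mirroring the
// PutKeys change earlier in this diff. "some_schema.some_view" is a
// placeholder table name.
save := repository.PrepareBulkSave("some_schema.some_view")
if err := save(db, first, second); err != nil {
	return err // nothing was committed
}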
@@ -3,46 +3,33 @@ CREATE DATABASE auth;
CREATE DATABASE notification;
CREATE DATABASE adminapi;
CREATE DATABASE authz;
CREATE DATABASE eventstore;


CREATE USER eventstore;
GRANT SELECT, INSERT ON DATABASE eventstore TO eventstore;

CREATE USER management;

GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE management TO management;
GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO management;
GRANT SELECT, INSERT, UPDATE ON TABLE eventstore.* TO management;

GRANT SELECT, INSERT ON DATABASE eventstore TO management;

CREATE USER adminapi;

GRANT SELECT, INSERT, UPDATE, DELETE, DROP ON DATABASE adminapi TO adminapi;

GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO adminapi;
GRANT SELECT, INSERT, UPDATE ON TABLE eventstore.* TO adminapi;

GRANT SELECT, INSERT ON DATABASE eventstore TO adminapi;
GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE auth TO adminapi;

GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE authz TO adminapi;

GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE management TO adminapi;

GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE notification TO adminapi;


CREATE USER auth;

GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE auth TO auth;
GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO auth;
GRANT SELECT, INSERT, UPDATE ON TABLE eventstore.* TO auth;

GRANT SELECT, INSERT ON DATABASE eventstore TO auth;

CREATE USER notification;

GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE notification TO notification;
GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO notification;
GRANT SELECT, INSERT, UPDATE ON TABLE eventstore.* TO notification;
GRANT SELECT, INSERT ON DATABASE eventstore TO notification;

CREATE USER authz;

GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE authz TO authz;
GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO authz;
GRANT SELECT, INSERT, UPDATE ON TABLE eventstore.* TO authz;
GRANT SELECT, INSERT ON DATABASE eventstore TO authz;
GRANT SELECT, INSERT, UPDATE ON DATABASE auth TO authz;

@@ -1,49 +0,0 @@
BEGIN;

CREATE DATABASE eventstore;

COMMIT;


BEGIN;

CREATE USER eventstore;

GRANT SELECT, INSERT, UPDATE ON DATABASE eventstore TO eventstore;

COMMIT;

BEGIN;

CREATE SEQUENCE eventstore.event_seq;

COMMIT;

BEGIN;

CREATE TABLE eventstore.events (
id UUID DEFAULT gen_random_uuid(),

event_type TEXT,
aggregate_type TEXT NOT NULL,
aggregate_id TEXT NOT NULL,
aggregate_version TEXT NOT NULL,
event_sequence BIGINT NOT NULL DEFAULT nextval('eventstore.event_seq'),
previous_sequence BIGINT UNIQUE,
creation_date TIMESTAMPTZ NOT NULL DEFAULT now(),
event_data JSONB,
editor_user TEXT NOT NULL,
editor_service TEXT NOT NULL,
resource_owner TEXT NOT NULL,

PRIMARY KEY (id)
);

CREATE TABLE eventstore.locks (
aggregate_type TEXT NOT NULL,
aggregate_id TEXT NOT NULL,
until TIMESTAMPTZ,
UNIQUE (aggregate_type, aggregate_id)
);

COMMIT;
28 migrations/cockroach/V1.1__eventstore.sql Normal file
@@ -0,0 +1,28 @@
|
||||
CREATE SEQUENCE eventstore.event_seq;
|
||||
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO management;
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO eventstore;
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO adminapi;
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO auth;
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO authz;
|
||||
GRANT UPDATE ON TABLE eventstore.event_seq TO notification;
|
||||
|
||||
|
||||
CREATE TABLE eventstore.events (
|
||||
id UUID DEFAULT gen_random_uuid(),
|
||||
event_type TEXT,
|
||||
aggregate_type TEXT NOT NULL,
|
||||
aggregate_id TEXT NOT NULL,
|
||||
aggregate_version TEXT NOT NULL,
|
||||
event_sequence BIGINT NOT NULL DEFAULT nextval('eventstore.event_seq'),
|
||||
previous_sequence BIGINT,
|
||||
creation_date TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
event_data JSONB,
|
||||
editor_user TEXT NOT NULL,
|
||||
editor_service TEXT NOT NULL,
|
||||
resource_owner TEXT NOT NULL,
|
||||
|
||||
CONSTRAINT event_sequence_pk PRIMARY KEY (event_sequence DESC),
|
||||
INDEX agg_type_agg_id (aggregate_type, aggregate_id),
|
||||
CONSTRAINT previous_sequence_unique UNIQUE (previous_sequence DESC)
|
||||
);
|
@@ -1,17 +1,15 @@
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE management.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ,
|
||||
object_type TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
|
||||
PRIMARY KEY (object_type)
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE management.current_sequences (
|
||||
view_name TEXT,
|
||||
|
||||
current_sequence BIGINT,
|
||||
timestamp TIMESTAMPTZ,
|
||||
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
@@ -82,6 +80,7 @@ CREATE TABLE management.project_members (
|
||||
first_name TEXT,
|
||||
last_name TEXT,
|
||||
roles TEXT ARRAY,
|
||||
display_name TEXT,
|
||||
sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (project_id, user_id)
|
||||
@@ -100,6 +99,7 @@ CREATE TABLE management.project_grant_members (
|
||||
first_name TEXT,
|
||||
last_name TEXT,
|
||||
roles TEXT ARRAY,
|
||||
display_name TEXT,
|
||||
sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (grant_id, user_id)
|
||||
@@ -181,6 +181,7 @@ CREATE TABLE management.user_grants (
|
||||
last_name TEXT,
|
||||
email TEXT,
|
||||
role_keys TEXT Array,
|
||||
grant_id TEXT,
|
||||
|
||||
grant_state SMALLINT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
@@ -190,16 +191,30 @@ CREATE TABLE management.user_grants (
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE management.org_domains (
|
||||
creation_date TIMESTAMPTZ,
|
||||
change_date TIMESTAMPTZ,
|
||||
sequence BIGINT,
|
||||
|
||||
domain TEXT,
|
||||
org_id TEXT,
|
||||
verified BOOLEAN,
|
||||
primary_domain BOOLEAN,
|
||||
|
||||
PRIMARY KEY (org_id, domain)
|
||||
);
|
||||
|
||||
CREATE TABLE auth.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ,
|
||||
object_type TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
|
||||
PRIMARY KEY (object_type)
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE auth.current_sequences (
|
||||
view_name TEXT,
|
||||
timestamp TIMESTAMPTZ,
|
||||
|
||||
current_sequence BIGINT,
|
||||
|
||||
@@ -305,14 +320,15 @@ CREATE TABLE auth.tokens (
|
||||
|
||||
CREATE TABLE notification.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ,
|
||||
object_type TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
|
||||
PRIMARY KEY (object_type)
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE notification.current_sequences (
|
||||
view_name TEXT,
|
||||
timestamp TIMESTAMPTZ,
|
||||
|
||||
current_sequence BIGINT,
|
||||
|
||||
@@ -348,6 +364,8 @@ CREATE TABLE notification.notify_users (
|
||||
verified_phone TEXT,
|
||||
sequence BIGINT,
|
||||
password_set BOOLEAN,
|
||||
login_names TEXT,
|
||||
preferred_login_name TEXT,
|
||||
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
@@ -378,20 +396,38 @@ CREATE TABLE adminapi.failed_events (
|
||||
|
||||
CREATE TABLE adminapi.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ,
|
||||
object_type TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
|
||||
PRIMARY KEY (object_type)
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE adminapi.current_sequences (
|
||||
view_name TEXT,
|
||||
timestamp TIMESTAMPTZ,
|
||||
|
||||
current_sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE adminapi.iam_members (
|
||||
user_id TEXT,
|
||||
|
||||
iam_id TEXT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
change_date TIMESTAMPTZ,
|
||||
|
||||
user_name TEXT,
|
||||
email_address TEXT,
|
||||
first_name TEXT,
|
||||
last_name TEXT,
|
||||
roles TEXT ARRAY,
|
||||
display_name TEXT,
|
||||
sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (user_id)
|
||||
);
|
||||
|
||||
CREATE TABLE management.orgs (
|
||||
id TEXT,
|
||||
@@ -419,6 +455,7 @@ CREATE TABLE management.org_members (
|
||||
first_name TEXT,
|
||||
last_name TEXT,
|
||||
roles TEXT ARRAY,
|
||||
display_name TEXT,
|
||||
sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (org_id, user_id)
|
||||
@@ -479,6 +516,7 @@ CREATE TABLE auth.user_grants (
|
||||
display_name TEXT,
|
||||
email TEXT,
|
||||
role_keys TEXT Array,
|
||||
grant_id TEXT,
|
||||
|
||||
grant_state SMALLINT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
@@ -504,14 +542,15 @@ CREATE TABLE auth.orgs (
|
||||
|
||||
CREATE TABLE authz.locks (
|
||||
locker_id TEXT,
|
||||
locked_until TIMESTAMPTZ,
|
||||
object_type TEXT,
|
||||
locked_until TIMESTAMPTZ(3),
|
||||
view_name TEXT,
|
||||
|
||||
PRIMARY KEY (object_type)
|
||||
PRIMARY KEY (view_name)
|
||||
);
|
||||
|
||||
CREATE TABLE authz.current_sequences (
|
||||
view_name TEXT,
|
||||
timestamp TIMESTAMPTZ,
|
||||
|
||||
current_sequence BIGINT,
|
||||
|
||||
@@ -540,6 +579,7 @@ CREATE TABLE authz.user_grants (
|
||||
display_name TEXT,
|
||||
email TEXT,
|
||||
role_keys TEXT Array,
|
||||
grant_id TEXT,
|
||||
|
||||
grant_state SMALLINT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
@@ -573,34 +613,16 @@ CREATE TABLE authz.applications (
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE management.org_domains (
|
||||
CREATE TABLE authz.orgs (
|
||||
id TEXT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
change_date TIMESTAMPTZ,
|
||||
resource_owner TEXT,
|
||||
org_state SMALLINT,
|
||||
sequence BIGINT,
|
||||
|
||||
domain TEXT,
|
||||
org_id TEXT,
|
||||
verified BOOLEAN,
|
||||
primary_domain BOOLEAN,
|
||||
name TEXT,
|
||||
|
||||
PRIMARY KEY (org_id, domain)
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE adminapi.iam_members (
|
||||
user_id TEXT,
|
||||
|
||||
iam_id TEXT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
change_date TIMESTAMPTZ,
|
||||
|
||||
user_name TEXT,
|
||||
email_address TEXT,
|
||||
first_name TEXT,
|
||||
last_name TEXT,
|
||||
roles TEXT ARRAY,
|
||||
sequence BIGINT,
|
||||
|
||||
PRIMARY KEY (user_id)
|
||||
);
|
||||
|
||||
COMMIT;
|
||||
|
@@ -1,6 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE notification.notify_users ADD COLUMN login_names TEXT ARRAY;
|
||||
ALTER TABLE notification.notify_users ADD COLUMN preferred_login_name TEXT;
|
||||
|
||||
COMMIT;
|
@@ -1,7 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE management.user_grants ADD COLUMN grant_id TEXT;
|
||||
ALTER TABLE auth.user_grants ADD COLUMN grant_id TEXT;
|
||||
ALTER TABLE authz.user_grants ADD COLUMN grant_id TEXT;
|
||||
|
||||
COMMIT;
|
@@ -1,9 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE authz.current_sequences ADD COLUMN timestamp TIMESTAMPTZ;
|
||||
ALTER TABLE auth.current_sequences ADD COLUMN timestamp TIMESTAMPTZ;
|
||||
ALTER TABLE management.current_sequences ADD COLUMN timestamp TIMESTAMPTZ;
|
||||
ALTER TABLE notification.current_sequences ADD COLUMN timestamp TIMESTAMPTZ;
|
||||
ALTER TABLE adminapi.current_sequences ADD COLUMN timestamp TIMESTAMPTZ;
|
||||
|
||||
COMMIT;
|
@@ -1,17 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE authz.orgs (
|
||||
id TEXT,
|
||||
creation_date TIMESTAMPTZ,
|
||||
change_date TIMESTAMPTZ,
|
||||
resource_owner TEXT,
|
||||
org_state SMALLINT,
|
||||
sequence BIGINT,
|
||||
|
||||
domain TEXT,
|
||||
name TEXT,
|
||||
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
COMMIT;
|
@@ -1,8 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE management.project_members ADD COLUMN display_name TEXT;
|
||||
ALTER TABLE management.project_grant_members ADD COLUMN display_name TEXT;
|
||||
ALTER TABLE management.org_members ADD COLUMN display_name TEXT;
|
||||
ALTER TABLE adminapi.iam_members ADD COLUMN display_name TEXT;
|
||||
|
||||
COMMIT;
|