fix: scheduling (#3978)
* fix: improve scheduling
* build pre-release
* fix: locker
* fix: user handler and print stack in case of panic in reducer
* chore: remove sentry
* fix: improve handler projection and implement tests
* more tests
* fix: race condition in tests
* Update internal/eventstore/repository/sql/query.go
  Co-authored-by: Silvan <silvan.reusser@gmail.com>
* fix: implemented suggested changes
* fix: lock statement

Co-authored-by: Silvan <silvan.reusser@gmail.com>
Parent: 0cc548e3f8
Commit: aed7010508
@@ -3,7 +3,7 @@ module.exports = {
    {name: 'main'},
    {name: '1.x.x', range: '1.x.x', channel: '1.x.x'},
    {name: 'v2-alpha', prerelease: true},
    {name: 'auth-users', prerelease: true},
    {name: 'scheduler', prerelease: true},
  ],
  plugins: [
    "@semantic-release/commit-analyzer"
README.md (10 changed lines)
@@ -142,16 +142,6 @@ Delegate the right to assign roles to another organization
Customize login and console with your design

## Usage Data

ZITADEL components send errors and usage data to CAOS Ltd. so that we can identify potential code improvements. If you don't want to send this data or don't have an internet connection, pass the global flag `--disable-analytics` when using zitadelctl. To disable ingestion for already-running components, execute the takeoff command again with the `` flag.

We try to distinguish the environments the events come from. As the environment identifier, we enrich the events with the domain you have configured in zitadel.yml as soon as it is available. When it is not available and you passed the --gitops flag, we derive the environment identifier from your git repository URL.

Besides errors that don't clearly come from misconfiguration or CLI misuse, we send an initial event whenever a binary is started. This is a "<component> invoked" event along with the flags passed to it, except secret values of course.

We only ingest operational data. Your ZITADEL workload data from the IAM application itself is never sent anywhere unless you choose to integrate other systems yourself.

## Security

See the policy [here](./SECURITY.md)
@@ -104,9 +104,10 @@ Machine:
    # JPath: "$.compute.vmId"

Projections:
  RequeueEvery: 10s
  RequeueEvery: 60s
  RetryFailedAfter: 1s
  MaxFailureCount: 5
  ConcurrentInstances: 10
  BulkLimit: 200
  MaxIterators: 1
  Customizations:

@@ -117,6 +118,7 @@ Auth:
  SearchLimit: 1000
  Spooler:
    ConcurrentWorkers: 1
    ConcurrentInstances: 10
    BulkLimit: 10000
    FailureCountUntilSkip: 5

@@ -124,6 +126,7 @@ Admin:
  SearchLimit: 1000
  Spooler:
    ConcurrentWorkers: 1
    ConcurrentInstances: 10
    BulkLimit: 10000
    FailureCountUntilSkip: 5

@@ -180,6 +183,7 @@ Notification:
  Repository:
    Spooler:
      ConcurrentWorkers: 1
      ConcurrentInstances: 10
      BulkLimit: 10000
      FailureCountUntilSkip: 5
      Handlers:
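For orientation, here is a sketch of the Go shape these settings could be decoded into. The spooler fields below mirror the SpoolerConfig struct changed later in this diff (its Handlers field is omitted here); the ProjectionsConfig struct and its field names are assumptions for illustration, not the actual ZITADEL types.

// Sketch only: possible Go structs for the YAML keys above.
package config

import "time"

// ProjectionsConfig is an assumed shape for the Projections block.
type ProjectionsConfig struct {
    RequeueEvery        time.Duration // 60s: re-run projections even when no new event triggered them
    RetryFailedAfter    time.Duration // 1s
    MaxFailureCount     uint          // 5
    ConcurrentInstances uint          // 10
    BulkLimit           uint64        // 200
    MaxIterators        int           // 1
}

// SpoolerConfig mirrors the spooler settings of the Auth, Admin and
// Notification blocks (see the struct further down in this diff).
type SpoolerConfig struct {
    BulkLimit             uint64 // 10000
    FailureCountUntilSkip uint64 // 5
    ConcurrentWorkers     int    // 1
    ConcurrentInstances   int    // 10: new in this commit
}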
go.mod (7 changed lines)
@@ -17,7 +17,6 @@ require (
    github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d
    github.com/duo-labs/webauthn v0.0.0-20211216225436-9a12cd078b8a
    github.com/envoyproxy/protoc-gen-validate v0.6.2
    github.com/getsentry/sentry-go v0.11.0
    github.com/golang/glog v1.0.0
    github.com/golang/mock v1.6.0
    github.com/golang/protobuf v1.5.2

@@ -48,7 +47,7 @@ require (
    github.com/stretchr/testify v1.7.1
    github.com/superseriousbusiness/exifremove v0.0.0-20210330092427-6acd27eac203
    github.com/ttacon/libphonenumber v1.2.1
    github.com/zitadel/logging v0.3.3
    github.com/zitadel/logging v0.3.4
    github.com/zitadel/oidc/v2 v2.0.0-dynamic-issuer.3
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.27.0
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.27.0

@@ -154,7 +153,7 @@ require (
    github.com/prometheus/procfs v0.6.0 // indirect
    github.com/rs/xid v1.2.1 // indirect
    github.com/satori/go.uuid v1.2.0 // indirect
    github.com/sirupsen/logrus v1.8.1 // indirect
    github.com/sirupsen/logrus v1.9.0 // indirect
    github.com/spf13/afero v1.8.1 // indirect
    github.com/spf13/cast v1.4.1 // indirect
    github.com/spf13/jwalterweatherman v1.1.0 // indirect

@@ -170,7 +169,7 @@ require (
    go.opentelemetry.io/proto/otlp v0.10.0 // indirect
    golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect
    golang.org/x/mod v0.5.1 // indirect
    golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
    golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/api v0.63.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
go.sum (94 changed lines: dependency checksums updated in line with the go.mod changes above)
@@ -73,30 +73,26 @@ func (m *Styling) CurrentSequence(instanceID string) (uint64, error) {
    return sequence.CurrentSequence, nil
}

func (m *Styling) EventQuery() (*models.SearchQuery, error) {
    sequences, err := m.view.GetLatestStylingSequences()
func (m *Styling) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
    sequences, err := m.view.GetLatestStylingSequences(instanceIDs...)
    if err != nil {
        return nil, err
    }
    query := models.NewSearchQuery()
    instances := make([]string, 0)
    searchQuery := models.NewSearchQuery()
    for _, sequence := range sequences {
        for _, instance := range instances {
            if sequence.InstanceID == instance {
        var seq uint64
        for _, instanceID := range instanceIDs {
            if sequence.InstanceID == instanceID {
                seq = sequence.CurrentSequence
                break
            }
        }
        instances = append(instances, sequence.InstanceID)
        query.AddQuery().
        searchQuery.AddQuery().
            AggregateTypeFilter(m.AggregateTypes()...).
            LatestSequenceFilter(sequence.CurrentSequence).
            LatestSequenceFilter(seq).
            InstanceIDFilter(sequence.InstanceID)
    }
    return query.AddQuery().
        AggregateTypeFilter(m.AggregateTypes()...).
        LatestSequenceFilter(0).
        ExcludedInstanceIDsFilter(instances...).
        SearchQuery(), nil
    return searchQuery, nil
}

func (m *Styling) Reduce(event *models.Event) (err error) {

@@ -299,7 +295,7 @@ func (m *Styling) generateColorPaletteRGBA255(hex string) map[string]string {
    if ok {
        palette["500"] = cssRGB(color500.RGB255())
    }


    color600, ok := colorful.MakeColor(gamut.Darker(defaultColor, 0.06))
    if ok {
        palette["600"] = cssRGB(color600.RGB255())
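The reworked EventQuery above now builds one sub-query per stored sequence, each starting at that instance's current sequence (or 0 if none is stored), instead of one catch-all query with ExcludedInstanceIDsFilter. The standalone sketch below illustrates only that per-instance lookup idea; the type and helper names are assumptions for illustration, not code from this commit.

// Illustration of the per-instance "start from the stored sequence" lookup
// that the new EventQuery relies on. Names are assumptions for this sketch.
package main

import "fmt"

type currentSequence struct {
    InstanceID      string
    CurrentSequence uint64
}

// sequenceFor returns the stored sequence for instanceID, or 0 so that an
// instance without a stored sequence is caught up from the beginning.
func sequenceFor(stored []currentSequence, instanceID string) uint64 {
    for _, s := range stored {
        if s.InstanceID == instanceID {
            return s.CurrentSequence
        }
    }
    return 0
}

func main() {
    stored := []currentSequence{{InstanceID: "instance-1", CurrentSequence: 42}}
    for _, id := range []string{"instance-1", "instance-2"} {
        fmt.Printf("instance %s starts at sequence %d\n", id, sequenceFor(stored, id))
    }
    // instance instance-1 starts at sequence 42
    // instance instance-2 starts at sequence 0
}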
@@ -15,15 +15,17 @@ type SpoolerConfig struct {
    BulkLimit             uint64
    FailureCountUntilSkip uint64
    ConcurrentWorkers     int
    ConcurrentInstances   int
    Handlers              handler.Configs
}

func StartSpooler(c SpoolerConfig, es v1.Eventstore, view *view.View, sql *sql.DB, static static.Storage) *spooler.Spooler {
    spoolerConfig := spooler.Config{
        Eventstore:        es,
        Locker:            &locker{dbClient: sql},
        ConcurrentWorkers: c.ConcurrentWorkers,
        ViewHandlers:      handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, static),
        Eventstore:          es,
        Locker:              &locker{dbClient: sql},
        ConcurrentWorkers:   c.ConcurrentWorkers,
        ConcurrentInstances: c.ConcurrentInstances,
        ViewHandlers:        handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, static),
    }
    spool := spoolerConfig.New()
    spool.Start()
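StartSpooler above threads the new ConcurrentInstances value from SpoolerConfig into spooler.Config. One plausible reading of the setting is that it bounds how many instances a single spooled run works on at once; the sketch below only illustrates that chunking idea and makes no claim about the actual spooler internals.

// Hypothetical illustration of bounding work per run by ConcurrentInstances.
// This is not the spooler implementation from this commit.
package main

import "fmt"

func chunkInstances(instanceIDs []string, concurrentInstances int) [][]string {
    if concurrentInstances <= 0 {
        concurrentInstances = 1
    }
    var bulks [][]string
    for start := 0; start < len(instanceIDs); start += concurrentInstances {
        end := start + concurrentInstances
        if end > len(instanceIDs) {
            end = len(instanceIDs)
        }
        bulks = append(bulks, instanceIDs[start:end])
    }
    return bulks
}

func main() {
    ids := []string{"inst-1", "inst-2", "inst-3", "inst-4", "inst-5"}
    for _, bulk := range chunkInstances(ids, 2) { // e.g. ConcurrentInstances = 2
        fmt.Println(bulk) // each bulk would be handled by one run
    }
}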
@@ -19,8 +19,8 @@ func (v *View) latestSequence(viewName, instanceID string) (*repository.CurrentS
    return repository.LatestSequence(v.Db, sequencesTable, viewName, instanceID)
}

func (v *View) latestSequences(viewName string) ([]*repository.CurrentSequence, error) {
    return repository.LatestSequences(v.Db, sequencesTable, viewName)
func (v *View) latestSequences(viewName string, instanceIDs ...string) ([]*repository.CurrentSequence, error) {
    return repository.LatestSequences(v.Db, sequencesTable, viewName, instanceIDs...)
}

func (v *View) AllCurrentSequences(db string) ([]*repository.CurrentSequence, error) {
@@ -27,8 +27,8 @@ func (v *View) GetLatestStylingSequence(instanceID string) (*global_view.Current
    return v.latestSequence(stylingTyble, instanceID)
}

func (v *View) GetLatestStylingSequences() ([]*global_view.CurrentSequence, error) {
    return v.latestSequences(stylingTyble)
func (v *View) GetLatestStylingSequences(instanceIDs ...string) ([]*global_view.CurrentSequence, error) {
    return v.latestSequences(stylingTyble, instanceIDs...)
}

func (v *View) ProcessedStylingSequence(event *models.Event) error {
@@ -6,7 +6,6 @@ import (
    "net/http"
    "strings"

    sentryhttp "github.com/getsentry/sentry-go/http"
    "github.com/gorilla/mux"
    "github.com/improbable-eng/grpc-web/go/grpcweb"
    "github.com/zitadel/logging"

@@ -67,7 +66,6 @@ func (a *API) RegisterServer(ctx context.Context, grpcServer server.Server) erro
func (a *API) RegisterHandler(prefix string, handler http.Handler) {
    prefix = strings.TrimSuffix(prefix, "/")
    subRouter := a.router.PathPrefix(prefix).Name(prefix).Subrouter()
    subRouter.Use(sentryhttp.New(sentryhttp.Options{}).Handle)
    subRouter.PathPrefix("").Handler(http.StripPrefix(prefix, handler))
}
@@ -8,7 +8,6 @@ import (
    "strings"
    "time"

    sentryhttp "github.com/getsentry/sentry-go/http"
    "github.com/gorilla/mux"
    "github.com/zitadel/logging"

@@ -89,7 +88,7 @@ func NewHandler(commands *command.Commands, verifier *authz.TokenVerifier, authC

    verifier.RegisterServer("Assets-API", "assets", AssetsService_AuthMethods)
    router := mux.NewRouter()
    router.Use(sentryhttp.New(sentryhttp.Options{}).Handle, instanceInterceptor)
    router.Use(instanceInterceptor)
    RegisterRoutes(router, h)
    router.PathPrefix("/{owner}").Methods("GET").HandlerFunc(DownloadHandleFunc(h, h.GetFile()))
    return http_util.CopyHeadersToContext(http_mw.CORSInterceptor(router))

@@ -117,6 +116,10 @@ func UploadHandleFunc(s AssetsService, uploader Uploader) func(http.ResponseWrit
        ctx := r.Context()
        ctxData := authz.GetCtxData(ctx)
        err := r.ParseMultipartForm(maxMemory)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        file, handler, err := r.FormFile(paramFile)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
@ -1,34 +0,0 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func SentryHandler() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return sendErrToSentry(ctx, req, handler)
|
||||
}
|
||||
}
|
||||
|
||||
func sendErrToSentry(ctx context.Context, req interface{}, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
resp, err := handler(ctx, req)
|
||||
code := status.Code(err)
|
||||
switch code {
|
||||
case codes.Canceled,
|
||||
codes.Unknown,
|
||||
codes.DeadlineExceeded,
|
||||
codes.ResourceExhausted,
|
||||
codes.Aborted,
|
||||
codes.Unimplemented,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
codes.DataLoss:
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
return resp, err
|
||||
}
|
@ -30,7 +30,6 @@ func CreateServer(verifier *authz.TokenVerifier, authConfig authz.Config, querie
|
||||
grpc_middleware.ChainUnaryServer(
|
||||
middleware.DefaultTracingServer(),
|
||||
middleware.MetricsHandler(metricTypes, grpc_api.Probes...),
|
||||
middleware.SentryHandler(),
|
||||
middleware.NoCacheInterceptor(),
|
||||
middleware.ErrorHandler(),
|
||||
middleware.InstanceInterceptor(queries, hostHeaderName, system_pb.SystemService_MethodPrefix),
|
||||
|
@ -8,8 +8,10 @@ import (
|
||||
"github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
|
||||
sd "github.com/zitadel/zitadel/internal/config/systemdefaults"
|
||||
v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
|
||||
"github.com/zitadel/zitadel/internal/eventstore/v1/models"
|
||||
"github.com/zitadel/zitadel/internal/eventstore/v1/query"
|
||||
query2 "github.com/zitadel/zitadel/internal/query"
|
||||
"github.com/zitadel/zitadel/internal/view/repository"
|
||||
)
|
||||
|
||||
type Configs map[string]*Config
|
||||
@ -75,3 +77,21 @@ func (h *handler) QueryLimit() uint64 {
|
||||
func withInstanceID(ctx context.Context, instanceID string) context.Context {
|
||||
return authz.WithInstanceID(ctx, instanceID)
|
||||
}
|
||||
|
||||
func newSearchQuery(sequences []*repository.CurrentSequence, aggregateTypes []models.AggregateType, instanceIDs []string) *models.SearchQuery {
|
||||
searchQuery := models.NewSearchQuery()
|
||||
for _, sequence := range sequences {
|
||||
var seq uint64
|
||||
for _, instanceID := range instanceIDs {
|
||||
if sequence.InstanceID == instanceID {
|
||||
seq = sequence.CurrentSequence
|
||||
break
|
||||
}
|
||||
}
|
||||
searchQuery.AddQuery().
|
||||
AggregateTypeFilter(aggregateTypes...).
|
||||
LatestSequenceFilter(seq).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return searchQuery
|
||||
}
|
||||
|
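The newSearchQuery helper above replaces the hand-rolled query construction in the individual handlers that follow: every instance gets its own subquery that resumes after that instance's stored sequence, falling back to 0 when no sequence row exists yet. A self-contained sketch of that lookup, with plain structs standing in for the repository and models types, assuming this simplification is faithful enough for illustration:

// Simplified sketch only; currentSequence is a stand-in type, not the project's repository struct.
package main

import "fmt"

type currentSequence struct {
	InstanceID      string
	CurrentSequence uint64
}

// sequenceFor returns the stored sequence for instanceID, or 0 when the projection
// has not processed any event for that instance yet.
func sequenceFor(sequences []currentSequence, instanceID string) uint64 {
	for _, s := range sequences {
		if s.InstanceID == instanceID {
			return s.CurrentSequence
		}
	}
	return 0
}

func main() {
	sequences := []currentSequence{{InstanceID: "inst-1", CurrentSequence: 42}}
	for _, id := range []string{"inst-1", "inst-2"} {
		fmt.Printf("instance %s resumes after sequence %d\n", id, sequenceFor(sequences, id))
	}
}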
@ -62,31 +62,12 @@ func (i *IDPConfig) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (i *IDPConfig) EventQuery() (*models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestIDPConfigSequences()
|
||||
func (i *IDPConfig) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestIDPConfigSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, i.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (i *IDPConfig) Reduce(event *models.Event) (err error) {
|
||||
|
@ -76,30 +76,13 @@ func (i *IDPProvider) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (i *IDPProvider) EventQuery() (*models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestIDPProviderSequences()
|
||||
func (i *IDPProvider) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestIDPProviderSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
|
||||
return newSearchQuery(sequences, i.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (i *IDPProvider) Reduce(event *models.Event) (err error) {
|
||||
|
@ -62,30 +62,12 @@ func (p *OrgProjectMapping) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (p *OrgProjectMapping) EventQuery() (*es_models.SearchQuery, error) {
|
||||
sequences, err := p.view.GetLatestOrgProjectMappingSequences()
|
||||
func (p *OrgProjectMapping) EventQuery(instanceIDs ...string) (*es_models.SearchQuery, error) {
|
||||
sequences, err := p.view.GetLatestOrgProjectMappingSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(p.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(p.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, p.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (p *OrgProjectMapping) Reduce(event *es_models.Event) (err error) {
|
||||
|
@ -66,30 +66,12 @@ func (t *RefreshToken) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (t *RefreshToken) EventQuery() (*es_models.SearchQuery, error) {
|
||||
sequences, err := t.view.GetLatestRefreshTokenSequences()
|
||||
func (t *RefreshToken) EventQuery(instanceIDs ...string) (*es_models.SearchQuery, error) {
|
||||
sequences, err := t.view.GetLatestRefreshTokenSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(t.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(t.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, t.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (t *RefreshToken) Reduce(event *es_models.Event) (err error) {
|
||||
|
@ -72,30 +72,12 @@ func (p *Token) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (t *Token) EventQuery() (*es_models.SearchQuery, error) {
|
||||
sequences, err := t.view.GetLatestTokenSequences()
|
||||
func (t *Token) EventQuery(instanceIDs ...string) (*es_models.SearchQuery, error) {
|
||||
sequences, err := t.view.GetLatestTokenSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(t.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(t.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, t.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (t *Token) Reduce(event *es_models.Event) (err error) {
|
||||
|
@ -74,30 +74,12 @@ func (u *User) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (u *User) EventQuery() (*es_models.SearchQuery, error) {
|
||||
sequences, err := u.view.GetLatestUserSequences()
|
||||
func (u *User) EventQuery(instanceIDs ...string) (*es_models.SearchQuery, error) {
|
||||
sequences, err := u.view.GetLatestUserSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(u.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(u.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, u.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (u *User) Reduce(event *es_models.Event) (err error) {
|
||||
@ -176,6 +158,7 @@ func (u *User) ProcessUser(event *es_models.Event) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user = &view_model.UserView{}
|
||||
for _, e := range events {
|
||||
if err = user.AppendEvent(e); err != nil {
|
||||
return err
|
||||
@ -198,6 +181,7 @@ func (u *User) ProcessUser(event *es_models.Event) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user = &view_model.UserView{}
|
||||
for _, e := range events {
|
||||
if err = user.AppendEvent(e); err != nil {
|
||||
return err
|
||||
|
@ -77,30 +77,12 @@ func (i *ExternalIDP) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (i *ExternalIDP) EventQuery() (*es_models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestExternalIDPSequences()
|
||||
func (i *ExternalIDP) EventQuery(instanceIDs ...string) (*es_models.SearchQuery, error) {
|
||||
sequences, err := i.view.GetLatestExternalIDPSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := es_models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(i.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, i.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (i *ExternalIDP) Reduce(event *es_models.Event) (err error) {
|
||||
|
@ -72,30 +72,12 @@ func (u *UserSession) CurrentSequence(instanceID string) (uint64, error) {
|
||||
return sequence.CurrentSequence, nil
|
||||
}
|
||||
|
||||
func (u *UserSession) EventQuery() (*models.SearchQuery, error) {
|
||||
sequences, err := u.view.GetLatestUserSessionSequences()
|
||||
func (u *UserSession) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
|
||||
sequences, err := u.view.GetLatestUserSessionSequences(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
query := models.NewSearchQuery()
|
||||
instances := make([]string, 0)
|
||||
for _, sequence := range sequences {
|
||||
for _, instance := range instances {
|
||||
if sequence.InstanceID == instance {
|
||||
break
|
||||
}
|
||||
}
|
||||
instances = append(instances, sequence.InstanceID)
|
||||
query.AddQuery().
|
||||
AggregateTypeFilter(u.AggregateTypes()...).
|
||||
LatestSequenceFilter(sequence.CurrentSequence).
|
||||
InstanceIDFilter(sequence.InstanceID)
|
||||
}
|
||||
return query.AddQuery().
|
||||
AggregateTypeFilter(u.AggregateTypes()...).
|
||||
LatestSequenceFilter(0).
|
||||
ExcludedInstanceIDsFilter(instances...).
|
||||
SearchQuery(), nil
|
||||
return newSearchQuery(sequences, u.AggregateTypes(), instanceIDs), nil
|
||||
}
|
||||
|
||||
func (u *UserSession) Reduce(event *models.Event) (err error) {
|
||||
|
@ -16,15 +16,17 @@ type SpoolerConfig struct {
|
||||
BulkLimit uint64
|
||||
FailureCountUntilSkip uint64
|
||||
ConcurrentWorkers int
|
||||
ConcurrentInstances int
|
||||
Handlers handler.Configs
|
||||
}
|
||||
|
||||
func StartSpooler(c SpoolerConfig, es v1.Eventstore, view *view.View, client *sql.DB, systemDefaults sd.SystemDefaults, queries *query.Queries) *spooler.Spooler {
|
||||
spoolerConfig := spooler.Config{
|
||||
Eventstore: es,
|
||||
Locker: &locker{dbClient: client},
|
||||
ConcurrentWorkers: c.ConcurrentWorkers,
|
||||
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, systemDefaults, queries),
|
||||
Eventstore: es,
|
||||
Locker: &locker{dbClient: client},
|
||||
ConcurrentWorkers: c.ConcurrentWorkers,
|
||||
ConcurrentInstances: c.ConcurrentInstances,
|
||||
ViewHandlers: handler.Register(c.Handlers, c.BulkLimit, c.FailureCountUntilSkip, view, es, systemDefaults, queries),
|
||||
}
|
||||
spool := spoolerConfig.New()
|
||||
spool.Start()
|
||||
|
@ -60,8 +60,8 @@ func (v *View) GetLatestExternalIDPSequence(instanceID string) (*global_view.Cur
|
||||
return v.latestSequence(externalIDPTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestExternalIDPSequences() ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(externalIDPTable)
|
||||
func (v *View) GetLatestExternalIDPSequences(instanceIDs ...string) ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(externalIDPTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedExternalIDPSequence(event *models.Event) error {
|
||||
|
@ -45,8 +45,8 @@ func (v *View) GetLatestIDPConfigSequence(instanceID string) (*global_view.Curre
|
||||
return v.latestSequence(idpConfigTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestIDPConfigSequences() ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(idpConfigTable)
|
||||
func (v *View) GetLatestIDPConfigSequences(instanceIDs ...string) ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(idpConfigTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedIDPConfigSequence(event *models.Event) error {
|
||||
|
@ -65,8 +65,8 @@ func (v *View) GetLatestIDPProviderSequence(instanceID string) (*global_view.Cur
|
||||
return v.latestSequence(idpProviderTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestIDPProviderSequences() ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(idpProviderTable)
|
||||
func (v *View) GetLatestIDPProviderSequences(instanceIDs ...string) ([]*global_view.CurrentSequence, error) {
|
||||
return v.latestSequences(idpProviderTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedIDPProviderSequence(event *models.Event) error {
|
||||
|
@ -44,8 +44,8 @@ func (v *View) GetLatestOrgProjectMappingSequence(instanceID string) (*repositor
|
||||
return v.latestSequence(orgPrgojectMappingTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestOrgProjectMappingSequences() ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(orgPrgojectMappingTable)
|
||||
func (v *View) GetLatestOrgProjectMappingSequences(instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(orgPrgojectMappingTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedOrgProjectMappingSequence(event *models.Event) error {
|
||||
|
@ -69,8 +69,8 @@ func (v *View) GetLatestRefreshTokenSequence(instanceID string) (*repository.Cur
|
||||
return v.latestSequence(refreshTokenTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestRefreshTokenSequences() ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(refreshTokenTable)
|
||||
func (v *View) GetLatestRefreshTokenSequences(instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(refreshTokenTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedRefreshTokenSequence(event *models.Event) error {
|
||||
|
@ -19,8 +19,8 @@ func (v *View) latestSequence(viewName, instanceID string) (*repository.CurrentS
|
||||
return repository.LatestSequence(v.Db, sequencesTable, viewName, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) latestSequences(viewName string) ([]*repository.CurrentSequence, error) {
|
||||
return repository.LatestSequences(v.Db, sequencesTable, viewName)
|
||||
func (v *View) latestSequences(viewName string, instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return repository.LatestSequences(v.Db, sequencesTable, viewName, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) updateSpoolerRunSequence(viewName string) error {
|
||||
|
@ -80,8 +80,8 @@ func (v *View) GetLatestTokenSequence(instanceID string) (*repository.CurrentSeq
|
||||
return v.latestSequence(tokenTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestTokenSequences() ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(tokenTable)
|
||||
func (v *View) GetLatestTokenSequences(instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(tokenTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedTokenSequence(event *models.Event) error {
|
||||
|
@ -143,8 +143,8 @@ func (v *View) GetLatestUserSequence(instanceID string) (*repository.CurrentSequ
|
||||
return v.latestSequence(userTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestUserSequences() ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(userTable)
|
||||
func (v *View) GetLatestUserSequences(instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(userTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedUserSequence(event *models.Event) error {
|
||||
|
@ -60,8 +60,8 @@ func (v *View) GetLatestUserSessionSequence(instanceID string) (*repository.Curr
|
||||
return v.latestSequence(userSessionTable, instanceID)
|
||||
}
|
||||
|
||||
func (v *View) GetLatestUserSessionSequences() ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(userSessionTable)
|
||||
func (v *View) GetLatestUserSessionSequences(instanceIDs ...string) ([]*repository.CurrentSequence, error) {
|
||||
return v.latestSequences(userSessionTable, instanceIDs...)
|
||||
}
|
||||
|
||||
func (v *View) ProcessedUserSessionSequence(event *models.Event) error {
|
||||
|
@ -186,6 +186,15 @@ func (es *Eventstore) LatestSequence(ctx context.Context, queryFactory *SearchQu
return es.repo.LatestSequence(ctx, query)
}

//InstanceIDs returns the instance ids found by the search query
func (es *Eventstore) InstanceIDs(ctx context.Context, queryFactory *SearchQueryBuilder) ([]string, error) {
query, err := queryFactory.build(authz.GetInstance(ctx).InstanceID())
if err != nil {
return nil, err
}
return es.repo.InstanceIDs(ctx, query)
}

type QueryReducer interface {
reducer
//Query returns the SearchQueryFactory for the events needed in reducer
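The new InstanceIDs method lets the scheduler discover which instances currently have events before triggering projections for them. A hypothetical caller sketch follows; the interface only mirrors part of the new signature (the query factory argument is omitted) and the fan-out logic is an assumption, not code from this diff:

// Hedged sketch of a per-instance fan-out; fakeEventstore and triggerProjections are invented names.
package main

import (
	"context"
	"fmt"
)

type instanceLister interface {
	InstanceIDs(ctx context.Context) ([]string, error)
}

type fakeEventstore struct{ ids []string }

func (f *fakeEventstore) InstanceIDs(ctx context.Context) ([]string, error) { return f.ids, nil }

func triggerProjections(ctx context.Context, es instanceLister) error {
	ids, err := es.InstanceIDs(ctx)
	if err != nil {
		return err
	}
	for _, id := range ids {
		// In the real handler this would lock the projection for the instance batch
		// and run the bulk update; here we only log the fan-out.
		fmt.Println("triggering projections for instance", id)
	}
	return nil
}

func main() {
	_ = triggerProjections(context.Background(), &fakeEventstore{ids: []string{"inst-1", "inst-2"}})
}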
@ -688,10 +688,11 @@ func TestEventstore_aggregatesToEvents(t *testing.T) {
}

type testRepo struct {
events []*repository.Event
sequence uint64
err error
t *testing.T
events []*repository.Event
sequence uint64
instances []string
err error
t *testing.T
}

func (repo *testRepo) Health(ctx context.Context) error {
@ -735,6 +736,13 @@ func (repo *testRepo) LatestSequence(ctx context.Context, queryFactory *reposito
return repo.sequence, nil
}

func (repo *testRepo) InstanceIDs(ctx context.Context, queryFactory *repository.SearchQuery) ([]string, error) {
if repo.err != nil {
return nil, repo.err
}
return repo.instances, nil
}

func TestEventstore_Push(t *testing.T) {
type args struct {
events []Command

@ -6,12 +6,14 @@ import (
"strconv"
"strings"

"github.com/lib/pq"

"github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore"
)

const (
currentSequenceStmtFormat = `SELECT current_sequence, aggregate_type, instance_id FROM %s WHERE projection_name = $1 FOR UPDATE`
currentSequenceStmtFormat = `SELECT current_sequence, aggregate_type, instance_id FROM %s WHERE projection_name = $1 AND instance_id = ANY ($2) FOR UPDATE`
updateCurrentSequencesStmtFormat = `UPSERT INTO %s (projection_name, aggregate_type, current_sequence, instance_id, timestamp) VALUES `
)

@ -22,8 +24,8 @@ type instanceSequence struct {
sequence uint64
}

func (h *StatementHandler) currentSequences(ctx context.Context, query func(context.Context, string, ...interface{}) (*sql.Rows, error)) (currentSequences, error) {
rows, err := query(ctx, h.currentSequenceStmt, h.ProjectionName)
func (h *StatementHandler) currentSequences(ctx context.Context, query func(context.Context, string, ...interface{}) (*sql.Rows, error), instanceIDs []string) (currentSequences, error) {
rows, err := query(ctx, h.currentSequenceStmt, h.ProjectionName, pq.StringArray(instanceIDs))
if err != nil {
return nil, err
}
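The reworked statement filters by instance with instance_id = ANY ($2) and binds the whole slice as a single array parameter through pq.StringArray, which implements driver.Valuer. A minimal sketch of that binding, with a placeholder DSN and table name:

// Sketch only: the connection string, table and projection name are placeholders.
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // also registers the "postgres" driver via its init
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	instanceIDs := []string{"inst-1", "inst-2"}
	rows, err := db.Query(
		`SELECT current_sequence, aggregate_type, instance_id FROM my_sequences WHERE projection_name = $1 AND instance_id = ANY ($2) FOR UPDATE`,
		"my_projection",
		pq.StringArray(instanceIDs), // the slice binds as one array parameter
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var seq uint64
		var aggregateType, instanceID string
		if err := rows.Scan(&seq, &aggregateType, &instanceID); err != nil {
			log.Fatal(err)
		}
		fmt.Println(aggregateType, instanceID, seq)
	}
}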
@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/lib/pq"
|
||||
|
||||
"github.com/zitadel/zitadel/internal/eventstore"
|
||||
)
|
||||
@ -123,34 +124,40 @@ func expectSavePointRelease() func(sqlmock.Sqlmock) {
|
||||
}
|
||||
}
|
||||
|
||||
func expectCurrentSequence(tableName, projection string, seq uint64, aggregateType, instanceID string) func(sqlmock.Sqlmock) {
|
||||
func expectCurrentSequence(tableName, projection string, seq uint64, aggregateType string, instanceIDs []string) func(sqlmock.Sqlmock) {
|
||||
rows := sqlmock.NewRows([]string{"current_sequence", "aggregate_type", "instance_id"})
|
||||
for _, instanceID := range instanceIDs {
|
||||
rows.AddRow(seq, aggregateType, instanceID)
|
||||
}
|
||||
return func(m sqlmock.Sqlmock) {
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM ` + tableName + ` WHERE projection_name = \$1 FOR UPDATE`).
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM `+tableName+` WHERE projection_name = \$1 AND instance_id = ANY \(\$2\) FOR UPDATE`).
|
||||
WithArgs(
|
||||
projection,
|
||||
pq.StringArray(instanceIDs),
|
||||
).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"current_sequence", "aggregate_type", "instance_id"}).
|
||||
AddRow(seq, aggregateType, instanceID),
|
||||
rows,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func expectCurrentSequenceErr(tableName, projection string, err error) func(sqlmock.Sqlmock) {
|
||||
func expectCurrentSequenceErr(tableName, projection string, instanceIDs []string, err error) func(sqlmock.Sqlmock) {
|
||||
return func(m sqlmock.Sqlmock) {
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM ` + tableName + ` WHERE projection_name = \$1 FOR UPDATE`).
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM `+tableName+` WHERE projection_name = \$1 AND instance_id = ANY \(\$2\) FOR UPDATE`).
|
||||
WithArgs(
|
||||
projection,
|
||||
pq.StringArray(instanceIDs),
|
||||
).
|
||||
WillReturnError(err)
|
||||
}
|
||||
}
|
||||
|
||||
func expectCurrentSequenceNoRows(tableName, projection string) func(sqlmock.Sqlmock) {
|
||||
func expectCurrentSequenceNoRows(tableName, projection string, instanceIDs []string) func(sqlmock.Sqlmock) {
|
||||
return func(m sqlmock.Sqlmock) {
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM ` + tableName + ` WHERE projection_name = \$1 FOR UPDATE`).
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM `+tableName+` WHERE projection_name = \$1 AND instance_id = ANY \(\$2\) FOR UPDATE`).
|
||||
WithArgs(
|
||||
projection,
|
||||
pq.StringArray(instanceIDs),
|
||||
).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"current_sequence", "aggregate_type", "instance_id"}),
|
||||
@ -158,11 +165,12 @@ func expectCurrentSequenceNoRows(tableName, projection string) func(sqlmock.Sqlm
|
||||
}
|
||||
}
|
||||
|
||||
func expectCurrentSequenceScanErr(tableName, projection string) func(sqlmock.Sqlmock) {
|
||||
func expectCurrentSequenceScanErr(tableName, projection string, instanceIDs []string) func(sqlmock.Sqlmock) {
|
||||
return func(m sqlmock.Sqlmock) {
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM ` + tableName + ` WHERE projection_name = \$1 FOR UPDATE`).
|
||||
m.ExpectQuery(`SELECT current_sequence, aggregate_type, instance_id FROM `+tableName+` WHERE projection_name = \$1 AND instance_id = ANY \(\$2\) FOR UPDATE`).
|
||||
WithArgs(
|
||||
projection,
|
||||
pq.StringArray(instanceIDs),
|
||||
).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"current_sequence", "aggregate_type", "instance_id"}).
|
||||
@ -286,12 +294,34 @@ func expectLock(lockTable, workerName string, d time.Duration, instanceID string
|
||||
` \(locker_id, locked_until, projection_name, instance_id\) VALUES \(\$1, now\(\)\+\$2::INTERVAL, \$3\, \$4\)`+
|
||||
` ON CONFLICT \(projection_name, instance_id\)`+
|
||||
` DO UPDATE SET locker_id = \$1, locked_until = now\(\)\+\$2::INTERVAL`+
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = \$4 AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = ANY \(\$5\) AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
WithArgs(
|
||||
workerName,
|
||||
float64(d),
|
||||
projectionName,
|
||||
instanceID,
|
||||
pq.StringArray{instanceID},
|
||||
).
|
||||
WillReturnResult(
|
||||
sqlmock.NewResult(1, 1),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func expectLockMultipleInstances(lockTable, workerName string, d time.Duration, instanceID1, instanceID2 string) func(sqlmock.Sqlmock) {
|
||||
return func(m sqlmock.Sqlmock) {
|
||||
m.ExpectExec(`INSERT INTO `+lockTable+
|
||||
` \(locker_id, locked_until, projection_name, instance_id\) VALUES \(\$1, now\(\)\+\$2::INTERVAL, \$3\, \$4\), \(\$1, now\(\)\+\$2::INTERVAL, \$3\, \$5\)`+
|
||||
` ON CONFLICT \(projection_name, instance_id\)`+
|
||||
` DO UPDATE SET locker_id = \$1, locked_until = now\(\)\+\$2::INTERVAL`+
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = ANY \(\$6\) AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
WithArgs(
|
||||
workerName,
|
||||
float64(d),
|
||||
projectionName,
|
||||
instanceID1,
|
||||
instanceID2,
|
||||
pq.StringArray{instanceID1, instanceID2},
|
||||
).
|
||||
WillReturnResult(
|
||||
sqlmock.NewResult(1, 1),
|
||||
@ -305,12 +335,13 @@ func expectLockNoRows(lockTable, workerName string, d time.Duration, instanceID
|
||||
` \(locker_id, locked_until, projection_name, instance_id\) VALUES \(\$1, now\(\)\+\$2::INTERVAL, \$3\, \$4\)`+
|
||||
` ON CONFLICT \(projection_name, instance_id\)`+
|
||||
` DO UPDATE SET locker_id = \$1, locked_until = now\(\)\+\$2::INTERVAL`+
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = \$4 AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = ANY \(\$5\) AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
WithArgs(
|
||||
workerName,
|
||||
float64(d),
|
||||
projectionName,
|
||||
instanceID,
|
||||
pq.StringArray{instanceID},
|
||||
).
|
||||
WillReturnResult(driver.ResultNoRows)
|
||||
}
|
||||
@ -322,12 +353,13 @@ func expectLockErr(lockTable, workerName string, d time.Duration, instanceID str
|
||||
` \(locker_id, locked_until, projection_name, instance_id\) VALUES \(\$1, now\(\)\+\$2::INTERVAL, \$3\, \$4\)`+
|
||||
` ON CONFLICT \(projection_name, instance_id\)`+
|
||||
` DO UPDATE SET locker_id = \$1, locked_until = now\(\)\+\$2::INTERVAL`+
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = \$4 AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
` WHERE `+lockTable+`\.projection_name = \$3 AND `+lockTable+`\.instance_id = ANY \(\$5\) AND \(`+lockTable+`\.locker_id = \$1 OR `+lockTable+`\.locked_until < now\(\)\)`).
|
||||
WithArgs(
|
||||
workerName,
|
||||
float64(d),
|
||||
projectionName,
|
||||
instanceID,
|
||||
pq.StringArray{instanceID},
|
||||
).
|
||||
WillReturnError(err)
|
||||
}
|
||||
|
@ -10,7 +10,6 @@ import (
"github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/eventstore/handler"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
)

var (
@ -75,84 +74,62 @@ func NewStatementHandler(
bulkLimit: config.BulkLimit,
Locker: NewLocker(config.Client, config.LockTable, config.ProjectionHandlerConfig.ProjectionName),
}
h.ProjectionHandler = handler.NewProjectionHandler(config.ProjectionHandlerConfig, h.reduce, h.Update, h.SearchQuery)
h.ProjectionHandler = handler.NewProjectionHandler(ctx, config.ProjectionHandlerConfig, h.reduce, h.Update, h.SearchQuery, h.Lock, h.Unlock)

err := h.Init(ctx, config.InitCheck)
logging.OnError(err).Fatal("unable to initialize projections")

go h.Process(
ctx,
h.reduce,
h.Update,
h.Lock,
h.Unlock,
h.SearchQuery,
)

h.Subscribe(h.aggregates...)

return h
}

func (h *StatementHandler) TriggerBulk(ctx context.Context) {
ctx, span := tracing.NewSpan(ctx)
var err error
defer span.EndWithError(err)

err = h.ProjectionHandler.TriggerBulk(ctx, h.Lock, h.Unlock)
logging.OnError(err).WithField("projection", h.ProjectionName).Warn("unable to trigger bulk")
}

func (h *StatementHandler) SearchQuery(ctx context.Context) (*eventstore.SearchQueryBuilder, uint64, error) {
sequences, err := h.currentSequences(ctx, h.client.QueryContext)
func (h *StatementHandler) SearchQuery(ctx context.Context, instanceIDs []string) (*eventstore.SearchQueryBuilder, uint64, error) {
sequences, err := h.currentSequences(ctx, h.client.QueryContext, instanceIDs)
if err != nil {
return nil, 0, err
}

queryBuilder := eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).Limit(h.bulkLimit)

for _, aggregateType := range h.aggregates {
instances := make([]string, 0)
for _, sequence := range sequences[aggregateType] {
instances = appendToIgnoredInstances(instances, sequence.instanceID)
for _, instanceID := range instanceIDs {
var seq uint64
for _, sequence := range sequences[aggregateType] {
if sequence.instanceID == instanceID {
seq = sequence.sequence
break
}
}
queryBuilder.
AddQuery().
AggregateTypes(aggregateType).
SequenceGreater(sequence.sequence).
InstanceID(sequence.instanceID)
SequenceGreater(seq).
InstanceID(instanceID)
}
queryBuilder.
AddQuery().
AggregateTypes(aggregateType).
SequenceGreater(0).
ExcludedInstanceID(instances...)
}

return queryBuilder, h.bulkLimit, nil
}

func appendToIgnoredInstances(instances []string, id string) []string {
for _, instance := range instances {
if instance == id {
return instances
}
}
return append(instances, id)
}
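SearchQuery now emits one subquery per aggregate type and instance ID, each resuming after that instance's stored sequence, instead of the old catch-all query that started at 0 and excluded known instances. A simplified stand-in for the builder, showing the shape of the produced subqueries (types and names are illustrative only):

// Illustrative sketch; subQuery is not the eventstore's builder type.
package main

import "fmt"

type subQuery struct {
	AggregateType string
	InstanceID    string
	SequenceFrom  uint64
}

func buildSubQueries(aggregates []string, instanceIDs []string, seqs map[string]map[string]uint64) []subQuery {
	queries := make([]subQuery, 0, len(aggregates)*len(instanceIDs))
	for _, aggregate := range aggregates {
		for _, instanceID := range instanceIDs {
			queries = append(queries, subQuery{
				AggregateType: aggregate,
				InstanceID:    instanceID,
				SequenceFrom:  seqs[aggregate][instanceID], // zero value when unknown, like the handler
			})
		}
	}
	return queries
}

func main() {
	seqs := map[string]map[string]uint64{"user": {"inst-1": 7}}
	for _, q := range buildSubQueries([]string{"user"}, []string{"inst-1", "inst-2"}, seqs) {
		fmt.Printf("%s / %s: events after %d\n", q.AggregateType, q.InstanceID, q.SequenceFrom)
	}
}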
//Update implements handler.Update
func (h *StatementHandler) Update(ctx context.Context, stmts []*handler.Statement, reduce handler.Reduce) (unexecutedStmts []*handler.Statement, err error) {
func (h *StatementHandler) Update(ctx context.Context, stmts []*handler.Statement, reduce handler.Reduce) (index int, err error) {
if len(stmts) == 0 {
return nil, nil
return -1, nil
}
instanceIDs := make([]string, 0, len(stmts))
for _, stmt := range stmts {
instanceIDs = appendToInstanceIDs(instanceIDs, stmt.InstanceID)
}
tx, err := h.client.BeginTx(ctx, nil)
if err != nil {
return stmts, errors.ThrowInternal(err, "CRDB-e89Gq", "begin failed")
return -1, errors.ThrowInternal(err, "CRDB-e89Gq", "begin failed")
}

sequences, err := h.currentSequences(ctx, tx.QueryContext)
sequences, err := h.currentSequences(ctx, tx.QueryContext, instanceIDs)
if err != nil {
tx.Rollback()
return stmts, err
return -1, err
}

//checks for events between create statement and current sequence
@ -162,7 +139,7 @@ func (h *StatementHandler) Update(ctx context.Context, stmts []*handler.Statemen
previousStmts, err := h.fetchPreviousStmts(ctx, tx, stmts[0].Sequence, stmts[0].InstanceID, sequences, reduce)
if err != nil {
tx.Rollback()
return stmts, err
return -1, err
}
stmts = append(previousStmts, stmts...)
}
@ -173,27 +150,19 @@ func (h *StatementHandler) Update(ctx context.Context, stmts []*handler.Statemen
err = h.updateCurrentSequences(tx, sequences)
if err != nil {
tx.Rollback()
return stmts, err
return -1, err
}
}

if err = tx.Commit(); err != nil {
return stmts, err
return -1, err
}

if lastSuccessfulIdx == -1 && len(stmts) > 0 {
return stmts, handler.ErrSomeStmtsFailed
if lastSuccessfulIdx < len(stmts)-1 {
return lastSuccessfulIdx, handler.ErrSomeStmtsFailed
}

unexecutedStmts = make([]*handler.Statement, len(stmts)-(lastSuccessfulIdx+1))
copy(unexecutedStmts, stmts[lastSuccessfulIdx+1:])
stmts = nil

if len(unexecutedStmts) > 0 {
return unexecutedStmts, handler.ErrSomeStmtsFailed
}

return unexecutedStmts, nil
return lastSuccessfulIdx, nil
}

func (h *StatementHandler) fetchPreviousStmts(ctx context.Context, tx *sql.Tx, stmtSeq uint64, instanceID string, sequences currentSequences, reduce handler.Reduce) (previousStmts []*handler.Statement, err error) {
@ -316,3 +285,12 @@ func updateSequences(sequences currentSequences, stmt *handler.Statement) {
sequence: stmt.Sequence,
})
}

func appendToInstanceIDs(instances []string, id string) []string {
for _, instance := range instances {
if instance == id {
return instances
}
}
return append(instances, id)
}
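Update's contract changes from returning the unexecuted statements to returning the index of the last successfully executed one, with handler.ErrSomeStmtsFailed signalling a partial run. A hedged sketch of how a caller might use that index to retry only the remainder; the retry loop and its names are assumptions, not the projection handler's actual code:

// Sketch of the caller side of the new index contract; statement and updateFn are stand-ins.
package main

import (
	"context"
	"errors"
	"fmt"
)

type statement struct{ Sequence uint64 }

var errSomeStmtsFailed = errors.New("some statements failed")

func retryRemainder(ctx context.Context, stmts []statement, updateFn func(context.Context, []statement) (int, error)) error {
	for len(stmts) > 0 {
		index, err := updateFn(ctx, stmts)
		if err != nil && !errors.Is(err, errSomeStmtsFailed) {
			return err
		}
		if index < 0 {
			// nothing executed at all; give up instead of spinning
			return err
		}
		stmts = stmts[index+1:] // drop everything that already ran
		if err == nil {
			return nil
		}
		fmt.Printf("retrying %d remaining statements\n", len(stmts))
	}
	return nil
}

func main() {
	calls := 0
	_ = retryRemainder(context.Background(), []statement{{1}, {2}, {3}}, func(ctx context.Context, s []statement) (int, error) {
		calls++
		if calls == 1 {
			return 0, errSomeStmtsFailed // only the first statement ran
		}
		return len(s) - 1, nil
	})
}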
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
@ -61,9 +62,13 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
|
||||
aggregates []eventstore.AggregateType
|
||||
bulkLimit uint64
|
||||
}
|
||||
type args struct {
|
||||
instanceIDs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want want
|
||||
}{
|
||||
{
|
||||
@ -74,13 +79,16 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
|
||||
aggregates: []eventstore.AggregateType{"testAgg"},
|
||||
bulkLimit: 5,
|
||||
},
|
||||
args: args{
|
||||
instanceIDs: []string{"instanceID1"},
|
||||
},
|
||||
want: want{
|
||||
limit: 0,
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, sql.ErrTxDone)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequenceErr("my_sequences", "my_projection", sql.ErrTxDone),
|
||||
expectCurrentSequenceErr("my_sequences", "my_projection", []string{"instanceID1"}, sql.ErrTxDone),
|
||||
},
|
||||
SearchQueryBuilder: nil,
|
||||
},
|
||||
@ -93,24 +101,56 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
|
||||
aggregates: []eventstore.AggregateType{"testAgg"},
|
||||
bulkLimit: 5,
|
||||
},
|
||||
args: args{
|
||||
instanceIDs: []string{"instanceID1"},
|
||||
},
|
||||
want: want{
|
||||
limit: 5,
|
||||
isErr: func(err error) bool {
|
||||
return err == nil
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID1"}),
|
||||
},
|
||||
SearchQueryBuilder: eventstore.
|
||||
NewSearchQueryBuilder(eventstore.ColumnsEvent).
|
||||
AddQuery().
|
||||
AggregateTypes("testAgg").
|
||||
SequenceGreater(5).
|
||||
InstanceID("instanceID").
|
||||
InstanceID("instanceID1").
|
||||
Builder().
|
||||
Limit(5),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple instances",
|
||||
fields: fields{
|
||||
sequenceTable: "my_sequences",
|
||||
projectionName: "my_projection",
|
||||
aggregates: []eventstore.AggregateType{"testAgg"},
|
||||
bulkLimit: 5,
|
||||
},
|
||||
args: args{
|
||||
instanceIDs: []string{"instanceID1", "instanceID2"},
|
||||
},
|
||||
want: want{
|
||||
limit: 5,
|
||||
isErr: func(err error) bool {
|
||||
return err == nil
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID1", "instanceID2"}),
|
||||
},
|
||||
SearchQueryBuilder: eventstore.
|
||||
NewSearchQueryBuilder(eventstore.ColumnsEvent).
|
||||
AddQuery().
|
||||
AggregateTypes("testAgg").
|
||||
SequenceGreater(5).
|
||||
InstanceID("instanceID1").
|
||||
Or().
|
||||
AggregateTypes("testAgg").
|
||||
SequenceGreater(0).
|
||||
ExcludedInstanceID("instanceID").
|
||||
SequenceGreater(5).
|
||||
InstanceID("instanceID2").
|
||||
Builder().
|
||||
Limit(5),
|
||||
},
|
||||
@ -140,7 +180,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
|
||||
expectation(mock)
|
||||
}
|
||||
|
||||
query, limit, err := h.SearchQuery(context.Background())
|
||||
query, limit, err := h.SearchQuery(context.Background(), tt.args.instanceIDs)
|
||||
if !tt.want.isErr(err) {
|
||||
t.Errorf("ProjectionHandler.prepareBulkStmts() error = %v", err)
|
||||
return
|
||||
@ -211,13 +251,14 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
aggregateType: "agg",
|
||||
sequence: 6,
|
||||
previousSequence: 0,
|
||||
instanceID: "instanceID",
|
||||
}),
|
||||
},
|
||||
},
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequenceErr("my_sequences", "my_projection", sql.ErrTxDone),
|
||||
expectCurrentSequenceErr("my_sequences", "my_projection", []string{"instanceID"}, sql.ErrTxDone),
|
||||
expectRollback(),
|
||||
},
|
||||
isErr: func(err error) bool {
|
||||
@ -241,13 +282,14 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
aggregateType: "agg",
|
||||
sequence: 6,
|
||||
previousSequence: 0,
|
||||
instanceID: "instanceID",
|
||||
}),
|
||||
},
|
||||
},
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID"}),
|
||||
expectRollback(),
|
||||
},
|
||||
isErr: func(err error) bool {
|
||||
@ -272,6 +314,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
aggregateType: "testAgg",
|
||||
sequence: 7,
|
||||
previousSequence: 6,
|
||||
instanceID: "instanceID",
|
||||
},
|
||||
[]handler.Column{
|
||||
{
|
||||
@ -284,7 +327,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID"}),
|
||||
expectCommit(),
|
||||
},
|
||||
isErr: func(err error) bool {
|
||||
@ -322,7 +365,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "agg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "agg", []string{"instanceID"}),
|
||||
expectSavePoint(),
|
||||
expectCreate("my_projection", []string{"col"}, []string{"$1"}),
|
||||
expectSavePointRelease(),
|
||||
@ -364,7 +407,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "agg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "agg", []string{"instanceID"}),
|
||||
expectSavePoint(),
|
||||
expectCreate("my_projection", []string{"col"}, []string{"$1"}),
|
||||
expectSavePointRelease(),
|
||||
@ -399,7 +442,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID"}),
|
||||
expectUpdateCurrentSequence("my_sequences", "my_projection", 7, "testAgg", "instanceID"),
|
||||
expectCommit(),
|
||||
},
|
||||
@ -431,7 +474,7 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID"}),
|
||||
expectUpdateCurrentSequence("my_sequences", "my_projection", 7, "testAgg", "instanceID"),
|
||||
expectCommit(),
|
||||
},
|
||||
@ -470,13 +513,14 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectBegin(),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", "instanceID"),
|
||||
expectCurrentSequence("my_sequences", "my_projection", 5, "testAgg", []string{"instanceID"}),
|
||||
expectUpdateCurrentSequence("my_sequences", "my_projection", 7, "testAgg", "instanceID"),
|
||||
expectCommit(),
|
||||
},
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, nil)
|
||||
},
|
||||
stmtsLen: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -488,17 +532,18 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
h := NewStatementHandler(context.Background(), StatementHandlerConfig{
|
||||
ProjectionHandlerConfig: handler.ProjectionHandlerConfig{
|
||||
ProjectionName: "my_projection",
|
||||
HandlerConfig: handler.HandlerConfig{
|
||||
h := &StatementHandler{
|
||||
ProjectionHandler: &handler.ProjectionHandler{
|
||||
Handler: handler.Handler{
|
||||
Eventstore: tt.fields.eventstore,
|
||||
},
|
||||
RequeueEvery: 0,
|
||||
ProjectionName: "my_projection",
|
||||
},
|
||||
SequenceTable: "my_sequences",
|
||||
Client: client,
|
||||
})
|
||||
sequenceTable: "my_sequences",
|
||||
currentSequenceStmt: fmt.Sprintf(currentSequenceStmtFormat, "my_sequences"),
|
||||
updateSequencesBaseStmt: fmt.Sprintf(updateCurrentSequencesStmtFormat, "my_sequences"),
|
||||
client: client,
|
||||
}
|
||||
|
||||
h.aggregates = tt.fields.aggregates
|
||||
|
||||
@ -506,12 +551,12 @@ func TestStatementHandler_Update(t *testing.T) {
|
||||
expectation(mock)
|
||||
}
|
||||
|
||||
stmts, err := h.Update(tt.args.ctx, tt.args.stmts, tt.args.reduce)
|
||||
index, err := h.Update(tt.args.ctx, tt.args.stmts, tt.args.reduce)
|
||||
if !tt.want.isErr(err) {
|
||||
t.Errorf("StatementHandler.Update() error = %v", err)
|
||||
}
|
||||
if err == nil && tt.want.stmtsLen != len(stmts) {
|
||||
t.Errorf("wrong stmts length: want: %d got %d", tt.want.stmtsLen, len(stmts))
|
||||
if err == nil && tt.want.stmtsLen != index {
|
||||
t.Errorf("wrong stmts length: want: %d got %d", tt.want.stmtsLen, index)
|
||||
}
|
||||
|
||||
mock.MatchExpectationsInOrder(true)
|
||||
@ -696,17 +741,12 @@ func TestProjectionHandler_fetchPreviousStmts(t *testing.T) {
|
||||
h := &StatementHandler{
|
||||
aggregates: tt.fields.aggregates,
|
||||
}
|
||||
h.ProjectionHandler = handler.NewProjectionHandler(handler.ProjectionHandlerConfig{
|
||||
HandlerConfig: handler.HandlerConfig{
|
||||
h.ProjectionHandler = &handler.ProjectionHandler{
|
||||
Handler: handler.Handler{
|
||||
Eventstore: tt.fields.eventstore,
|
||||
},
|
||||
ProjectionName: "my_projection",
|
||||
RequeueEvery: 0,
|
||||
},
|
||||
h.reduce,
|
||||
h.Update,
|
||||
h.SearchQuery,
|
||||
)
|
||||
}
|
||||
stmts, err := h.fetchPreviousStmts(tt.args.ctx, nil, tt.args.stmtSeq, "", tt.args.sequences, tt.args.reduce)
|
||||
if !tt.want.isErr(err) {
|
||||
t.Errorf("ProjectionHandler.prepareBulkStmts() error = %v", err)
|
||||
@ -1311,7 +1351,8 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
aggregates []eventstore.AggregateType
|
||||
}
|
||||
type args struct {
|
||||
stmt handler.Statement
|
||||
stmt handler.Statement
|
||||
instanceIDs []string
|
||||
}
|
||||
type want struct {
|
||||
expectations []mockExpectation
|
||||
@ -1338,7 +1379,7 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
return errors.Is(err, sql.ErrConnDone)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequenceErr("my_table", "my_projection", sql.ErrConnDone),
|
||||
expectCurrentSequenceErr("my_table", "my_projection", nil, sql.ErrConnDone),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1350,14 +1391,15 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
aggregates: []eventstore.AggregateType{"agg"},
|
||||
},
|
||||
args: args{
|
||||
stmt: handler.Statement{},
|
||||
stmt: handler.Statement{},
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
want: want{
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, nil)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequenceNoRows("my_table", "my_projection"),
|
||||
expectCurrentSequenceNoRows("my_table", "my_projection", []string{"instanceID"}),
|
||||
},
|
||||
sequences: currentSequences{},
|
||||
},
|
||||
@ -1370,14 +1412,15 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
aggregates: []eventstore.AggregateType{"agg"},
|
||||
},
|
||||
args: args{
|
||||
stmt: handler.Statement{},
|
||||
stmt: handler.Statement{},
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
want: want{
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, sql.ErrTxDone)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequenceScanErr("my_table", "my_projection"),
|
||||
expectCurrentSequenceScanErr("my_table", "my_projection", []string{"instanceID"}),
|
||||
},
|
||||
sequences: currentSequences{},
|
||||
},
|
||||
@ -1390,14 +1433,15 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
aggregates: []eventstore.AggregateType{"agg"},
|
||||
},
|
||||
args: args{
|
||||
stmt: handler.Statement{},
|
||||
stmt: handler.Statement{},
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
want: want{
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, nil)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequence("my_table", "my_projection", 5, "agg", "instanceID"),
|
||||
expectCurrentSequence("my_table", "my_projection", 5, "agg", []string{"instanceID"}),
|
||||
},
|
||||
sequences: currentSequences{
|
||||
"agg": []*instanceSequence{
|
||||
@ -1409,15 +1453,48 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple found",
|
||||
fields: fields{
|
||||
sequenceTable: "my_table",
|
||||
projectionName: "my_projection",
|
||||
aggregates: []eventstore.AggregateType{"agg"},
|
||||
},
|
||||
args: args{
|
||||
stmt: handler.Statement{},
|
||||
instanceIDs: []string{"instanceID1", "instanceID2"},
|
||||
},
|
||||
want: want{
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, nil)
|
||||
},
|
||||
expectations: []mockExpectation{
|
||||
expectCurrentSequence("my_table", "my_projection", 5, "agg", []string{"instanceID1", "instanceID2"}),
|
||||
},
|
||||
sequences: currentSequences{
|
||||
"agg": []*instanceSequence{
|
||||
{
|
||||
sequence: 5,
|
||||
instanceID: "instanceID1",
|
||||
},
|
||||
{
|
||||
sequence: 5,
|
||||
instanceID: "instanceID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h := NewStatementHandler(context.Background(), StatementHandlerConfig{
|
||||
ProjectionHandlerConfig: handler.ProjectionHandlerConfig{
|
||||
h := &StatementHandler{
|
||||
ProjectionHandler: &handler.ProjectionHandler{
|
||||
ProjectionName: tt.fields.projectionName,
|
||||
},
|
||||
SequenceTable: tt.fields.sequenceTable,
|
||||
})
|
||||
sequenceTable: tt.fields.sequenceTable,
|
||||
currentSequenceStmt: fmt.Sprintf(currentSequenceStmtFormat, tt.fields.sequenceTable),
|
||||
}
|
||||
|
||||
h.aggregates = tt.fields.aggregates
|
||||
|
||||
@ -1440,7 +1517,7 @@ func TestStatementHandler_currentSequence(t *testing.T) {
|
||||
t.Fatalf("unexpected err in begin: %v", err)
|
||||
}
|
||||
|
||||
seq, err := h.currentSequences(context.Background(), tx.QueryContext)
|
||||
seq, err := h.currentSequences(context.Background(), tx.QueryContext, tt.args.instanceIDs)
|
||||
if !tt.want.isErr(err) {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@ -1615,12 +1692,13 @@ func TestStatementHandler_updateCurrentSequence(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
h := NewStatementHandler(context.Background(), StatementHandlerConfig{
|
||||
ProjectionHandlerConfig: handler.ProjectionHandlerConfig{
|
||||
h := &StatementHandler{
|
||||
ProjectionHandler: &handler.ProjectionHandler{
|
||||
ProjectionName: tt.fields.projectionName,
|
||||
},
|
||||
SequenceTable: tt.fields.sequenceTable,
|
||||
})
|
||||
sequenceTable: tt.fields.sequenceTable,
|
||||
updateSequencesBaseStmt: fmt.Sprintf(updateCurrentSequencesStmtFormat, tt.fields.sequenceTable),
|
||||
}
|
||||
|
||||
h.aggregates = tt.fields.aggregates
|
||||
|
||||
|
@ -4,8 +4,11 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"github.com/zitadel/logging"
|
||||
|
||||
"github.com/zitadel/zitadel/internal/errors"
|
||||
@ -14,20 +17,20 @@ import (
|
||||
|
||||
const (
|
||||
lockStmtFormat = "INSERT INTO %[1]s" +
|
||||
" (locker_id, locked_until, projection_name, instance_id) VALUES ($1, now()+$2::INTERVAL, $3, $4)" +
|
||||
" (locker_id, locked_until, projection_name, instance_id) VALUES %[2]s" +
|
||||
" ON CONFLICT (projection_name, instance_id)" +
|
||||
" DO UPDATE SET locker_id = $1, locked_until = now()+$2::INTERVAL" +
|
||||
" WHERE %[1]s.projection_name = $3 AND %[1]s.instance_id = $4 AND (%[1]s.locker_id = $1 OR %[1]s.locked_until < now())"
|
||||
" WHERE %[1]s.projection_name = $3 AND %[1]s.instance_id = ANY ($%[3]d) AND (%[1]s.locker_id = $1 OR %[1]s.locked_until < now())"
|
||||
)
|
||||
|
||||
type Locker interface {
|
||||
Lock(ctx context.Context, lockDuration time.Duration, instanceID string) <-chan error
|
||||
Unlock(instanceID string) error
|
||||
Lock(ctx context.Context, lockDuration time.Duration, instanceIDs ...string) <-chan error
|
||||
Unlock(instanceIDs ...string) error
|
||||
}
|
||||
|
||||
type locker struct {
|
||||
client *sql.DB
|
||||
lockStmt string
|
||||
lockStmt func(values string, instances int) string
|
||||
workerName string
|
||||
projectionName string
|
||||
}
|
||||
@ -36,25 +39,27 @@ func NewLocker(client *sql.DB, lockTable, projectionName string) Locker {
|
||||
workerName, err := id.SonyFlakeGenerator().Next()
|
||||
logging.OnError(err).Panic("unable to generate lockID")
|
||||
return &locker{
|
||||
client: client,
|
||||
lockStmt: fmt.Sprintf(lockStmtFormat, lockTable),
|
||||
client: client,
|
||||
lockStmt: func(values string, instances int) string {
|
||||
return fmt.Sprintf(lockStmtFormat, lockTable, values, instances)
|
||||
},
|
||||
workerName: workerName,
|
||||
projectionName: projectionName,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *locker) Lock(ctx context.Context, lockDuration time.Duration, instanceID string) <-chan error {
|
||||
func (h *locker) Lock(ctx context.Context, lockDuration time.Duration, instanceIDs ...string) <-chan error {
|
||||
errs := make(chan error)
|
||||
go h.handleLock(ctx, errs, lockDuration, instanceID)
|
||||
go h.handleLock(ctx, errs, lockDuration, instanceIDs...)
|
||||
return errs
|
||||
}
|
||||
|
||||
func (h *locker) handleLock(ctx context.Context, errs chan error, lockDuration time.Duration, instanceID string) {
|
||||
func (h *locker) handleLock(ctx context.Context, errs chan error, lockDuration time.Duration, instanceIDs ...string) {
|
||||
renewLock := time.NewTimer(0)
|
||||
for {
|
||||
select {
|
||||
case <-renewLock.C:
|
||||
errs <- h.renewLock(ctx, lockDuration, instanceID)
|
||||
errs <- h.renewLock(ctx, lockDuration, instanceIDs...)
|
||||
//refresh the lock 500ms before it times out. 500ms should be enough for one transaction
|
||||
renewLock.Reset(lockDuration - (500 * time.Millisecond))
|
||||
case <-ctx.Done():
|
||||
@ -65,24 +70,38 @@ func (h *locker) handleLock(ctx context.Context, errs chan error, lockDuration t
|
||||
}
|
||||
}
|
||||
|
||||
func (h *locker) renewLock(ctx context.Context, lockDuration time.Duration, instanceID string) error {
|
||||
//the unit of crdb interval is seconds (https://www.cockroachlabs.com/docs/stable/interval.html).
|
||||
res, err := h.client.ExecContext(ctx, h.lockStmt, h.workerName, lockDuration.Seconds(), h.projectionName, instanceID)
|
||||
func (h *locker) renewLock(ctx context.Context, lockDuration time.Duration, instanceIDs ...string) error {
|
||||
lockStmt, values := h.lockStatement(lockDuration, instanceIDs)
|
||||
res, err := h.client.ExecContext(ctx, lockStmt, values...)
|
||||
if err != nil {
|
||||
return errors.ThrowInternal(err, "CRDB-uaDoR", "unable to execute lock")
|
||||
}
|
||||
|
||||
if rows, _ := res.RowsAffected(); rows == 0 {
|
||||
return errors.ThrowAlreadyExists(nil, "CRDB-mmi4J", "projection already locked")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *locker) Unlock(instanceID string) error {
|
||||
_, err := h.client.Exec(h.lockStmt, h.workerName, float64(0), h.projectionName, instanceID)
|
||||
func (h *locker) Unlock(instanceIDs ...string) error {
|
||||
lockStmt, values := h.lockStatement(0, instanceIDs)
|
||||
_, err := h.client.Exec(lockStmt, values...)
|
||||
if err != nil {
|
||||
return errors.ThrowUnknown(err, "CRDB-JjfwO", "unlock failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *locker) lockStatement(lockDuration time.Duration, instanceIDs []string) (string, []interface{}) {
    valueQueries := make([]string, len(instanceIDs))
    values := make([]interface{}, len(instanceIDs)+4)
    values[0] = h.workerName
    //the unit of crdb interval is seconds (https://www.cockroachlabs.com/docs/stable/interval.html).
    values[1] = lockDuration.Seconds()
    values[2] = h.projectionName
    for i, instanceID := range instanceIDs {
        valueQueries[i] = "($1, now()+$2::INTERVAL, $3, $" + strconv.Itoa(i+4) + ")"
        values[i+3] = instanceID
    }
    values[len(values)-1] = pq.StringArray(instanceIDs)
    return h.lockStmt(strings.Join(valueQueries, ", "), len(values)), values
}
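// A hedged, standalone illustration of the statement shape produced by lockStatement:
// $1-$3 are shared (locker id, interval seconds, projection name), $4..$(3+n) are the
// per-row instance ids, and the final parameter ($4+n) is the string array used in the
// ANY() clause. This helper only renders the SQL text for a given table and set of
// instance ids; it is not part of the real code.
func exampleLockStatement(lockTable string, instanceIDs []string) string {
    rows := make([]string, len(instanceIDs))
    for i := range instanceIDs {
        // one VALUES row per instance, reusing the three shared parameters
        rows[i] = "($1, now()+$2::INTERVAL, $3, $" + strconv.Itoa(i+4) + ")"
    }
    // the array parameter index comes right after the shared and per-row parameters
    return fmt.Sprintf(lockStmtFormat, lockTable, strings.Join(rows, ", "), len(instanceIDs)+4)
}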
@ -32,7 +32,7 @@ func TestStatementHandler_handleLock(t *testing.T) {
|
||||
lockDuration time.Duration
|
||||
ctx context.Context
|
||||
errMock *errsMock
|
||||
instanceID string
|
||||
instanceIDs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -56,7 +56,7 @@ func TestStatementHandler_handleLock(t *testing.T) {
|
||||
successfulIters: 2,
|
||||
shouldErr: true,
|
||||
},
|
||||
instanceID: "instanceID",
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -74,7 +74,25 @@ func TestStatementHandler_handleLock(t *testing.T) {
|
||||
errs: make(chan error),
|
||||
successfulIters: 2,
|
||||
},
|
||||
instanceID: "instanceID",
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "success with multiple",
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectLockMultipleInstances(lockTable, workerName, 2, "instanceID1", "instanceID2"),
|
||||
expectLockMultipleInstances(lockTable, workerName, 2, "instanceID1", "instanceID2"),
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
lockDuration: 2 * time.Second,
|
||||
ctx: context.Background(),
|
||||
errMock: &errsMock{
|
||||
errs: make(chan error),
|
||||
successfulIters: 2,
|
||||
},
|
||||
instanceIDs: []string{"instanceID1", "instanceID2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -88,7 +106,9 @@ func TestStatementHandler_handleLock(t *testing.T) {
|
||||
projectionName: projectionName,
|
||||
client: client,
|
||||
workerName: workerName,
|
||||
lockStmt: fmt.Sprintf(lockStmtFormat, lockTable),
|
||||
lockStmt: func(values string, instances int) string {
|
||||
return fmt.Sprintf(lockStmtFormat, lockTable, values, instances)
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectation := range tt.want.expectations {
|
||||
@ -99,7 +119,7 @@ func TestStatementHandler_handleLock(t *testing.T) {
|
||||
|
||||
go tt.args.errMock.handleErrs(t, cancel)
|
||||
|
||||
go h.handleLock(ctx, tt.args.errMock.errs, tt.args.lockDuration, tt.args.instanceID)
|
||||
go h.handleLock(ctx, tt.args.errMock.errs, tt.args.lockDuration, tt.args.instanceIDs...)
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
@ -118,7 +138,7 @@ func TestStatementHandler_renewLock(t *testing.T) {
|
||||
}
|
||||
type args struct {
|
||||
lockDuration time.Duration
|
||||
instanceID string
|
||||
instanceIDs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -137,7 +157,7 @@ func TestStatementHandler_renewLock(t *testing.T) {
|
||||
},
|
||||
args: args{
|
||||
lockDuration: 1 * time.Second,
|
||||
instanceID: "instanceID",
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -152,7 +172,7 @@ func TestStatementHandler_renewLock(t *testing.T) {
|
||||
},
|
||||
args: args{
|
||||
lockDuration: 2 * time.Second,
|
||||
instanceID: "instanceID",
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -167,7 +187,22 @@ func TestStatementHandler_renewLock(t *testing.T) {
|
||||
},
|
||||
args: args{
|
||||
lockDuration: 3 * time.Second,
|
||||
instanceID: "instanceID",
|
||||
instanceIDs: []string{"instanceID"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "success with multiple",
|
||||
want: want{
|
||||
expectations: []mockExpectation{
|
||||
expectLockMultipleInstances(lockTable, workerName, 3, "instanceID1", "instanceID2"),
|
||||
},
|
||||
isErr: func(err error) bool {
|
||||
return errors.Is(err, nil)
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
lockDuration: 3 * time.Second,
|
||||
instanceIDs: []string{"instanceID1", "instanceID2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -181,14 +216,16 @@ func TestStatementHandler_renewLock(t *testing.T) {
|
||||
projectionName: projectionName,
|
||||
client: client,
|
||||
workerName: workerName,
|
||||
lockStmt: fmt.Sprintf(lockStmtFormat, lockTable),
|
||||
lockStmt: func(values string, instances int) string {
|
||||
return fmt.Sprintf(lockStmtFormat, lockTable, values, instances)
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectation := range tt.want.expectations {
|
||||
expectation(mock)
|
||||
}
|
||||
|
||||
err = h.renewLock(context.Background(), tt.args.lockDuration, tt.args.instanceID)
|
||||
err = h.renewLock(context.Background(), tt.args.lockDuration, tt.args.instanceIDs...)
|
||||
if !tt.want.isErr(err) {
|
||||
t.Errorf("unexpected error = %v", err)
|
||||
}
|
||||
@ -253,7 +290,9 @@ func TestStatementHandler_Unlock(t *testing.T) {
|
||||
projectionName: projectionName,
|
||||
client: client,
|
||||
workerName: workerName,
|
||||
lockStmt: fmt.Sprintf(lockStmtFormat, lockTable),
|
||||
lockStmt: func(values string, instances int) string {
|
||||
return fmt.Sprintf(lockStmtFormat, lockTable, values, instances)
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectation := range tt.want.expectations {
|
||||
|
@ -27,3 +27,10 @@ func (h *Handler) Subscribe(aggregates ...eventstore.AggregateType) {
|
||||
func (h *Handler) SubscribeEvents(types map[eventstore.AggregateType][]eventstore.EventType) {
|
||||
h.Sub = eventstore.SubscribeEventTypes(h.EventQueue, types)
|
||||
}
|
||||
|
||||
func (h *Handler) Unsubscribe() {
|
||||
if h.Sub == nil {
|
||||
return
|
||||
}
|
||||
h.Sub.Unsubscribe()
|
||||
}
|
||||
|
@ -2,13 +2,13 @@ package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/zitadel/logging"
|
||||
|
||||
"github.com/zitadel/zitadel/internal/api/authz"
|
||||
"github.com/zitadel/zitadel/internal/eventstore"
|
||||
)
|
||||
|
||||
@ -16,241 +16,207 @@ const systemID = "system"
|
||||
|
||||
type ProjectionHandlerConfig struct {
|
||||
HandlerConfig
|
||||
ProjectionName string
|
||||
RequeueEvery time.Duration
|
||||
RetryFailedAfter time.Duration
|
||||
ProjectionName string
|
||||
RequeueEvery time.Duration
|
||||
RetryFailedAfter time.Duration
|
||||
Retries uint
|
||||
ConcurrentInstances uint
|
||||
}
|
||||
|
||||
//Update updates the projection with the given statements
|
||||
type Update func(context.Context, []*Statement, Reduce) (unexecutedStmts []*Statement, err error)
|
||||
type Update func(context.Context, []*Statement, Reduce) (index int, err error)
|
||||
|
||||
//Reduce reduces the given event to a statement
|
||||
//which is used to update the projection
|
||||
type Reduce func(eventstore.Event) (*Statement, error)
|
||||
|
||||
//SearchQuery generates the search query to lookup for events
|
||||
type SearchQuery func(ctx context.Context, instanceIDs []string) (query *eventstore.SearchQueryBuilder, queryLimit uint64, err error)
|
||||
|
||||
//Lock is used for mutex handling if needed on the projection
|
||||
type Lock func(context.Context, time.Duration, string) <-chan error
|
||||
type Lock func(context.Context, time.Duration, ...string) <-chan error
|
||||
|
||||
//Unlock releases the mutex of the projection
|
||||
type Unlock func(string) error
|
||||
|
||||
//SearchQuery generates the search query to lookup for events
|
||||
type SearchQuery func(ctx context.Context) (query *eventstore.SearchQueryBuilder, queryLimit uint64, err error)
|
||||
type Unlock func(...string) error
|
||||
|
||||
type ProjectionHandler struct {
|
||||
Handler
|
||||
|
||||
requeueAfter time.Duration
|
||||
shouldBulk *time.Timer
|
||||
bulkMu sync.Mutex
|
||||
bulkLocked bool
|
||||
execBulk executeBulk
|
||||
|
||||
retryFailedAfter time.Duration
|
||||
shouldPush *time.Timer
|
||||
pushSet bool
|
||||
|
||||
ProjectionName string
|
||||
|
||||
lockMu sync.Mutex
|
||||
stmts []*Statement
|
||||
ProjectionName string
|
||||
reduce Reduce
|
||||
update Update
|
||||
searchQuery SearchQuery
|
||||
triggerProjection *time.Timer
|
||||
lock Lock
|
||||
unlock Unlock
|
||||
requeueAfter time.Duration
|
||||
retryFailedAfter time.Duration
|
||||
retries int
|
||||
concurrentInstances int
|
||||
}
|
||||
|
||||
func NewProjectionHandler(
|
||||
ctx context.Context,
|
||||
config ProjectionHandlerConfig,
|
||||
reduce Reduce,
|
||||
update Update,
|
||||
query SearchQuery,
|
||||
lock Lock,
|
||||
unlock Unlock,
|
||||
) *ProjectionHandler {
|
||||
concurrentInstances := int(config.ConcurrentInstances)
|
||||
if concurrentInstances < 1 {
|
||||
concurrentInstances = 1
|
||||
}
|
||||
h := &ProjectionHandler{
|
||||
Handler: NewHandler(config.HandlerConfig),
|
||||
ProjectionName: config.ProjectionName,
|
||||
requeueAfter: config.RequeueEvery,
|
||||
// first bulk is instant on startup
|
||||
shouldBulk: time.NewTimer(0),
|
||||
shouldPush: time.NewTimer(0),
|
||||
retryFailedAfter: config.RetryFailedAfter,
|
||||
Handler: NewHandler(config.HandlerConfig),
|
||||
ProjectionName: config.ProjectionName,
|
||||
reduce: reduce,
|
||||
update: update,
|
||||
searchQuery: query,
|
||||
lock: lock,
|
||||
unlock: unlock,
|
||||
requeueAfter: config.RequeueEvery,
|
||||
triggerProjection: time.NewTimer(0), // first trigger is instant on startup
|
||||
retryFailedAfter: config.RetryFailedAfter,
|
||||
retries: int(config.Retries),
|
||||
concurrentInstances: concurrentInstances,
|
||||
}
|
||||
|
||||
h.execBulk = h.prepareExecuteBulk(query, reduce, update)
|
||||
go h.subscribe(ctx)
|
||||
|
||||
//unitialized timer
|
||||
//https://github.com/golang/go/issues/12721
|
||||
<-h.shouldPush.C
|
||||
go h.schedule(ctx)
|
||||
|
||||
if config.RequeueEvery <= 0 {
|
||||
if !h.shouldBulk.Stop() {
|
||||
<-h.shouldBulk.C
|
||||
}
|
||||
logging.WithFields("projection", h.ProjectionName).Info("starting handler without requeue")
|
||||
return h
|
||||
} else if config.RequeueEvery < 500*time.Millisecond {
|
||||
logging.WithFields("projection", h.ProjectionName).Fatal("requeue every must be greater 500ms or <= 0")
|
||||
}
|
||||
logging.WithFields("projection", h.ProjectionName).Info("starting handler")
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) ResetShouldBulk() {
|
||||
if h.requeueAfter > 0 {
|
||||
h.shouldBulk.Reset(h.requeueAfter)
|
||||
//Trigger handles all events for the provided instances (or current instance from context if non specified)
|
||||
//by calling FetchEvents and Process until the amount of events is smaller than the BulkLimit
|
||||
func (h *ProjectionHandler) Trigger(ctx context.Context, instances ...string) error {
|
||||
ids := []string{authz.GetInstance(ctx).InstanceID()}
|
||||
if len(instances) > 0 {
|
||||
ids = instances
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) triggerShouldPush(after time.Duration) {
|
||||
if !h.pushSet {
|
||||
h.pushSet = true
|
||||
h.shouldPush.Reset(after)
|
||||
}
|
||||
}
|
||||
|
||||
//Process waits for several conditions:
|
||||
// if context is canceled the function gracefully shuts down
|
||||
// if an event occures it reduces the event
|
||||
// if the internal timer expires the handler will check
|
||||
// for unprocessed events on eventstore
|
||||
func (h *ProjectionHandler) Process(
|
||||
ctx context.Context,
|
||||
reduce Reduce,
|
||||
update Update,
|
||||
lock Lock,
|
||||
unlock Unlock,
|
||||
query SearchQuery,
|
||||
) {
|
||||
//handle panic
|
||||
defer func() {
|
||||
cause := recover()
|
||||
logging.WithFields("projection", h.ProjectionName, "cause", cause, "stack", string(debug.Stack())).Error("projection handler paniced")
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if h.pushSet {
|
||||
h.push(context.Background(), update, reduce)
|
||||
events, hasLimitExceeded, err := h.FetchEvents(ctx, ids...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(events) == 0 {
|
||||
return nil
|
||||
}
|
||||
_, err = h.Process(ctx, events...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !hasLimitExceeded {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
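// Trigger boils down to a fetch/process loop that stops once a fetch returns fewer
// events than the bulk limit. A hedged, generic sketch of that loop shape; the
// function name and the callback signatures are illustrative, not the real API:
func drainUntilCaughtUp(
    ctx context.Context,
    fetch func(context.Context) (events []eventstore.Event, limitExceeded bool, err error),
    process func(context.Context, ...eventstore.Event) (int, error),
) error {
    for {
        events, limitExceeded, err := fetch(ctx)
        if err != nil {
            return err
        }
        if len(events) == 0 {
            return nil
        }
        if _, err := process(ctx, events...); err != nil {
            return err
        }
        if !limitExceeded {
            // the last batch was below the limit, so the projection is caught up
            return nil
        }
    }
}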
|
||||
|
||||
//Process handles multiple events by reducing them to statements and updating the projection
|
||||
func (h *ProjectionHandler) Process(ctx context.Context, events ...eventstore.Event) (index int, err error) {
|
||||
if len(events) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
index = -1
|
||||
statements := make([]*Statement, len(events))
|
||||
for i, event := range events {
|
||||
statements[i], err = h.reduce(event)
|
||||
if err != nil {
|
||||
return index, err
|
||||
}
|
||||
}
|
||||
for retry := 0; retry <= h.retries; retry++ {
|
||||
index, err = h.update(ctx, statements[index+1:], h.reduce)
|
||||
if err != nil && !errors.Is(err, ErrSomeStmtsFailed) {
|
||||
return index, err
|
||||
}
|
||||
if err == nil {
|
||||
return index, nil
|
||||
}
|
||||
time.Sleep(h.retryFailedAfter)
|
||||
}
|
||||
return index, err
|
||||
}
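// The retry loop above resumes at statements[index+1:], so statements that already
// succeeded are never executed twice. A simplified sketch of that
// resume-on-partial-failure idea; the string statements, the partialErr sentinel and
// the count returned by apply are assumptions for illustration only:
func retryWithResume(
    stmts []string,
    apply func([]string) (done int, err error),
    retries int,
    wait time.Duration,
    partialErr error,
) error {
    offset := 0
    for attempt := 0; attempt <= retries; attempt++ {
        done, err := apply(stmts[offset:])
        offset += done // skip everything that already succeeded
        if err == nil {
            return nil
        }
        if !errors.Is(err, partialErr) {
            return err // non-retryable failure: give up immediately
        }
        time.Sleep(wait) // wait before retrying the remaining statements
    }
    return partialErr
}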
|
||||
|
||||
//FetchEvents checks the current sequences and filters for newer events
|
||||
func (h *ProjectionHandler) FetchEvents(ctx context.Context, instances ...string) ([]eventstore.Event, bool, error) {
|
||||
eventQuery, eventsLimit, err := h.searchQuery(ctx, instances)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
events, err := h.Eventstore.Filter(ctx, eventQuery)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return events, int(eventsLimit) == len(events), err
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) subscribe(ctx context.Context) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
h.Handler.Unsubscribe()
|
||||
logging.WithFields("projection", h.ProjectionName).Errorf("subscription panicked: %v", err)
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
for firstEvent := range h.EventQueue {
|
||||
events := checkAdditionalEvents(h.EventQueue, firstEvent)
|
||||
|
||||
index, err := h.Process(ctx, events...)
|
||||
if err != nil || index < len(events)-1 {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Error("unable to process all events from subscription")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) schedule(ctx context.Context) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName, "cause", err, "stack", string(debug.Stack())).Error("schedule panicked")
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
for range h.triggerProjection.C {
|
||||
ids, err := h.Eventstore.InstanceIDs(ctx, eventstore.NewSearchQueryBuilder(eventstore.ColumnsInstanceIDs).AddQuery().ExcludedInstanceID("").Builder())
|
||||
if err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Error("instance ids")
|
||||
h.triggerProjection.Reset(h.requeueAfter)
|
||||
continue
|
||||
}
|
||||
for i := 0; i < len(ids); i = i + h.concurrentInstances {
|
||||
max := i + h.concurrentInstances
|
||||
if max > len(ids) {
|
||||
max = len(ids)
|
||||
}
|
||||
h.shutdown()
|
||||
return
|
||||
case event := <-h.EventQueue:
|
||||
if err := h.processEvent(ctx, event, reduce); err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Warn("process failed")
|
||||
instances := ids[i:max]
|
||||
lockCtx, cancelLock := context.WithCancel(ctx)
|
||||
errs := h.lock(lockCtx, h.requeueAfter, instances...)
|
||||
//wait until projection is locked
|
||||
if err, ok := <-errs; err != nil || !ok {
|
||||
cancelLock()
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(err).Warn("initial lock failed")
|
||||
continue
|
||||
}
|
||||
h.triggerShouldPush(0)
|
||||
case <-h.shouldBulk.C:
|
||||
h.bulkMu.Lock()
|
||||
h.bulkLocked = true
|
||||
h.bulk(ctx, lock, unlock)
|
||||
h.ResetShouldBulk()
|
||||
h.bulkLocked = false
|
||||
h.bulkMu.Unlock()
|
||||
default:
|
||||
//lower prio select with push
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if h.pushSet {
|
||||
h.push(context.Background(), update, reduce)
|
||||
}
|
||||
h.shutdown()
|
||||
return
|
||||
case event := <-h.EventQueue:
|
||||
if err := h.processEvent(ctx, event, reduce); err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Warn("process failed")
|
||||
continue
|
||||
}
|
||||
h.triggerShouldPush(0)
|
||||
case <-h.shouldBulk.C:
|
||||
h.bulkMu.Lock()
|
||||
h.bulkLocked = true
|
||||
h.bulk(ctx, lock, unlock)
|
||||
h.ResetShouldBulk()
|
||||
h.bulkLocked = false
|
||||
h.bulkMu.Unlock()
|
||||
case <-h.shouldPush.C:
|
||||
h.push(ctx, update, reduce)
|
||||
h.ResetShouldBulk()
|
||||
go h.cancelOnErr(lockCtx, errs, cancelLock)
|
||||
err = h.Trigger(lockCtx, instances...)
|
||||
if err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName, "instanceIDs", instances).WithError(err).Error("trigger failed")
|
||||
}
|
||||
|
||||
cancelLock()
|
||||
unlockErr := h.unlock(instances...)
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(unlockErr).Warn("unable to unlock")
|
||||
}
|
||||
h.triggerProjection.Reset(h.requeueAfter)
|
||||
}
|
||||
}
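// The scheduler walks the instance ids in chunks of concurrentInstances so one requeue
// cycle never locks and triggers every instance at once. The chunking on its own looks
// like this; the clamping mirrors the constructor above (a value below 1 becomes 1):
func chunkInstanceIDs(ids []string, size int) [][]string {
    if size < 1 {
        size = 1
    }
    chunks := make([][]string, 0, (len(ids)+size-1)/size)
    for i := 0; i < len(ids); i += size {
        max := i + size
        if max > len(ids) {
            max = len(ids)
        }
        chunks = append(chunks, ids[i:max]) // each chunk is locked, triggered and unlocked in turn
    }
    return chunks
}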
|
||||
|
||||
func (h *ProjectionHandler) processEvent(
|
||||
ctx context.Context,
|
||||
event eventstore.Event,
|
||||
reduce Reduce,
|
||||
) error {
|
||||
stmt, err := reduce(event)
|
||||
if err != nil {
|
||||
logging.New().WithError(err).Warn("unable to process event")
|
||||
return err
|
||||
}
|
||||
|
||||
h.lockMu.Lock()
|
||||
defer h.lockMu.Unlock()
|
||||
|
||||
h.stmts = append(h.stmts, stmt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) TriggerBulk(
|
||||
ctx context.Context,
|
||||
lock Lock,
|
||||
unlock Unlock,
|
||||
) error {
|
||||
if !h.shouldBulk.Stop() {
|
||||
//make sure to flush shouldBulk chan
|
||||
select {
|
||||
case <-h.shouldBulk.C:
|
||||
default:
|
||||
}
|
||||
}
|
||||
defer h.ResetShouldBulk()
|
||||
|
||||
h.bulkMu.Lock()
|
||||
if h.bulkLocked {
|
||||
logging.WithFields("projection", h.ProjectionName).Debugf("waiting for existing bulk to finish")
|
||||
h.bulkMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
h.bulkLocked = true
|
||||
defer func() {
|
||||
h.bulkLocked = false
|
||||
h.bulkMu.Unlock()
|
||||
}()
|
||||
|
||||
return h.bulk(ctx, lock, unlock)
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) bulk(
|
||||
ctx context.Context,
|
||||
lock Lock,
|
||||
unlock Unlock,
|
||||
) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
errs := lock(ctx, h.requeueAfter, systemID)
|
||||
//wait until projection is locked
|
||||
if err, ok := <-errs; err != nil || !ok {
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(err).Warn("initial lock failed")
|
||||
return err
|
||||
}
|
||||
go h.cancelOnErr(ctx, errs, cancel)
|
||||
|
||||
execErr := h.execBulk(ctx)
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(execErr).Warn("unable to execute")
|
||||
|
||||
unlockErr := unlock(systemID)
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(unlockErr).Warn("unable to unlock")
|
||||
|
||||
if execErr != nil {
|
||||
return execErr
|
||||
}
|
||||
|
||||
return unlockErr
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) cancelOnErr(ctx context.Context, errs <-chan error, cancel func()) {
|
||||
for {
|
||||
select {
|
||||
@ -268,98 +234,15 @@ func (h *ProjectionHandler) cancelOnErr(ctx context.Context, errs <-chan error,
|
||||
}
|
||||
}
|
||||
|
||||
type executeBulk func(ctx context.Context) error
|
||||
|
||||
func (h *ProjectionHandler) prepareExecuteBulk(
|
||||
query SearchQuery,
|
||||
reduce Reduce,
|
||||
update Update,
|
||||
) executeBulk {
|
||||
return func(ctx context.Context) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
hasLimitExeeded, err := h.fetchBulkStmts(ctx, query, reduce)
|
||||
if err != nil || len(h.stmts) == 0 {
|
||||
logging.WithFields("projection", h.ProjectionName).OnError(err).Warn("unable to fetch stmts")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = h.push(ctx, update, reduce); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !hasLimitExeeded {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func checkAdditionalEvents(eventQueue chan eventstore.Event, event eventstore.Event) []eventstore.Event {
    events := make([]eventstore.Event, 1)
    events[0] = event
    for {
        select {
        case event := <-eventQueue:
            events = append(events, event)
        default:
            return events
        }
    }
}
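// Together with the subscription loop further up, checkAdditionalEvents yields the
// usual "block for the first event, then drain whatever is already buffered" batching
// pattern. A compact, generic sketch of that pattern over a channel:
func nextBatch(queue chan eventstore.Event) []eventstore.Event {
    batch := []eventstore.Event{<-queue} // block until at least one event arrives
    for {
        select {
        case event := <-queue:
            batch = append(batch, event)
        default:
            return batch // nothing buffered any more, hand the batch to Process
        }
    }
}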
|
||||
|
||||
func (h *ProjectionHandler) fetchBulkStmts(
|
||||
ctx context.Context,
|
||||
query SearchQuery,
|
||||
reduce Reduce,
|
||||
) (limitExeeded bool, err error) {
|
||||
eventQuery, eventsLimit, err := query(ctx)
|
||||
if err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Warn("unable to create event query")
|
||||
return false, err
|
||||
}
|
||||
|
||||
events, err := h.Eventstore.Filter(ctx, eventQuery)
|
||||
if err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName).WithError(err).Info("Unable to bulk fetch events")
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, event := range events {
|
||||
if err = h.processEvent(ctx, event, reduce); err != nil {
|
||||
logging.WithFields("projection", h.ProjectionName, "sequence", event.Sequence(), "instanceID", event.Aggregate().InstanceID).WithError(err).Warn("unable to process event in bulk")
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return len(events) == int(eventsLimit), nil
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) push(
|
||||
ctx context.Context,
|
||||
update Update,
|
||||
reduce Reduce,
|
||||
) (err error) {
|
||||
h.lockMu.Lock()
|
||||
defer h.lockMu.Unlock()
|
||||
|
||||
sort.Slice(h.stmts, func(i, j int) bool {
|
||||
return h.stmts[i].Sequence < h.stmts[j].Sequence
|
||||
})
|
||||
|
||||
h.stmts, err = update(ctx, h.stmts, reduce)
|
||||
h.pushSet = len(h.stmts) > 0
|
||||
|
||||
if h.pushSet {
|
||||
h.triggerShouldPush(h.retryFailedAfter)
|
||||
return nil
|
||||
}
|
||||
|
||||
h.shouldPush.Stop()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *ProjectionHandler) shutdown() {
|
||||
h.lockMu.Lock()
|
||||
defer h.lockMu.Unlock()
|
||||
h.Sub.Unsubscribe()
|
||||
if !h.shouldBulk.Stop() {
|
||||
<-h.shouldBulk.C
|
||||
}
|
||||
if !h.shouldPush.Stop() {
|
||||
<-h.shouldPush.C
|
||||
}
|
||||
logging.New().Info("stop processing")
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -8,8 +8,8 @@ import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
repository "github.com/zitadel/zitadel/internal/eventstore/repository"
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
repository "github.com/zitadel/zitadel/internal/eventstore/repository"
|
||||
)
|
||||
|
||||
// MockRepository is a mock of Repository interface.
|
||||
@ -78,6 +78,21 @@ func (mr *MockRepositoryMockRecorder) Health(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockRepository)(nil).Health), arg0)
|
||||
}
|
||||
|
||||
// InstanceIDs mocks base method.
|
||||
func (m *MockRepository) InstanceIDs(arg0 context.Context, arg1 *repository.SearchQuery) ([]string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InstanceIDs", arg0, arg1)
|
||||
ret0, _ := ret[0].([]string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// InstanceIDs indicates an expected call of InstanceIDs.
|
||||
func (mr *MockRepositoryMockRecorder) InstanceIDs(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceIDs", reflect.TypeOf((*MockRepository)(nil).InstanceIDs), arg0, arg1)
|
||||
}
|
||||
|
||||
// LatestSequence mocks base method.
|
||||
func (m *MockRepository) LatestSequence(arg0 context.Context, arg1 *repository.SearchQuery) (uint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -29,6 +29,16 @@ func (m *MockRepository) ExpectFilterEventsError(err error) *MockRepository {
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MockRepository) ExpectInstanceIDs(instanceIDs ...string) *MockRepository {
    m.EXPECT().InstanceIDs(gomock.Any(), gomock.Any()).Return(instanceIDs, nil)
    return m
}

func (m *MockRepository) ExpectInstanceIDsError(err error) *MockRepository {
    m.EXPECT().InstanceIDs(gomock.Any(), gomock.Any()).Return(nil, err)
    return m
}
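// A hedged example of how these helpers can drive a test; it assumes it lives in a
// _test.go file of this package with "context", "testing" and gomock imported, and
// passes a nil search query only to keep the illustration short.
func TestExpectInstanceIDs(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    repo := NewMockRepository(ctrl).ExpectInstanceIDs("instance-1", "instance-2")

    ids, err := repo.InstanceIDs(context.Background(), nil)
    if err != nil || len(ids) != 2 {
        t.Fatalf("unexpected result: %v, %v", ids, err)
    }
}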
|
||||
|
||||
func (m *MockRepository) ExpectPush(expectedEvents []*repository.Event, expectedUniqueConstraints ...*repository.UniqueConstraint) *MockRepository {
|
||||
m.EXPECT().Push(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(ctx context.Context, events []*repository.Event, uniqueConstraints ...*repository.UniqueConstraint) error {
|
||||
|
@ -8,14 +8,16 @@ import (
|
||||
type Repository interface {
|
||||
//Health checks if the connection to the storage is available
|
||||
Health(ctx context.Context) error
|
||||
// PushEvents adds all events of the given aggregates to the eventstreams of the aggregates.
|
||||
// Push adds all events of the given aggregates to the event streams of the aggregates.
|
||||
// if unique constraints are pushed, they will be added to the unique table for checking unique constraint violations
|
||||
// This call is transaction save. The transaction will be rolled back if one event fails
|
||||
Push(ctx context.Context, events []*Event, uniqueConstraints ...*UniqueConstraint) error
|
||||
// Filter returns all events matching the given search query
|
||||
Filter(ctx context.Context, searchQuery *SearchQuery) (events []*Event, err error)
|
||||
//LatestSequence returns the latests sequence found by the the search query
|
||||
//LatestSequence returns the latest sequence found by the search query
|
||||
LatestSequence(ctx context.Context, queryFactory *SearchQuery) (uint64, error)
|
||||
//InstanceIDs returns the instance ids found by the search query
|
||||
InstanceIDs(ctx context.Context, queryFactory *SearchQuery) ([]string, error)
|
||||
//CreateInstance creates a new sequence for the given instance
|
||||
CreateInstance(ctx context.Context, instanceID string) error
|
||||
}
|
||||
|
@ -23,6 +23,8 @@ const (
|
||||
ColumnsEvent = iota + 1
|
||||
//ColumnsMaxSequence represents the latest sequence of the filtered events
|
||||
ColumnsMaxSequence
|
||||
// ColumnsInstanceIDs represents the instance ids of the filtered events
|
||||
ColumnsInstanceIDs
|
||||
|
||||
columnsCount
|
||||
)
|
||||
|
@ -218,7 +218,7 @@ func (db *CRDB) Filter(ctx context.Context, searchQuery *repository.SearchQuery)
|
||||
return events, nil
|
||||
}
|
||||
|
||||
//LatestSequence returns the latests sequence found by the the search query
|
||||
//LatestSequence returns the latest sequence found by the search query
|
||||
func (db *CRDB) LatestSequence(ctx context.Context, searchQuery *repository.SearchQuery) (uint64, error) {
|
||||
var seq Sequence
|
||||
err := query(ctx, db, searchQuery, &seq)
|
||||
@ -228,6 +228,16 @@ func (db *CRDB) LatestSequence(ctx context.Context, searchQuery *repository.Sear
|
||||
return uint64(seq), nil
|
||||
}
|
||||
|
||||
//InstanceIDs returns the instance ids found by the search query
func (db *CRDB) InstanceIDs(ctx context.Context, searchQuery *repository.SearchQuery) ([]string, error) {
    var ids []string
    err := query(ctx, db, searchQuery, &ids)
    if err != nil {
        return nil, err
    }
    return ids, nil
}
|
||||
|
||||
func (db *CRDB) db() *sql.DB {
|
||||
return db.client
|
||||
}
|
||||
@ -262,6 +272,10 @@ func (db *CRDB) maxSequenceQuery() string {
|
||||
return "SELECT MAX(event_sequence) FROM eventstore.events"
|
||||
}
|
||||
|
||||
func (db *CRDB) instanceIDsQuery() string {
|
||||
return "SELECT DISTINCT instance_id FROM eventstore.events"
|
||||
}
|
||||
|
||||
func (db *CRDB) columnName(col repository.Field) string {
|
||||
switch col {
|
||||
case repository.FieldAggregateID:
|
||||
|
@ -22,6 +22,7 @@ type querier interface {
|
||||
placeholder(query string) string
|
||||
eventQuery() string
|
||||
maxSequenceQuery() string
|
||||
instanceIDsQuery() string
|
||||
db() *sql.DB
|
||||
orderByEventSequence(desc bool) string
|
||||
}
|
||||
@ -36,7 +37,7 @@ func query(ctx context.Context, criteria querier, searchQuery *repository.Search
|
||||
}
|
||||
query += where
|
||||
|
||||
if searchQuery.Columns != repository.ColumnsMaxSequence {
|
||||
if searchQuery.Columns == repository.ColumnsEvent {
|
||||
query += criteria.orderByEventSequence(searchQuery.Desc)
|
||||
}
|
||||
|
||||
@ -76,6 +77,8 @@ func prepareColumns(criteria querier, columns repository.Columns) (string, func(
|
||||
switch columns {
|
||||
case repository.ColumnsMaxSequence:
|
||||
return criteria.maxSequenceQuery(), maxSequenceScanner
|
||||
case repository.ColumnsInstanceIDs:
|
||||
return criteria.instanceIDsQuery(), instanceIDsScanner
|
||||
case repository.ColumnsEvent:
|
||||
return criteria.eventQuery(), eventsScanner
|
||||
default:
|
||||
@ -95,6 +98,22 @@ func maxSequenceScanner(row scan, dest interface{}) (err error) {
|
||||
return z_errors.ThrowInternal(err, "SQL-bN5xg", "something went wrong")
|
||||
}
|
||||
|
||||
func instanceIDsScanner(scanner scan, dest interface{}) (err error) {
    ids, ok := dest.(*[]string)
    if !ok {
        return z_errors.ThrowInvalidArgument(nil, "SQL-Begh2", "type must be an array of string")
    }
    var id string
    err = scanner(&id)
    if err != nil {
        logging.WithError(err).Warn("unable to scan row")
        return z_errors.ThrowInternal(err, "SQL-DEFGe", "unable to scan row")
    }
    *ids = append(*ids, id)

    return nil
}
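// For reference, the same per-row contract driven from the caller side: the query
// function iterates the rows and hands rows.Scan plus the destination slice to the
// scanner above for every row. A hedged, standalone equivalent using database/sql
// directly (connection handling omitted):
func collectInstanceIDs(rows *sql.Rows) ([]string, error) {
    defer rows.Close()
    ids := make([]string, 0)
    for rows.Next() {
        var id string
        if err := rows.Scan(&id); err != nil {
            return nil, err
        }
        ids = append(ids, id) // one DISTINCT instance_id per row
    }
    return ids, rows.Err()
}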
|
||||
|
||||
func eventsScanner(scanner scan, dest interface{}) (err error) {
|
||||
events, ok := dest.(*[]*repository.Event)
|
||||
if !ok {
|
||||
@ -157,7 +176,7 @@ func prepareCondition(criteria querier, filters [][]*repository.Filter) (clause
|
||||
var err error
|
||||
value, err = json.Marshal(value)
|
||||
if err != nil {
|
||||
logging.New().WithError(err).Warn("unable to marshal search value")
|
||||
logging.WithError(err).Warn("unable to marshal search value")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -39,6 +39,8 @@ const (
|
||||
ColumnsEvent Columns = repository.ColumnsEvent
|
||||
// ColumnsMaxSequence represents the latest sequence of the filtered events
|
||||
ColumnsMaxSequence Columns = repository.ColumnsMaxSequence
|
||||
// ColumnsInstanceIDs represents the instance ids of the filtered events
|
||||
ColumnsInstanceIDs Columns = repository.ColumnsInstanceIDs
|
||||
)
|
||||
|
||||
// AggregateType is the object name
|
||||
@ -278,6 +280,9 @@ func (query *SearchQuery) eventTypeFilter() *repository.Filter {
|
||||
}
|
||||
|
||||
func (query *SearchQuery) aggregateTypeFilter() *repository.Filter {
|
||||
if len(query.aggregateTypes) < 1 {
|
||||
return nil
|
||||
}
|
||||
if len(query.aggregateTypes) == 1 {
|
||||
return repository.NewFilter(repository.FieldAggregateType, repository.AggregateType(query.aggregateTypes[0]), repository.OperationEquals)
|
||||
}
|
||||
|
@ -13,6 +13,7 @@ type Eventstore interface {
|
||||
Health(ctx context.Context) error
|
||||
FilterEvents(ctx context.Context, searchQuery *models.SearchQuery) (events []*models.Event, err error)
|
||||
Subscribe(aggregates ...models.AggregateType) *Subscription
|
||||
InstanceIDs(ctx context.Context, searchQuery *models.SearchQuery) ([]string, error)
|
||||
}
|
||||
|
||||
var _ Eventstore = (*eventstore)(nil)
|
||||
@ -37,3 +38,10 @@ func (es *eventstore) FilterEvents(ctx context.Context, searchQuery *models.Sear
|
||||
func (es *eventstore) Health(ctx context.Context) error {
|
||||
return es.repo.Health(ctx)
|
||||
}
|
||||
|
||||
func (es *eventstore) InstanceIDs(ctx context.Context, searchQuery *models.SearchQuery) ([]string, error) {
|
||||
if err := searchQuery.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return es.repo.InstanceIDs(ctx, models.FactoryFromSearchQuery(searchQuery))
|
||||
}
|
||||
|
@ -11,6 +11,8 @@ type Repository interface {
|
||||
|
||||
// Filter returns all events matching the given search query
|
||||
Filter(ctx context.Context, searchQuery *models.SearchQueryFactory) (events []*models.Event, err error)
|
||||
//LatestSequence returns the latests sequence found by the the search query
|
||||
//LatestSequence returns the latest sequence found by the search query
|
||||
LatestSequence(ctx context.Context, queryFactory *models.SearchQueryFactory) (uint64, error)
|
||||
//InstanceIDs returns the instance ids found by the search query
|
||||
InstanceIDs(ctx context.Context, queryFactory *models.SearchQueryFactory) ([]string, error)
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/zitadel/logging"
|
||||
|
||||
"github.com/zitadel/zitadel/internal/errors"
|
||||
es_models "github.com/zitadel/zitadel/internal/eventstore/v1/models"
|
||||
"github.com/zitadel/zitadel/internal/telemetry/tracing"
|
||||
@ -60,3 +61,31 @@ func (db *SQL) LatestSequence(ctx context.Context, queryFactory *es_models.Searc
|
||||
}
|
||||
return uint64(*sequence), nil
|
||||
}
|
||||
|
||||
func (db *SQL) InstanceIDs(ctx context.Context, queryFactory *es_models.SearchQueryFactory) ([]string, error) {
|
||||
query, _, values, rowScanner := buildQuery(queryFactory)
|
||||
if query == "" {
|
||||
return nil, errors.ThrowInvalidArgument(nil, "SQL-Sfwg2", "invalid query factory")
|
||||
}
|
||||
|
||||
rows, err := db.client.Query(query, values...)
|
||||
if err != nil {
|
||||
logging.New().WithError(err).Info("query failed")
|
||||
return nil, errors.ThrowInternal(err, "SQL-Sfg3r", "unable to filter instance ids")
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
ids := make([]string, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var id string
|
||||
err := rowScanner(rows.Scan, &id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ func buildQuery(queryFactory *es_models.SearchQueryFactory) (query string, limit
|
||||
}
|
||||
query += where
|
||||
|
||||
if searchQuery.Columns != es_models.Columns_Max_Sequence {
|
||||
if searchQuery.Columns == es_models.Columns_Event {
|
||||
query += " ORDER BY event_sequence"
|
||||
if searchQuery.Desc {
|
||||
query += " DESC"
|
||||
@ -104,6 +104,19 @@ func prepareColumns(columns es_models.Columns) (string, func(s scan, dest interf
|
||||
}
|
||||
return z_errors.ThrowInternal(err, "SQL-bN5xg", "something went wrong")
|
||||
}
|
||||
case es_models.Columns_InstanceIDs:
|
||||
return "SELECT DISTINCT instance_id FROM eventstore.events", func(row scan, dest interface{}) (err error) {
|
||||
instanceID, ok := dest.(*string)
|
||||
if !ok {
|
||||
return z_errors.ThrowInvalidArgument(nil, "SQL-Fef5h", "type must be *string]")
|
||||
}
|
||||
err = row(instanceID)
|
||||
if err != nil {
|
||||
logging.New().WithError(err).Warn("unable to scan row")
|
||||
return z_errors.ThrowInternal(err, "SQL-SFef3", "unable to scan row")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case es_models.Columns_Event:
|
||||
return selectStmt, func(row scan, dest interface{}) (err error) {
|
||||
event, ok := dest.(*es_models.Event)
|
||||
|
@ -41,6 +41,7 @@ type Columns int32
|
||||
const (
|
||||
Columns_Event = iota
|
||||
Columns_Max_Sequence
|
||||
Columns_InstanceIDs
|
||||
//insert new columns-types before this columnsCount because count is needed for validation
|
||||
columnsCount
|
||||
)
|
||||
@ -48,7 +49,7 @@ const (
|
||||
//FactoryFromSearchQuery is deprecated because it's for migration purposes. use NewSearchQueryFactory
|
||||
func FactoryFromSearchQuery(q *SearchQuery) *SearchQueryFactory {
|
||||
factory := &SearchQueryFactory{
|
||||
columns: Columns_Event,
|
||||
columns: q.Columns,
|
||||
desc: q.Desc,
|
||||
limit: q.Limit,
|
||||
queries: make([]*query, len(q.Queries)),
|
||||
@ -232,6 +233,9 @@ func (q *query) eventTypeFilter() *Filter {
|
||||
}
|
||||
|
||||
func (q *query) aggregateTypeFilter() *Filter {
|
||||
if len(q.aggregateTypes) < 1 {
|
||||
return nil
|
||||
}
|
||||
if len(q.aggregateTypes) == 1 {
|
||||
return NewFilter(Field_AggregateType, q.aggregateTypes[0], Operation_Equals)
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
|
||||
//SearchQuery is deprecated. Use SearchQueryFactory
|
||||
type SearchQuery struct {
|
||||
Columns Columns
|
||||
Limit uint64
|
||||
Desc bool
|
||||
Filters []*Filter
|
||||
@ -27,6 +28,11 @@ func NewSearchQuery() *SearchQuery {
|
||||
}
|
||||
}
|
||||
|
||||
func (q *SearchQuery) SetColumn(columns Columns) *SearchQuery {
|
||||
q.Columns = columns
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *SearchQuery) AddQuery() *Query {
|
||||
query := &Query{
|
||||
searchQuery: q,
|
||||
|
@ -2,9 +2,9 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/zitadel/logging"
|
||||
|
||||
v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
|
||||
@ -17,7 +17,7 @@ const (
|
||||
|
||||
type Handler interface {
|
||||
ViewModel() string
|
||||
EventQuery() (*models.SearchQuery, error)
|
||||
EventQuery(instanceIDs ...string) (*models.SearchQuery, error)
|
||||
Reduce(*models.Event) error
|
||||
OnError(event *models.Event, err error) error
|
||||
OnSuccess() error
|
||||
@ -37,14 +37,13 @@ func ReduceEvent(handler Handler, event *models.Event) {
|
||||
err := recover()
|
||||
|
||||
if err != nil {
|
||||
sentry.CurrentHub().Recover(err)
|
||||
handler.Subscription().Unsubscribe()
|
||||
logging.WithFields("HANDL-SAFe1").Errorf("reduce panicked: %v", err)
|
||||
logging.WithFields("cause", err, "stack", string(debug.Stack())).Error("reduce panicked")
|
||||
}
|
||||
}()
|
||||
currentSequence, err := handler.CurrentSequence(event.InstanceID)
|
||||
if err != nil {
|
||||
logging.New().WithError(err).Warn("unable to get current sequence")
|
||||
logging.WithError(err).Warn("unable to get current sequence")
|
||||
return
|
||||
}
|
||||
|
||||
@ -58,14 +57,14 @@ func ReduceEvent(handler Handler, event *models.Event) {
|
||||
|
||||
unprocessedEvents, err := handler.Eventstore().FilterEvents(context.Background(), searchQuery)
|
||||
if err != nil {
|
||||
logging.WithFields("HANDL-L6YH1", "sequence", event.Sequence).Warn("filter failed")
|
||||
logging.WithFields("sequence", event.Sequence).Warn("filter failed")
|
||||
return
|
||||
}
|
||||
|
||||
for _, unprocessedEvent := range unprocessedEvents {
|
||||
currentSequence, err := handler.CurrentSequence(unprocessedEvent.InstanceID)
|
||||
if err != nil {
|
||||
logging.Log("HANDL-BmpkC").WithError(err).Warn("unable to get current sequence")
|
||||
logging.WithError(err).Warn("unable to get current sequence")
|
||||
return
|
||||
}
|
||||
if unprocessedEvent.Sequence < currentSequence {
|
||||
@ -78,12 +77,12 @@ func ReduceEvent(handler Handler, event *models.Event) {
|
||||
}
|
||||
|
||||
err = handler.Reduce(unprocessedEvent)
|
||||
logging.WithFields("HANDL-V42TI", "sequence", unprocessedEvent.Sequence).OnError(err).Warn("reduce failed")
|
||||
logging.WithFields("sequence", unprocessedEvent.Sequence).OnError(err).Warn("reduce failed")
|
||||
}
|
||||
if len(unprocessedEvents) == eventLimit {
|
||||
logging.WithFields("QUERY-BSqe9", "sequence", event.Sequence).Warn("didnt process event")
|
||||
logging.WithFields("sequence", event.Sequence).Warn("didnt process event")
|
||||
return
|
||||
}
|
||||
err = handler.Reduce(event)
|
||||
logging.WithFields("HANDL-wQDL2", "sequence", event.Sequence).OnError(err).Warn("reduce failed")
|
||||
logging.WithFields("sequence", event.Sequence).OnError(err).Warn("reduce failed")
|
||||
}
|
||||
|
@ -11,10 +11,11 @@ import (
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Eventstore v1.Eventstore
|
||||
Locker Locker
|
||||
ViewHandlers []query.Handler
|
||||
ConcurrentWorkers int
|
||||
Eventstore v1.Eventstore
|
||||
Locker Locker
|
||||
ViewHandlers []query.Handler
|
||||
ConcurrentWorkers int
|
||||
ConcurrentInstances int
|
||||
}
|
||||
|
||||
func (c *Config) New() *Spooler {
|
||||
@ -27,11 +28,12 @@ func (c *Config) New() *Spooler {
|
||||
})
|
||||
|
||||
return &Spooler{
|
||||
handlers: c.ViewHandlers,
|
||||
lockID: lockID,
|
||||
eventstore: c.Eventstore,
|
||||
locker: c.Locker,
|
||||
queue: make(chan *spooledHandler, len(c.ViewHandlers)),
|
||||
workers: c.ConcurrentWorkers,
|
||||
handlers: c.ViewHandlers,
|
||||
lockID: lockID,
|
||||
eventstore: c.Eventstore,
|
||||
locker: c.Locker,
|
||||
queue: make(chan *spooledHandler, len(c.ViewHandlers)),
|
||||
workers: c.ConcurrentWorkers,
|
||||
concurrentInstances: c.ConcurrentInstances,
|
||||
}
|
||||
}
|
||||
|
@ -2,11 +2,11 @@ package spooler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/zitadel/logging"
|
||||
|
||||
v1 "github.com/zitadel/zitadel/internal/eventstore/v1"
|
||||
@ -19,12 +19,13 @@ import (
|
||||
const systemID = "system"
|
||||
|
||||
type Spooler struct {
|
||||
handlers []query.Handler
|
||||
locker Locker
|
||||
lockID string
|
||||
eventstore v1.Eventstore
|
||||
workers int
|
||||
queue chan *spooledHandler
|
||||
handlers []query.Handler
|
||||
locker Locker
|
||||
lockID string
|
||||
eventstore v1.Eventstore
|
||||
workers int
|
||||
queue chan *spooledHandler
|
||||
concurrentInstances int
|
||||
}
|
||||
|
||||
type Locker interface {
|
||||
@ -33,9 +34,10 @@ type Locker interface {
|
||||
|
||||
type spooledHandler struct {
|
||||
query.Handler
|
||||
locker Locker
|
||||
queuedAt time.Time
|
||||
eventstore v1.Eventstore
|
||||
locker Locker
|
||||
queuedAt time.Time
|
||||
eventstore v1.Eventstore
|
||||
concurrentInstances int
|
||||
}
|
||||
|
||||
func (s *Spooler) Start() {
|
||||
@ -55,7 +57,7 @@ func (s *Spooler) Start() {
|
||||
}
|
||||
go func() {
|
||||
for _, handler := range s.handlers {
|
||||
s.queue <- &spooledHandler{Handler: handler, locker: s.locker, queuedAt: time.Now(), eventstore: s.eventstore}
|
||||
s.queue <- &spooledHandler{Handler: handler, locker: s.locker, queuedAt: time.Now(), eventstore: s.eventstore, concurrentInstances: s.concurrentInstances}
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -73,7 +75,7 @@ func (s *spooledHandler) load(workerID string) {
|
||||
err := recover()
|
||||
|
||||
if err != nil {
|
||||
sentry.CurrentHub().Recover(err)
|
||||
logging.WithFields("cause", err, "stack", string(debug.Stack())).Error("reduce panicked")
|
||||
}
|
||||
}()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@ -82,29 +84,50 @@ func (s *spooledHandler) load(workerID string) {
|
||||
|
||||
if <-hasLocked {
|
||||
for {
|
||||
events, err := s.query(ctx)
|
||||
ids, err := s.eventstore.InstanceIDs(ctx, models.NewSearchQuery().SetColumn(models.Columns_InstanceIDs).AddQuery().ExcludedInstanceIDsFilter("").SearchQuery())
|
||||
if err != nil {
|
||||
errs <- err
|
||||
break
|
||||
}
|
||||
err = s.process(ctx, events, workerID)
|
||||
if err != nil {
|
||||
errs <- err
|
||||
break
|
||||
}
|
||||
if uint64(len(events)) < s.QueryLimit() {
|
||||
// no more events to process
|
||||
// stop chan
|
||||
if ctx.Err() == nil {
|
||||
errs <- nil
|
||||
for i := 0; i < len(ids); i = i + s.concurrentInstances {
|
||||
max := i + s.concurrentInstances
|
||||
if max > len(ids) {
|
||||
max = len(ids)
|
||||
}
|
||||
err = s.processInstances(ctx, workerID, ids[i:max]...)
|
||||
if err != nil {
|
||||
errs <- err
|
||||
}
|
||||
break
|
||||
}
|
||||
if ctx.Err() == nil {
|
||||
errs <- nil
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func (s *spooledHandler) processInstances(ctx context.Context, workerID string, ids ...string) error {
|
||||
for {
|
||||
events, err := s.query(ctx, ids...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(events) == 0 {
|
||||
return nil
|
||||
}
|
||||
err = s.process(ctx, events, workerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if uint64(len(events)) < s.QueryLimit() {
|
||||
// no more events to process
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spooledHandler) awaitError(cancel func(), errs chan error, workerID string) {
|
||||
select {
|
||||
case err := <-errs:
|
||||
@ -135,8 +158,8 @@ func (s *spooledHandler) process(ctx context.Context, events []*models.Event, wo
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *spooledHandler) query(ctx context.Context) ([]*models.Event, error) {
|
||||
query, err := s.EventQuery()
|
||||
func (s *spooledHandler) query(ctx context.Context, instanceIDs ...string) ([]*models.Event, error) {
|
||||
query, err := s.EventQuery(instanceIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ func (h *testHandler) Subscription() *v1.Subscription {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *testHandler) EventQuery() (*models.SearchQuery, error) {
|
||||
func (h *testHandler) EventQuery(instanceIDs ...string) (*models.SearchQuery, error) {
|
||||
if h.queryError != nil {
|
||||
return nil, h.queryError
|
||||
}
|
||||
@ -111,6 +111,9 @@ func (es *eventstoreStub) PushAggregates(ctx context.Context, in ...*models.Aggr
|
||||
func (es *eventstoreStub) LatestSequence(ctx context.Context, in *models.SearchQueryFactory) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
func (es *eventstoreStub) InstanceIDs(ctx context.Context, in *models.SearchQuery) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (es *eventstoreStub) V2() *eventstore.Eventstore {
|
||||
return nil
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ var (
|
||||
|
||||
func (q *Queries) AppByProjectAndAppID(ctx context.Context, shouldTriggerBulk bool, projectID, appID string) (*App, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.AppProjection.TriggerBulk(ctx)
|
||||
projection.AppProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareAppQuery()
|
||||
|
@ -124,7 +124,7 @@ func (q *Queries) SearchAuthNKeys(ctx context.Context, queries *AuthNKeySearchQu
|
||||
|
||||
func (q *Queries) GetAuthNKeyByID(ctx context.Context, shouldTriggerBulk bool, id string, queries ...SearchQuery) (*AuthNKey, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.AuthNKeyProjection.TriggerBulk(ctx)
|
||||
projection.AuthNKeyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
query, scan := prepareAuthNKeyQuery()
|
||||
|
@ -82,7 +82,7 @@ var (
|
||||
|
||||
func (q *Queries) DomainPolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string) (*DomainPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.DomainPolicyProjection.TriggerBulk(ctx)
|
||||
projection.DomainPolicyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareDomainPolicyQuery()
|
||||
|
@ -115,7 +115,7 @@ func (q *FailedEventSearchQueries) toQuery(query sq.SelectBuilder) sq.SelectBuil
|
||||
return query
|
||||
}
|
||||
|
||||
func prepareFailedEventQuery() (sq.SelectBuilder, func(*sql.Row) (*FailedEvent, error)) {
|
||||
func prepareFailedEventQuery(instanceIDs ...string) (sq.SelectBuilder, func(*sql.Row) (*FailedEvent, error)) {
|
||||
return sq.Select(
|
||||
FailedEventsColumnProjectionName.identifier(),
|
||||
FailedEventsColumnFailedSequence.identifier(),
|
||||
|
@ -182,7 +182,7 @@ var (
|
||||
//IDPByIDAndResourceOwner searches for the requested id in the context of the resource owner and IAM
|
||||
func (q *Queries) IDPByIDAndResourceOwner(ctx context.Context, shouldTriggerBulk bool, id, resourceOwner string) (*IDP, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.IDPProjection.TriggerBulk(ctx)
|
||||
projection.IDPProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareIDPByIDQuery()
|
||||
|
@ -159,7 +159,7 @@ func (q *Queries) SearchInstances(ctx context.Context, queries *InstanceSearchQu
|
||||
|
||||
func (q *Queries) Instance(ctx context.Context, shouldTriggerBulk bool) (*Instance, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.InstanceProjection.TriggerBulk(ctx)
|
||||
projection.InstanceProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareInstanceDomainQuery(authz.GetInstance(ctx).RequestedDomain())
|
||||
|
@ -77,7 +77,7 @@ var (
|
||||
|
||||
func (q *Queries) LockoutPolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string) (*LockoutPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.LockoutPolicyProjection.TriggerBulk(ctx)
|
||||
projection.LockoutPolicyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareLockoutPolicyQuery()
|
||||
|
@ -141,7 +141,7 @@ var (
|
||||
|
||||
func (q *Queries) LoginPolicyByID(ctx context.Context, shouldTriggerBulk bool, orgID string) (*LoginPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.LoginPolicyProjection.TriggerBulk(ctx)
|
||||
projection.LoginPolicyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
query, scan := prepareLoginPolicyQuery()
|
||||
|
@ -88,7 +88,7 @@ func (q *OrgSearchQueries) toQuery(query sq.SelectBuilder) sq.SelectBuilder {
|
||||
|
||||
func (q *Queries) OrgByID(ctx context.Context, shouldTriggerBulk bool, id string) (*Org, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.OrgProjection.TriggerBulk(ctx)
|
||||
projection.OrgProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareOrgQuery()
|
||||
|
@ -76,7 +76,7 @@ var (
|
||||
|
||||
func (q *Queries) PasswordAgePolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string) (*PasswordAgePolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PasswordAgeProjection.TriggerBulk(ctx)
|
||||
projection.PasswordAgeProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePasswordAgePolicyQuery()
|
||||
@ -106,7 +106,7 @@ func (q *Queries) PasswordAgePolicyByOrg(ctx context.Context, shouldTriggerBulk
|
||||
|
||||
func (q *Queries) DefaultPasswordAgePolicy(ctx context.Context, shouldTriggerBulk bool) (*PasswordAgePolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PasswordAgeProjection.TriggerBulk(ctx)
|
||||
projection.PasswordAgeProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePasswordAgePolicyQuery()
|
||||
|
@ -33,7 +33,7 @@ type PasswordComplexityPolicy struct {
|
||||
|
||||
func (q *Queries) PasswordComplexityPolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string) (*PasswordComplexityPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PasswordComplexityProjection.TriggerBulk(ctx)
|
||||
projection.PasswordComplexityProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePasswordComplexityPolicyQuery()
|
||||
@ -63,7 +63,7 @@ func (q *Queries) PasswordComplexityPolicyByOrg(ctx context.Context, shouldTrigg
|
||||
|
||||
func (q *Queries) DefaultPasswordComplexityPolicy(ctx context.Context, shouldTriggerBulk bool) (*PasswordComplexityPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PasswordComplexityProjection.TriggerBulk(ctx)
|
||||
projection.PasswordComplexityProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePasswordComplexityPolicyQuery()
|
||||
|
@ -81,7 +81,7 @@ var (
|
||||
|
||||
func (q *Queries) PrivacyPolicyByOrg(ctx context.Context, shouldTriggerBulk bool, orgID string) (*PrivacyPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PrivacyPolicyProjection.TriggerBulk(ctx)
|
||||
projection.PrivacyPolicyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePrivacyPolicyQuery()
|
||||
@ -111,7 +111,7 @@ func (q *Queries) PrivacyPolicyByOrg(ctx context.Context, shouldTriggerBulk bool
|
||||
|
||||
func (q *Queries) DefaultPrivacyPolicy(ctx context.Context, shouldTriggerBulk bool) (*PrivacyPolicy, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.PrivacyPolicyProjection.TriggerBulk(ctx)
|
||||
projection.PrivacyPolicyProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := preparePrivacyPolicyQuery()
|
||||
|
@ -96,7 +96,7 @@ type ProjectSearchQueries struct {
|
||||
|
||||
func (q *Queries) ProjectByID(ctx context.Context, shouldTriggerBulk bool, id string) (*Project, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.ProjectProjection.TriggerBulk(ctx)
|
||||
projection.ProjectProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareProjectQuery()
|
||||
|
@ -103,7 +103,7 @@ type ProjectGrantSearchQueries struct {
|
||||
|
||||
func (q *Queries) ProjectGrantByID(ctx context.Context, shouldTriggerBulk bool, id string) (*ProjectGrant, error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.ProjectGrantProjection.TriggerBulk(ctx)
|
||||
projection.ProjectGrantProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
stmt, scan := prepareProjectGrantQuery()
|
||||
|
@ -78,7 +78,7 @@ type ProjectRoleSearchQueries struct {
|
||||
|
||||
func (q *Queries) SearchProjectRoles(ctx context.Context, shouldTriggerBulk bool, queries *ProjectRoleSearchQueries) (projects *ProjectRoles, err error) {
|
||||
if shouldTriggerBulk {
|
||||
projection.ProjectRoleProjection.TriggerBulk(ctx)
|
||||
projection.ProjectRoleProjection.Trigger(ctx)
|
||||
}
|
||||
|
||||
query, scan := prepareProjectRolesQuery()
|
||||
|
@@ -5,17 +5,19 @@ import (
 )

 type Config struct {
-	RequeueEvery     time.Duration
-	RetryFailedAfter time.Duration
-	MaxFailureCount  uint
-	BulkLimit        uint64
-	Customizations   map[string]CustomConfig
-	MaxIterators     int
+	RequeueEvery        time.Duration
+	RetryFailedAfter    time.Duration
+	MaxFailureCount     uint
+	ConcurrentInstances uint
+	BulkLimit           uint64
+	Customizations      map[string]CustomConfig
+	MaxIterators        int
 }

 type CustomConfig struct {
-	RequeueEvery     *time.Duration
-	RetryFailedAfter *time.Duration
-	MaxFailureCount  *uint
-	BulkLimit        *uint64
+	RequeueEvery        *time.Duration
+	RetryFailedAfter    *time.Duration
+	MaxFailureCount     *uint
+	ConcurrentInstances *uint
+	BulkLimit           *uint64
 }
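The pointer fields in CustomConfig read as optional per-projection overrides of the global Config. A sketch of how such overrides could be merged, assuming that interpretation and trimmed to the override-relevant fields; applyCustomConfig is a hypothetical helper, not code from this commit:

package main

import (
	"fmt"
	"time"
)

type Config struct {
	RequeueEvery        time.Duration
	RetryFailedAfter    time.Duration
	MaxFailureCount     uint
	ConcurrentInstances uint
	BulkLimit           uint64
}

type CustomConfig struct {
	RequeueEvery        *time.Duration
	RetryFailedAfter    *time.Duration
	MaxFailureCount     *uint
	ConcurrentInstances *uint
	BulkLimit           *uint64
}

// applyCustomConfig: every non-nil field of the per-projection customization
// overrides the global default.
func applyCustomConfig(base Config, custom CustomConfig) Config {
	if custom.RequeueEvery != nil {
		base.RequeueEvery = *custom.RequeueEvery
	}
	if custom.RetryFailedAfter != nil {
		base.RetryFailedAfter = *custom.RetryFailedAfter
	}
	if custom.MaxFailureCount != nil {
		base.MaxFailureCount = *custom.MaxFailureCount
	}
	if custom.ConcurrentInstances != nil {
		base.ConcurrentInstances = *custom.ConcurrentInstances
	}
	if custom.BulkLimit != nil {
		base.BulkLimit = *custom.BulkLimit
	}
	return base
}

func main() {
	ten := uint(10)
	base := Config{RequeueEvery: 60 * time.Second, ConcurrentInstances: 1}
	fmt.Println(applyCustomConfig(base, CustomConfig{ConcurrentInstances: &ten}))
}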
@@ -83,8 +83,8 @@ func newKeyProjection(ctx context.Context, config crdb.StatementHandlerConfig, k
 			crdb.WithForeignKey(crdb.NewForeignKeyOfPublicKeys("fk_public_ref_keys")),
 		),
 	)
-	p.StatementHandler = crdb.NewStatementHandler(ctx, config)
 	p.encryptionAlgorithm = keyEncryptionAlgorithm
+	p.StatementHandler = crdb.NewStatementHandler(ctx, config)

 	return p
 }
@@ -68,8 +68,10 @@ func Start(ctx context.Context, sqlClient *sql.DB, es *eventstore.Eventstore, co
 			HandlerConfig: handler.HandlerConfig{
 				Eventstore: es,
 			},
-			RequeueEvery:     config.RequeueEvery,
-			RetryFailedAfter: config.RetryFailedAfter,
+			RequeueEvery:        config.RequeueEvery,
+			RetryFailedAfter:    config.RetryFailedAfter,
+			Retries:             config.MaxFailureCount,
+			ConcurrentInstances: config.ConcurrentInstances,
 		},
 		Client:        sqlClient,
 		SequenceTable: CurrentSeqTable,
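ConcurrentInstances is threaded from the projection Config into the handler configuration here. One common way to enforce such a limit is a counting semaphore around per-instance work; the sketch below illustrates that idea only and is not the handler implementation from this commit:

package main

import (
	"fmt"
	"sync"
)

// processInstances bounds parallelism with a buffered channel used as a
// counting semaphore; whether the real handler works this way is an assumption.
func processInstances(instanceIDs []string, concurrentInstances uint) {
	if concurrentInstances == 0 {
		concurrentInstances = 1
	}
	sem := make(chan struct{}, concurrentInstances)
	var wg sync.WaitGroup
	for _, id := range instanceIDs {
		wg.Add(1)
		sem <- struct{}{} // blocks once the limit is reached
		go func(id string) {
			defer wg.Done()
			defer func() { <-sem }()
			fmt.Println("processing events for instance", id)
		}(id)
	}
	wg.Wait()
}

func main() {
	processInstances([]string{"inst-1", "inst-2", "inst-3"}, 2)
}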
@@ -294,8 +294,8 @@ var (

 func (q *Queries) GetUserByID(ctx context.Context, shouldTriggerBulk bool, userID string, queries ...SearchQuery) (*User, error) {
 	if shouldTriggerBulk {
-		projection.UserProjection.TriggerBulk(ctx)
-		projection.LoginNameProjection.TriggerBulk(ctx)
+		projection.UserProjection.Trigger(ctx)
+		projection.LoginNameProjection.Trigger(ctx)
 	}

 	instanceID := authz.GetInstance(ctx).InstanceID()
@@ -317,8 +317,8 @@ func (q *Queries) GetUserByID(ctx context.Context, shouldTriggerBulk bool, userI

 func (q *Queries) GetUser(ctx context.Context, shouldTriggerBulk bool, queries ...SearchQuery) (*User, error) {
 	if shouldTriggerBulk {
-		projection.UserProjection.TriggerBulk(ctx)
-		projection.LoginNameProjection.TriggerBulk(ctx)
+		projection.UserProjection.Trigger(ctx)
+		projection.LoginNameProjection.Trigger(ctx)
 	}

 	instanceID := authz.GetInstance(ctx).InstanceID()
@@ -390,8 +390,8 @@ func (q *Queries) GetHumanPhone(ctx context.Context, userID string, queries ...S

 func (q *Queries) GeNotifyUser(ctx context.Context, shouldTriggered bool, userID string, queries ...SearchQuery) (*NotifyUser, error) {
 	if shouldTriggered {
-		projection.UserProjection.TriggerBulk(ctx)
-		projection.LoginNameProjection.TriggerBulk(ctx)
+		projection.UserProjection.Trigger(ctx)
+		projection.LoginNameProjection.Trigger(ctx)
 	}

 	instanceID := authz.GetInstance(ctx).InstanceID()
@@ -193,7 +193,7 @@ var (

 func (q *Queries) UserGrant(ctx context.Context, shouldTriggerBulk bool, queries ...SearchQuery) (*UserGrant, error) {
 	if shouldTriggerBulk {
-		projection.UserGrantProjection.TriggerBulk(ctx)
+		projection.UserGrantProjection.Trigger(ctx)
 	}

 	query, scan := prepareUserGrantQuery()
@@ -73,7 +73,7 @@ var (

 func (q *Queries) GetUserMetadataByKey(ctx context.Context, shouldTriggerBulk bool, userID, key string, queries ...SearchQuery) (*UserMetadata, error) {
 	if shouldTriggerBulk {
-		projection.UserMetadataProjection.TriggerBulk(ctx)
+		projection.UserMetadataProjection.Trigger(ctx)
 	}

 	query, scan := prepareUserMetadataQuery()
@@ -96,7 +96,7 @@ func (q *Queries) GetUserMetadataByKey(ctx context.Context, shouldTriggerBulk bo

 func (q *Queries) SearchUserMetadata(ctx context.Context, shouldTriggerBulk bool, userID string, queries *UserMetadataSearchQueries) (*UserMetadataList, error) {
 	if shouldTriggerBulk {
-		projection.UserMetadataProjection.TriggerBulk(ctx)
+		projection.UserMetadataProjection.Trigger(ctx)
 	}

 	query, scan := prepareUserMetadataListQuery()
@@ -82,7 +82,7 @@ type PersonalAccessTokenSearchQueries struct {

 func (q *Queries) PersonalAccessTokenByID(ctx context.Context, shouldTriggerBulk bool, id string, queries ...SearchQuery) (*PersonalAccessToken, error) {
 	if shouldTriggerBulk {
-		projection.PersonalAccessTokenProjection.TriggerBulk(ctx)
+		projection.PersonalAccessTokenProjection.Trigger(ctx)
 	}

 	query, scan := preparePersonalAccessTokenQuery()
@@ -55,8 +55,9 @@ func (key sequenceSearchKey) ToColumnName() string {
 }

 type sequenceSearchQuery struct {
-	key   sequenceSearchKey
-	value string
+	key    sequenceSearchKey
+	method domain.SearchMethod
+	value  interface{}
 }

 func (q *sequenceSearchQuery) GetKey() ColumnKey {
@@ -64,7 +65,7 @@ func (q *sequenceSearchQuery) GetKey() ColumnKey {
 }

 func (q *sequenceSearchQuery) GetMethod() domain.SearchMethod {
-	return domain.SearchMethodEquals
+	return q.method
 }

 func (q *sequenceSearchQuery) GetValue() interface{} {
@@ -94,7 +95,7 @@ func (s *sequenceSearchRequest) GetAsc() bool {
 func (s *sequenceSearchRequest) GetQueries() []SearchQuery {
 	result := make([]SearchQuery, len(s.queries))
 	for i, q := range s.queries {
-		result[i] = &sequenceSearchQuery{key: q.key, value: q.value}
+		result[i] = &sequenceSearchQuery{key: q.key, value: q.value, method: q.method}
 	}
 	return result
 }
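With the method field added above, a sequence search query can express more than equality, which is what lets an instance-ID filter become an IN match further down. A self-contained sketch of how a search method might translate into a SQL condition; the type, constants, and condition helper are illustrative, not the view repository code:

package main

import (
	"fmt"
	"strings"
)

// searchMethod mirrors the idea of domain.SearchMethod in the diff.
type searchMethod int

const (
	methodEquals searchMethod = iota
	methodIsOneOf
)

// condition renders a single search query into a SQL fragment with placeholders.
func condition(column string, method searchMethod, value interface{}) (string, []interface{}) {
	switch method {
	case methodIsOneOf:
		values := value.([]string)
		placeholders := strings.TrimRight(strings.Repeat("?,", len(values)), ",")
		args := make([]interface{}, len(values))
		for i, v := range values {
			args[i] = v
		}
		return fmt.Sprintf("%s IN (%s)", column, placeholders), args
	default: // methodEquals
		return fmt.Sprintf("%s = ?", column), []interface{}{value}
	}
}

func main() {
	sql, args := condition("instance_id", methodIsOneOf, []string{"inst-1", "inst-2"})
	fmt.Println(sql, args) // instance_id IN (?,?) [inst-1 inst-2]
}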
@@ -147,8 +148,8 @@ func UpdateCurrentSequences(db *gorm.DB, table string, currentSequences []*Curre

 func LatestSequence(db *gorm.DB, table, viewName, instanceID string) (*CurrentSequence, error) {
 	searchQueries := []SearchQuery{
-		&sequenceSearchQuery{key: sequenceSearchKey(SequenceSearchKeyViewName), value: viewName},
-		&sequenceSearchQuery{key: sequenceSearchKey(SequenceSearchKeyInstanceID), value: instanceID},
+		&sequenceSearchQuery{key: sequenceSearchKey(SequenceSearchKeyViewName), value: viewName, method: domain.SearchMethodEquals},
+		&sequenceSearchQuery{key: sequenceSearchKey(SequenceSearchKeyInstanceID), value: instanceID, method: domain.SearchMethodIsOneOf},
 	}

 	// ensure highest sequence of view
@@ -168,13 +169,15 @@ func LatestSequence(db *gorm.DB, table, viewName, instanceID string) (*CurrentSe
 	return nil, caos_errs.ThrowInternalf(err, "VIEW-9LyCB", "unable to get latest sequence of %s", viewName)
 }

-func LatestSequences(db *gorm.DB, table, viewName string) ([]*CurrentSequence, error) {
-	searchQueries := make([]SearchQuery, 0, 2)
-	searchQueries = append(searchQueries)
+func LatestSequences(db *gorm.DB, table, viewName string, instanceIDs ...string) ([]*CurrentSequence, error) {
+	searchQueries := []sequenceSearchQuery{
+		{key: sequenceSearchKey(SequenceSearchKeyViewName), value: viewName, method: domain.SearchMethodEquals},
+	}
+	if len(instanceIDs) > 0 {
+		searchQueries = append(searchQueries, sequenceSearchQuery{key: sequenceSearchKey(SequenceSearchKeyInstanceID), value: instanceIDs, method: domain.SearchMethodIsOneOf})
+	}
 	searchRequest := &sequenceSearchRequest{
-		queries: []sequenceSearchQuery{
-			{key: sequenceSearchKey(SequenceSearchKeyViewName), value: viewName},
-		},
+		queries: searchQueries,
 	}

 	// ensure highest sequence of view
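LatestSequences now takes the instance IDs as a variadic parameter: passing none keeps the unfiltered behaviour, passing some adds an IN filter on the instance ID. A small sketch of that calling convention with stand-in types; the names and the in-memory representation are illustrative only, not the gorm-backed repository code:

package main

import "fmt"

type filter struct {
	column string
	values []string
}

// latestSequences mirrors the new calling convention from the diff: no
// instance IDs means "all instances", otherwise the result is filtered.
func latestSequences(viewName string, instanceIDs ...string) []filter {
	filters := []filter{{column: "view_name", values: []string{viewName}}}
	if len(instanceIDs) > 0 {
		filters = append(filters, filter{column: "instance_id", values: instanceIDs})
	}
	return filters
}

func main() {
	fmt.Println(latestSequences("example_view"))                     // only the view filter
	fmt.Println(latestSequences("example_view", "inst-1", "inst-2")) // adds an IN filter on instance_id
}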