Mirror of https://github.com/zitadel/zitadel.git (synced 2025-02-28 23:37:23 +00:00)
feat: push telemetry (#6027)
* document analytics config
* rework configuration and docs
* describe HandleActiveInstances better
* describe active instances on quotas better
* only projected events are considered
* cleanup
* describe changes at runtime
* push milestones
* stop tracking events
* calculate and push 4 in 6 milestones
* reduce milestone pushed
* remove docs
* fix scheduled pseudo event projection
* push 5 in 6 milestones
* push 6 in 6 milestones
* ignore client ids
* fix text array contains
* push human readable milestone type
* statement unit tests
* improve dev and db performance
* organize imports
* cleanup
* organize imports
* test projection
* check rows.Err()
* test search query
* pass linting
* review
* test 4 milestones
* simplify milestone by instance ids query
* use type NamespacedCondition
* cleanup
* lint
* lint
* dont overwrite original error
* no opt-in in examples
* cleanup
* prerelease
* enable request headers
* make limit configurable
* review fixes
* only requeue special handlers secondly
* include integration tests
* Revert "include integration tests" (this reverts commit 96db9504ecdb4e73451f09554fd749cd7c27341f)
* pass reducers
* test handlers
* fix unit test
* feat: increment version
* lint
* remove prerelease
* fix integration tests
This commit is contained in: parent fa93bb7e85, commit bb756482c7
.github/workflows/integration.yml (vendored, 2 changes)

@@ -43,7 +43,7 @@ jobs:
 go run main.go init --config internal/integration/config/zitadel.yaml --config internal/integration/config/${INTEGRATION_DB_FLAVOR}.yaml
 go run main.go setup --masterkeyFromEnv --config internal/integration/config/zitadel.yaml --config internal/integration/config/${INTEGRATION_DB_FLAVOR}.yaml
 - name: Run integration tests
-  run: go test -tags=integration -race -p 1 -v -coverprofile=profile.cov -coverpkg=./internal/...,./cmd/... ./internal/integration ./internal/api/grpc/...
+  run: go test -tags=integration -race -p 1 -v -coverprofile=profile.cov -coverpkg=./internal/...,./cmd/... ./internal/integration ./internal/api/grpc/... ./internal/notification/handlers/...
 - name: Publish go coverage
   uses: codecov/codecov-action@v3.1.0
   with:
.gitignore (vendored, 1 change)

@@ -59,6 +59,7 @@ docs/docs/apis/auth
 docs/docs/apis/admin
 docs/docs/apis/mgmt
 docs/docs/apis/system
+docs/docs/apis/proto

 # local
 build/local/*.env
@@ -1,3 +1,4 @@
+
 # Contributing to ZITADEL

 ## Introduction
@@ -34,7 +35,11 @@ Follow [@zitadel](https://twitter.com/zitadel) on twitter

 We strongly recommend to [talk to us](https://zitadel.com/contact) before you start contributing to streamline our and your work.

-We accept contributions through pull requests. You need a github account for that. If you are unfamiliar with git have a look at Github's documentation on [creating forks](https://help.github.com/articles/fork-a-repo) and [creating pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). Please draft the pull request as soon as possible. Go through the following checklist before you submit the final pull request:
+We accept contributions through pull requests.
+You need a github account for that.
+If you are unfamiliar with git have a look at Github's documentation on [creating forks](https://help.github.com/articles/fork-a-repo) and [creating pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork).
+Please draft the pull request as soon as possible.
+Go through the following checklist before you submit the final pull request:

 ### Submit a pull request (PR)

@@ -61,7 +66,8 @@ We accept contributions through pull requests. You need a github account for that

 ### Review a pull request

-The reviewers will provide you feedback and approve your changes as soon as they are satisfied. If we ask you for changes in the code, you can follow the [GitHub Guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request) to incorporate feedback in your pull request.
+The reviewers will provide you feedback and approve your changes as soon as they are satisfied.
+If we ask you for changes in the code, you can follow the [GitHub Guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request) to incorporate feedback in your pull request.

 <!-- TODO: how to do this via git -->
 <!-- TODO: change commit message via git -->
@@ -88,6 +94,16 @@ This is optional to indicate which component is affected. In doubt, leave blank

 Provide a brief description of the change.

+### Quality assurance
+
+Please make sure you cover your changes with tests before marking a Pull Request as ready for review:
+
+- [ ] Integration tests against the gRPC server ensure that one or multiple API calls that belong together return the expected results.
+- [ ] Integration tests against the gRPC server ensure that probable good and bad read and write permissions are tested.
+- [ ] Integration tests against the gRPC server ensure that the API is easily usable despite eventual consistency.
+- [ ] Integration tests against the gRPC server ensure that all probable login and registration flows are covered.
+- [ ] Integration tests ensure that certain commands send expected notifications.
+
 ## Contribute

 The code consists of the following parts:
@@ -97,7 +97,7 @@ RUN rm -r cockroach-${COCKROACH_VERSION}.linux-amd64

 # Migrations for cockroach-secure
 RUN go install github.com/rakyll/statik \
-    && go test -race -v -coverprofile=profile.cov $(go list ./... | grep -v /operator/)
+    && go test -race -coverprofile=profile.cov $(go list ./... | grep -v /operator/)

 #######################
 ## Go test results
@@ -14,6 +14,29 @@ Tracing:
   Fraction: 1.0
   MetricPrefix: zitadel

+Telemetry:
+  # As long as Enabled is true, ZITADEL tries to send usage data to the configured Telemetry.Endpoints.
+  # Data is projected by ZITADEL even if Enabled is false.
+  # This means that switching this to true makes ZITADEL try to send past data.
+  Enabled: false
+  # Push telemetry data to all these endpoints at least once using an HTTP POST request.
+  # If one endpoint returns an unsuccessful response code or times out,
+  # ZITADEL retries to push the data point to all configured endpoints until it succeeds.
+  # Configure delivery guarantees and intervals in the section Projections.Customizations.Telemetry.
+  # The endpoints can be reconfigured at runtime.
+  # Ten redirects are followed.
+  # If you change this configuration at runtime, remaining data that is not successfully delivered to the old endpoints is sent to the new endpoints.
+  Endpoints:
+    - https://httpbin.org/post
+  # These headers are sent with every request to the configured endpoints.
+  Headers:
+    # single-value: "single-value"
+    # multi-value:
+    #   - "multi-value-1"
+    #   - "multi-value-2"
+  # The maximum number of data points that are queried before they are sent to the configured endpoints.
+  Limit: 100 # ZITADEL_TELEMETRY_LIMIT
+
 # Port ZITADEL will listen on
 Port: 8080
 # Port ZITADEL is exposed on, it can differ from port e.g. if you proxy the traffic
@@ -169,17 +192,29 @@ Projections:
     BulkLimit: 2000
     # The Notifications projection is used for sending emails and SMS to users
     Notifications:
-      # As notification projections don't result in database statements, retries don't have an effect
+      # As notification projections don't result in database statements, retries don't have any effects
      MaxFailureCount: 0
     # The NotificationsQuotas projection is used for calling quota webhooks
     NotificationsQuotas:
-      # Delivery guarantee requirements are probably higher for quota webhooks
+      # In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances.
+      # An instance is active, as long as there are projected events on the instance, that are not older than the HandleActiveInstances duration.
+      # Delivery guarantee requirements are higher for quota webhooks
       # Defaults to 45 days
       HandleActiveInstances: 1080h
-      # As quota notification projections don't result in database statements, retries don't have an effect
+      # As quota notification projections don't result in database statements, retries don't have any effects
       MaxFailureCount: 0
-      # Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the db too much.
+      # Quota notifications are not so time critical. Setting RequeueEvery every five minutes doesn't annoy the database too much.
       RequeueEvery: 300s
+    Telemetry:
+      # In case of failed deliveries, ZITADEL retries to send the data points to the configured endpoints, but only for active instances.
+      # An instance is active, as long as there are projected events on the instance, that are not older than the HandleActiveInstances duration.
+      # Telemetry delivery guarantee requirements are a bit higher than normal data projections, as they are not interactively retryable.
+      # Defaults to 15 days
+      HandleActiveInstances: 360h
+      # As sending telemetry data doesn't result in database statements, retries don't have any effects
+      MaxFailureCount: 0
+      # Telemetry data synchronization is not time critical. Setting RequeueEvery to 55 minutes doesn't annoy the database too much.
+      RequeueEvery: 3300s

 Auth:
   SearchLimit: 1000
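The Endpoints list above can point at any HTTP service that answers the POST with a 2xx status code. As a rough editorial illustration (not part of this change), the following minimal Go receiver could stand in for https://httpbin.org/post during local testing. The payload shape is not specified in this diff, so the handler treats the body as opaque JSON; the route and port are made-up values.

```go
package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical telemetry sink: ZITADEL would be configured with
	// Telemetry.Endpoints: ["http://localhost:8081/telemetry"].
	http.HandleFunc("/telemetry", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "only POST is accepted", http.StatusMethodNotAllowed)
			return
		}
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// The body is treated as opaque JSON and only logged.
		log.Printf("received telemetry data point: %s", body)
		// Any 2xx response acknowledges the data point; a non-2xx response
		// or a timeout makes ZITADEL retry the push later.
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8081", nil))
}
```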
@@ -25,6 +25,7 @@ import (
 	"github.com/zitadel/zitadel/internal/eventstore"
 	"github.com/zitadel/zitadel/internal/id"
 	"github.com/zitadel/zitadel/internal/logstore"
+	"github.com/zitadel/zitadel/internal/notification/handlers"
 	"github.com/zitadel/zitadel/internal/query/projection"
 	static_config "github.com/zitadel/zitadel/internal/static/config"
 	metrics "github.com/zitadel/zitadel/internal/telemetry/metrics/config"
@@ -65,6 +66,7 @@ type Config struct {
 	Eventstore *eventstore.Config
 	LogStore   *logstore.Configs
 	Quotas     *QuotasConfig
+	Telemetry  *handlers.TelemetryPusherConfig
 }

 type QuotasConfig struct {
@@ -207,14 +207,14 @@ func startZitadel(config *Config, masterKey string, server chan<- *Server) error
 		return err
 	}

-	usageReporter := logstore.UsageReporterFunc(commands.ReportUsage)
+	usageReporter := logstore.UsageReporterFunc(commands.ReportQuotaUsage)
 	actionsLogstoreSvc := logstore.New(queries, usageReporter, actionsExecutionDBEmitter, actionsExecutionStdoutEmitter)
 	if actionsLogstoreSvc.Enabled() {
 		logging.Warn("execution logs are currently in beta")
 	}
 	actions.SetLogstoreService(actionsLogstoreSvc)

-	notification.Start(ctx, config.Projections.Customizations["notifications"], config.Projections.Customizations["notificationsquotas"], config.ExternalPort, config.ExternalSecure, commands, queries, eventstoreClient, assets.AssetAPIFromDomain(config.ExternalSecure, config.ExternalPort), config.SystemDefaults.Notifications.FileSystemPath, keys.User, keys.SMTP, keys.SMS)
+	notification.Start(ctx, config.Projections.Customizations["notifications"], config.Projections.Customizations["notificationsquotas"], config.Projections.Customizations["telemetry"], *config.Telemetry, config.ExternalPort, config.ExternalSecure, commands, queries, eventstoreClient, assets.AssetAPIFromDomain(config.ExternalSecure, config.ExternalPort), config.SystemDefaults.Notifications.FileSystemPath, keys.User, keys.SMTP, keys.SMS)

 	router := mux.NewRouter()
 	tlsConfig, err := config.TLS.Config()
@@ -33,7 +33,7 @@ kn service create zitadel \
 --env ZITADEL_EXTERNALPORT=80 \
 --env ZITADEL_TLS_ENABLED=false \
 --env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
 --arg "start-from-init" --arg "--masterkey" --arg "MasterkeyNeedsToHave32Characters"
 ```

 ### Knavite yaml
@@ -61,6 +61,26 @@ Instead, your execution environment should provide tooling for managing logs in
 This includes tasks like rotating files, routing, collecting, archiving and cleaning-up.
 For example, systemd has journald and kubernetes has fluentd and fluentbit.

+## Telemetry
+
+If you want to have some data about reached usage milestones pushed to external systems, enable telemetry in the ZITADEL configuration.
+
+The following table describes the milestones that are sent to the endpoints:
+
+| Trigger | Description |
+|---------|-------------|
+| A virtual instance is created. | This data point is also sent when the first instance is automatically created during the ZITADEL binaries setup phase in a self-hosting scenario. |
+| An authentication succeeded for the first time on an instance. | This is the first authentication with the instance's automatically created admin user during the instance setup, which can be a human or a machine. |
+| A project is created for the first time in a virtual instance. | The ZITADEL project that is automatically created during the instance setup is omitted. |
+| An application is created for the first time in a virtual instance. | The applications in the ZITADEL project that are automatically created during the instance setup are omitted. |
+| An authentication succeeded for the first time in a virtual instance's application. | This is the first authentication using a ZITADEL application that is not created during the instance setup phase. |
+| A virtual instance is deleted. | This data point is sent when a virtual instance is deleted via ZITADEL's system API. |
+
+ZITADEL pushes the metrics by projecting certain events.
+Therefore, you can configure delivery guarantees not in the Telemetry section of the ZITADEL configuration,
+but in the Projections.Customizations.Telemetry section.
+
 ## Database

 ### Prefer CockroachDB
@@ -36,7 +36,7 @@ func TestMain(m *testing.M) {
 		defer Tester.Done()
 		Client = Tester.Client.SessionV2

-		CTX, _ = Tester.WithSystemAuthorization(ctx, integration.OrgOwner), errCtx
+		CTX, _ = Tester.WithAuthorization(ctx, integration.OrgOwner), errCtx
 		User = Tester.CreateHumanUser(CTX)
 		Tester.RegisterUserPasskey(CTX, User.GetUserId())
 		return m.Run()
@@ -38,7 +38,7 @@ func TestMain(m *testing.M) {
 		Tester = integration.NewTester(ctx)
 		defer Tester.Done()

-		CTX, ErrCTX = Tester.WithSystemAuthorization(ctx, integration.OrgOwner), errCtx
+		CTX, ErrCTX = Tester.WithAuthorization(ctx, integration.OrgOwner), errCtx
 		Client = Tester.Client.UserV2
 		return m.Run()
 	}())
@@ -454,7 +454,7 @@ func TestServer_AddIDPLink(t *testing.T) {
 			args: args{
 				CTX,
 				&user.AddIDPLinkRequest{
-					UserId: Tester.Users[integration.OrgOwner].ID,
+					UserId: Tester.Users[integration.FirstInstanceUsersKey][integration.OrgOwner].ID,
 					IdpLink: &user.IDPLink{
 						IdpId:  "idpID",
 						UserId: "userID",
@@ -470,7 +470,7 @@ func TestServer_AddIDPLink(t *testing.T) {
 			args: args{
 				CTX,
 				&user.AddIDPLinkRequest{
-					UserId: Tester.Users[integration.OrgOwner].ID,
+					UserId: Tester.Users[integration.FirstInstanceUsersKey][integration.OrgOwner].ID,
 					IdpLink: &user.IDPLink{
 						IdpId:  idpID,
 						UserId: "userID",
@@ -18,6 +18,7 @@ import (
 	"github.com/zitadel/zitadel/internal/repository/idpintent"
 	instance_repo "github.com/zitadel/zitadel/internal/repository/instance"
 	"github.com/zitadel/zitadel/internal/repository/keypair"
+	"github.com/zitadel/zitadel/internal/repository/milestone"
 	"github.com/zitadel/zitadel/internal/repository/org"
 	proj_repo "github.com/zitadel/zitadel/internal/repository/project"
 	"github.com/zitadel/zitadel/internal/repository/quota"
@@ -124,6 +125,7 @@ func StartCommands(
 	quota.RegisterEventMappers(repo.eventstore)
 	session.RegisterEventMappers(repo.eventstore)
 	idpintent.RegisterEventMappers(repo.eventstore)
+	milestone.RegisterEventMappers(repo.eventstore)

 	repo.userPasswordAlg = crypto.NewBCrypt(defaults.SecretGenerators.PasswordSaltCost)
 	repo.machineKeySize = int(defaults.SecretGenerators.MachineKeySize)
internal/command/milestone.go (new file, 22 lines)

@@ -0,0 +1,22 @@
+package command
+
+import (
+	"context"
+
+	"github.com/zitadel/zitadel/internal/repository/milestone"
+)
+
+// MilestonePushed writes a new milestone.PushedEvent with a new milestone.Aggregate to the eventstore
+func (c *Commands) MilestonePushed(
+	ctx context.Context,
+	msType milestone.Type,
+	endpoints []string,
+	primaryDomain string,
+) error {
+	id, err := c.idGenerator.Next()
+	if err != nil {
+		return err
+	}
+	_, err = c.eventstore.Push(ctx, milestone.NewPushedEvent(ctx, milestone.NewAggregate(ctx, id), msType, endpoints, primaryDomain, c.externalDomain))
+	return err
+}
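As a rough illustration of how this new command might be invoked once a milestone has been delivered to the configured endpoints, here is a hedged sketch. The calling helper, the milestone type constant, the endpoint list and the domain are illustrative assumptions, not values taken from this change.

```go
package example

import (
	"context"

	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/repository/milestone"
)

// reportMilestone is a hypothetical helper that records a delivered milestone
// by writing a milestone.PushedEvent through the new command.
func reportMilestone(ctx context.Context, cmds *command.Commands) error {
	return cmds.MilestonePushed(
		ctx,
		milestone.InstanceCreated,            // assumed milestone.Type value
		[]string{"https://httpbin.org/post"}, // endpoints the data point was pushed to
		"zitadel.example.com",                // primary domain of the reporting instance
	)
}
```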
@@ -7,8 +7,8 @@ import (
 	"github.com/zitadel/zitadel/internal/repository/quota"
 )

-// ReportUsage calls notification hooks and emits the notified events
-func (c *Commands) ReportUsage(ctx context.Context, dueNotifications []*quota.NotificationDueEvent) error {
+// ReportQuotaUsage writes a slice of *quota.NotificationDueEvent directly to the eventstore
+func (c *Commands) ReportQuotaUsage(ctx context.Context, dueNotifications []*quota.NotificationDueEvent) error {
 	cmds := make([]eventstore.Command, len(dueNotifications))
 	for idx, notification := range dueNotifications {
 		cmds[idx] = notification
@@ -11,6 +11,7 @@ import (
 	"github.com/zitadel/zitadel/internal/errors"
 	"github.com/zitadel/zitadel/internal/eventstore"
 	"github.com/zitadel/zitadel/internal/eventstore/handler"
+	"github.com/zitadel/zitadel/internal/repository/pseudo"
 )

 var (
@@ -49,6 +50,8 @@ type StatementHandler struct {
 	initialized chan bool

 	bulkLimit uint64
+
+	reduceScheduledPseudoEvent bool
 }

 func NewStatementHandler(
@@ -57,30 +60,40 @@ func NewStatementHandler(
 ) StatementHandler {
 	aggregateTypes := make([]eventstore.AggregateType, 0, len(config.Reducers))
 	reduces := make(map[eventstore.EventType]handler.Reduce, len(config.Reducers))
+	reduceScheduledPseudoEvent := false
 	for _, aggReducer := range config.Reducers {
 		aggregateTypes = append(aggregateTypes, aggReducer.Aggregate)
+		if aggReducer.Aggregate == pseudo.AggregateType {
+			reduceScheduledPseudoEvent = true
+			if len(config.Reducers) != 1 ||
+				len(aggReducer.EventRedusers) != 1 ||
+				aggReducer.EventRedusers[0].Event != pseudo.ScheduledEventType {
+				panic("if a pseudo.AggregateType is reduced, exactly one event reducer for pseudo.ScheduledEventType is supported and no other aggregate can be reduced")
+			}
+		}
 		for _, eventReducer := range aggReducer.EventRedusers {
 			reduces[eventReducer.Event] = eventReducer.Reduce
 		}
 	}

 	h := StatementHandler{
 		client:                  config.Client,
 		sequenceTable:           config.SequenceTable,
 		maxFailureCount:         config.MaxFailureCount,
 		currentSequenceStmt:     fmt.Sprintf(currentSequenceStmtFormat, config.SequenceTable),
 		updateSequencesBaseStmt: fmt.Sprintf(updateCurrentSequencesStmtFormat, config.SequenceTable),
 		failureCountStmt:        fmt.Sprintf(failureCountStmtFormat, config.FailedEventsTable),
 		setFailureCountStmt:     fmt.Sprintf(setFailureCountStmtFormat, config.FailedEventsTable),
 		aggregates:              aggregateTypes,
 		reduces:                 reduces,
 		bulkLimit:               config.BulkLimit,
 		Locker:                  NewLocker(config.Client.DB, config.LockTable, config.ProjectionName),
 		initCheck:               config.InitCheck,
 		initialized:             make(chan bool),
+		reduceScheduledPseudoEvent: reduceScheduledPseudoEvent,
 	}

-	h.ProjectionHandler = handler.NewProjectionHandler(ctx, config.ProjectionHandlerConfig, h.reduce, h.Update, h.SearchQuery, h.Lock, h.Unlock, h.initialized)
+	h.ProjectionHandler = handler.NewProjectionHandler(ctx, config.ProjectionHandlerConfig, h.reduce, h.Update, h.searchQuery, h.Lock, h.Unlock, h.initialized, reduceScheduledPseudoEvent)

 	return h
 }
@@ -88,10 +101,19 @@ func NewStatementHandler(
 func (h *StatementHandler) Start() {
 	h.initialized <- true
 	close(h.initialized)
-	h.Subscribe(h.aggregates...)
+	if !h.reduceScheduledPseudoEvent {
+		h.Subscribe(h.aggregates...)
+	}
 }

-func (h *StatementHandler) SearchQuery(ctx context.Context, instanceIDs []string) (*eventstore.SearchQueryBuilder, uint64, error) {
+func (h *StatementHandler) searchQuery(ctx context.Context, instanceIDs []string) (*eventstore.SearchQueryBuilder, uint64, error) {
+	if h.reduceScheduledPseudoEvent {
+		return nil, 1, nil
+	}
+	return h.dbSearchQuery(ctx, instanceIDs)
+}
+
+func (h *StatementHandler) dbSearchQuery(ctx context.Context, instanceIDs []string) (*eventstore.SearchQueryBuilder, uint64, error) {
 	sequences, err := h.currentSequences(ctx, h.client.QueryContext, instanceIDs)
 	if err != nil {
 		return nil, 0, err
@@ -115,7 +137,6 @@ func (h *StatementHandler) SearchQuery(ctx context.Context, instanceIDs []string
 			InstanceID(instanceID)
 		}
 	}
-
 	return queryBuilder, h.bulkLimit, nil
 }
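To make the constraint enforced by the panic above concrete: a statement handler that runs on the scheduled pseudo event must be configured with exactly one aggregate reducer, holding exactly one event reducer for pseudo.ScheduledEventType. The following hedged sketch shows such a configuration; the reduce body is an illustrative no-op, not the actual telemetry handler.

```go
package example

import (
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/eventstore/handler"
	"github.com/zitadel/zitadel/internal/eventstore/handler/crdb"
	"github.com/zitadel/zitadel/internal/repository/pseudo"
)

// pseudoEventReducers sketches the only reducer shape the new check accepts:
// a single pseudo.AggregateType aggregate with a single reducer for
// pseudo.ScheduledEventType.
func pseudoEventReducers() []handler.AggregateReducer {
	return []handler.AggregateReducer{{
		Aggregate: pseudo.AggregateType,
		EventRedusers: []handler.EventReducer{{
			Event: pseudo.ScheduledEventType,
			Reduce: func(event eventstore.Event) (*handler.Statement, error) {
				// a real handler would push pending telemetry data points here
				return crdb.NewNoOpStatement(event), nil
			},
		}},
	}}
}
```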
@@ -18,6 +18,7 @@ import (
 	"github.com/zitadel/zitadel/internal/eventstore/repository"
 	es_repo_mock "github.com/zitadel/zitadel/internal/eventstore/repository/mock"
 	"github.com/zitadel/zitadel/internal/id"
+	"github.com/zitadel/zitadel/internal/repository/pseudo"
 )

 var (
@@ -60,7 +61,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 	type fields struct {
 		sequenceTable  string
 		projectionName string
-		aggregates     []eventstore.AggregateType
+		reducers       []handler.AggregateReducer
 		bulkLimit      uint64
 	}
 	type args struct {
@@ -77,7 +78,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 			fields: fields{
 				sequenceTable:  "my_sequences",
 				projectionName: "my_projection",
-				aggregates:     []eventstore.AggregateType{"testAgg"},
+				reducers:       failingAggregateReducers("testAgg"),
 				bulkLimit:      5,
 			},
 			args: args{
@@ -99,7 +100,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 			fields: fields{
 				sequenceTable:  "my_sequences",
 				projectionName: "my_projection",
-				aggregates:     []eventstore.AggregateType{"testAgg"},
+				reducers:       failingAggregateReducers("testAgg"),
 				bulkLimit:      5,
 			},
 			args: args{
@@ -129,7 +130,7 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 			fields: fields{
 				sequenceTable:  "my_sequences",
 				projectionName: "my_projection",
-				aggregates:     []eventstore.AggregateType{"testAgg"},
+				reducers:       failingAggregateReducers("testAgg"),
 				bulkLimit:      5,
 			},
 			args: args{
@@ -158,6 +159,32 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 					Limit(5),
 			},
 		},
+		{
+			name: "scheduled pseudo event",
+			fields: fields{
+				sequenceTable:  "my_sequences",
+				projectionName: "my_projection",
+				reducers: []handler.AggregateReducer{{
+					Aggregate: pseudo.AggregateType,
+					EventRedusers: []handler.EventReducer{
+						{
+							Event:  pseudo.ScheduledEventType,
+							Reduce: testReduceErr(errors.New("should not be called")),
+						},
+					},
+				}},
+				bulkLimit: 5,
+			},
+			args: args{
+				instanceIDs: []string{"instanceID1", "instanceID2"},
+			},
+			want: want{
+				limit: 1,
+				isErr: func(err error) bool {
+					return err == nil
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -177,15 +204,14 @@ func TestProjectionHandler_SearchQuery(t *testing.T) {
 				Client: &database.DB{
 					DB: client,
 				},
+				Reducers: tt.fields.reducers,
 			})

-			h.aggregates = tt.fields.aggregates
-
 			for _, expectation := range tt.want.expectations {
 				expectation(mock)
 			}

-			query, limit, err := h.SearchQuery(context.Background(), tt.args.instanceIDs)
+			query, limit, err := h.searchQuery(context.Background(), tt.args.instanceIDs)
 			if !tt.want.isErr(err) {
 				t.Errorf("ProjectionHandler.prepareBulkStmts() error = %v", err)
 				return
@@ -1768,3 +1794,17 @@ func testReduceErr(err error) handler.Reduce {
 		return nil, err
 	}
 }
+
+func failingAggregateReducers(aggregates ...eventstore.AggregateType) []handler.AggregateReducer {
+	reducers := make([]handler.AggregateReducer, len(aggregates))
+	for idx := range aggregates {
+		reducers[idx] = handler.AggregateReducer{
+			Aggregate: aggregates[idx],
+			EventRedusers: []handler.EventReducer{{
+				Event:  "any.event",
+				Reduce: testReduceErr(errors.New("should not be called")),
+			}},
+		}
+	}
+	return reducers
+}
@@ -235,12 +235,6 @@ func AddDeleteStatement(conditions []handler.Condition, opts ...execOption) func
 	}
 }

-func AddCopyStatement(conflict, from, to []handler.Column, conditions []handler.Condition, opts ...execOption) func(eventstore.Event) Exec {
-	return func(event eventstore.Event) Exec {
-		return NewCopyStatement(event, conflict, from, to, conditions, opts...).Execute
-	}
-}
-
 func NewArrayAppendCol(column string, value interface{}) handler.Column {
 	return handler.Column{
 		Name: column,
@@ -286,12 +280,30 @@ func NewCopyCol(column, from string) handler.Column {
 }

 func NewLessThanCond(column string, value interface{}) handler.Condition {
-	return handler.Condition{
-		Name:  column,
-		Value: value,
-		ParameterOpt: func(placeholder string) string {
-			return " < " + placeholder
-		},
+	return func(param string) (string, interface{}) {
+		return column + " < " + param, value
+	}
+}
+
+func NewIsNullCond(column string) handler.Condition {
+	return func(param string) (string, interface{}) {
+		return column + " IS NULL", nil
+	}
+}
+
+// NewTextArrayContainsCond returns a handler.Condition that checks if the column that stores an array of text contains the given value
+func NewTextArrayContainsCond(column string, value string) handler.Condition {
+	return func(param string) (string, interface{}) {
+		return column + " @> " + param, database.StringArray{value}
+	}
+}
+
+// Not is a function and not a method, so that calling it is well readable
+// For example conditions := []handler.Condition{ Not(NewTextArrayContainsCond())}
+func Not(condition handler.Condition) handler.Condition {
+	return func(param string) (string, interface{}) {
+		cond, value := condition(param)
+		return "NOT (" + cond + ")", value
 	}
 }
@@ -300,7 +312,7 @@ func NewLessThanCond(column string, value interface{}) handler.Condition {
 // if the value of a col is empty the data will be copied from the selected row
 // if the value of a col is not empty the data will be set by the static value
 // conds represent the conditions for the selection subquery
-func NewCopyStatement(event eventstore.Event, conflictCols, from, to []handler.Column, conds []handler.Condition, opts ...execOption) *handler.Statement {
+func NewCopyStatement(event eventstore.Event, conflictCols, from, to []handler.Column, nsCond []handler.NamespacedCondition, opts ...execOption) *handler.Statement {
 	columnNames := make([]string, len(to))
 	selectColumns := make([]string, len(from))
 	updateColumns := make([]string, len(columnNames))
@@ -319,13 +331,12 @@ func NewCopyStatement(event eventstore.Event, conflictCols, from, to []handler.C
 		}

 	}
-
-	wheres := make([]string, len(conds))
-	for i, cond := range conds {
-		argCounter++
-		wheres[i] = "copy_table." + cond.Name + " = $" + strconv.Itoa(argCounter)
-		args = append(args, cond.Value)
+	cond := make([]handler.Condition, len(nsCond))
+	for i := range nsCond {
+		cond[i] = nsCond[i]("copy_table")
 	}
+	wheres, values := conditionsToWhere(cond, len(args))
+	args = append(args, values...)

 	conflictTargets := make([]string, len(conflictCols))
 	for i, conflictCol := range conflictCols {
@@ -340,7 +351,7 @@ func NewCopyStatement(event eventstore.Event, conflictCols, from, to []handler.C
 		config.err = handler.ErrNoValues
 	}

-	if len(conds) == 0 {
+	if len(cond) == 0 {
 		config.err = handler.ErrNoCondition
 	}
@@ -394,18 +405,16 @@ func columnsToQuery(cols []handler.Column) (names []string, parameters []string,
 	return names, parameters, values[:parameterIndex]
 }

-func conditionsToWhere(cols []handler.Condition, paramOffset int) (wheres []string, values []interface{}) {
-	wheres = make([]string, len(cols))
-	values = make([]interface{}, len(cols))
-
-	for i, col := range cols {
-		wheres[i] = "(" + col.Name + " = $" + strconv.Itoa(i+1+paramOffset) + ")"
-		if col.ParameterOpt != nil {
-			wheres[i] = "(" + col.Name + col.ParameterOpt("$"+strconv.Itoa(i+1+paramOffset)) + ")"
-		}
-		values[i] = col.Value
+func conditionsToWhere(conditions []handler.Condition, paramOffset int) (wheres []string, values []interface{}) {
+	wheres = make([]string, len(conditions))
+	values = make([]interface{}, 0, len(conditions))
+	for i, conditionFunc := range conditions {
+		condition, value := conditionFunc("$" + strconv.Itoa(i+1+paramOffset))
+		wheres[i] = "(" + condition + ")"
+		if value != nil {
+			values = append(values, value)
+		}
 	}

 	return wheres, values
 }
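The new condition constructors compose into the existing statement helpers. As a hedged sketch, the example below combines the text-array, NOT and IS NULL conditions into a delete statement; the column names and values are illustrative assumptions, and the commented SQL shows how conditionsToWhere would render them.

```go
package example

import (
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/eventstore/handler"
	"github.com/zitadel/zitadel/internal/eventstore/handler/crdb"
)

// deleteStatement sketches composing the new condition helpers. Rendered WHERE
// clause (placeholders numbered by position, the IS NULL condition binds no value):
//   (endpoints @> $1) AND (NOT (primary_domain = $2)) AND (ignored_at IS NULL)
func deleteStatement(event eventstore.Event) crdb.Exec {
	conditions := []handler.Condition{
		crdb.NewTextArrayContainsCond("endpoints", "https://httpbin.org/post"),
		crdb.Not(handler.NewCond("primary_domain", "zitadel.example.com")),
		crdb.NewIsNullCond("ignored_at"),
	}
	return crdb.AddDeleteStatement(conditions)(event)
}
```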
@@ -6,6 +6,7 @@ import (
 	"reflect"
 	"testing"

+	"github.com/zitadel/zitadel/internal/database"
 	"github.com/zitadel/zitadel/internal/eventstore"
 	"github.com/zitadel/zitadel/internal/eventstore/handler"
 )
@@ -420,10 +421,7 @@ func TestNewUpdateStatement(t *testing.T) {
 					},
 				},
 				conditions: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+					handler.NewCond("col2", 1),
 				},
 			},
 			want: want{
@@ -450,10 +448,7 @@ func TestNewUpdateStatement(t *testing.T) {
 				},
 				values: []handler.Column{},
 				conditions: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+					handler.NewCond("col2", 1),
 				},
 			},
 			want: want{
@@ -515,10 +510,7 @@ func TestNewUpdateStatement(t *testing.T) {
 					},
 				},
 				conditions: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+					handler.NewCond("col2", 1),
 				},
 			},
 			want: want{
@@ -560,10 +552,7 @@ func TestNewUpdateStatement(t *testing.T) {
 					},
 				},
 				conditions: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+					handler.NewCond("col2", 1),
 				},
 			},
 			want: want{
@@ -630,10 +619,7 @@ func TestNewDeleteStatement(t *testing.T) {
 					previousSequence: 0,
 				},
 				conditions: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+					handler.NewCond("col2", 1),
 				},
 			},
 			want: want{
@@ -683,10 +669,7 @@ func TestNewDeleteStatement(t *testing.T) {
 					aggregateType: "agg",
 				},
 				conditions: []handler.Condition{
-					{
-						Name:  "col1",
-						Value: 1,
-					},
+					handler.NewCond("col1", 1),
 				},
 			},
 			want: want{
@@ -842,11 +825,9 @@ func TestNewMultiStatement(t *testing.T) {
 				execs: []func(eventstore.Event) Exec{
 					AddDeleteStatement(
 						[]handler.Condition{
-							{
-								Name:  "col1",
-								Value: 1,
-							},
-						}),
+							handler.NewCond("col1", 1),
+						},
+					),
 					AddCreateStatement(
 						[]handler.Column{
 							{
@@ -876,11 +857,9 @@ func TestNewMultiStatement(t *testing.T) {
 						},
 					},
 					[]handler.Condition{
-						{
-							Name:  "col1",
-							Value: 1,
-						},
-					}),
+						handler.NewCond("col1", 1),
+					},
+				),
 				},
 			},
 			want: want{
@@ -942,7 +921,7 @@ func TestNewCopyStatement(t *testing.T) {
 		conflictingCols []handler.Column
 		from            []handler.Column
 		to              []handler.Column
-		conds           []handler.Condition
+		conds           []handler.NamespacedCondition
 	}
 	type want struct {
 		aggregateType eventstore.AggregateType
@@ -966,11 +945,8 @@ func TestNewCopyStatement(t *testing.T) {
 					sequence:         1,
 					previousSequence: 0,
 				},
-				conds: []handler.Condition{
-					{
-						Name:  "col2",
-						Value: 1,
-					},
+				conds: []handler.NamespacedCondition{
+					handler.NewNamespacedCondition("col2", 1),
 				},
 			},
 			want: want{
@@ -995,7 +971,7 @@ func TestNewCopyStatement(t *testing.T) {
 					sequence:         1,
 					previousSequence: 0,
 				},
-				conds: []handler.Condition{},
+				conds: []handler.NamespacedCondition{},
 				from: []handler.Column{
 					{
 						Name: "col",
@@ -1029,7 +1005,7 @@ func TestNewCopyStatement(t *testing.T) {
 					sequence:         1,
 					previousSequence: 0,
 				},
-				conds: []handler.Condition{},
+				conds: []handler.NamespacedCondition{},
 				from: []handler.Column{
 					{
 						Name: "col",
@@ -1066,10 +1042,8 @@ func TestNewCopyStatement(t *testing.T) {
 					sequence:         1,
 					previousSequence: 0,
 				},
-				conds: []handler.Condition{
-					{
-						Name: "col",
-					},
+				conds: []handler.NamespacedCondition{
+					handler.NewNamespacedCondition("col2", nil),
 				},
 				from: []handler.Column{},
 			},
@@ -1124,15 +1098,9 @@ func TestNewCopyStatement(t *testing.T) {
 						Name: "col_b",
 					},
 				},
-				conds: []handler.Condition{
-					{
-						Name:  "id",
-						Value: 2,
-					},
-					{
-						Name:  "state",
-						Value: 3,
-					},
+				conds: []handler.NamespacedCondition{
+					handler.NewNamespacedCondition("id", 2),
+					handler.NewNamespacedCondition("state", 3),
 				},
 			},
 			want: want{
@@ -1143,7 +1111,7 @@ func TestNewCopyStatement(t *testing.T) {
 			executer: &wantExecuter{
 				params: []params{
 					{
-						query: "INSERT INTO my_table (state, id, col_a, col_b) SELECT $1, id, col_a, col_b FROM my_table AS copy_table WHERE copy_table.id = $2 AND copy_table.state = $3 ON CONFLICT () DO UPDATE SET (state, id, col_a, col_b) = ($1, EXCLUDED.id, EXCLUDED.col_a, EXCLUDED.col_b)",
+						query: "INSERT INTO my_table (state, id, col_a, col_b) SELECT $1, id, col_a, col_b FROM my_table AS copy_table WHERE (copy_table.id = $2) AND (copy_table.state = $3) ON CONFLICT () DO UPDATE SET (state, id, col_a, col_b) = ($1, EXCLUDED.id, EXCLUDED.col_a, EXCLUDED.col_b)",
 						args:  []interface{}{1, 2, 3},
 					},
 				},
@@ -1191,15 +1159,9 @@ func TestNewCopyStatement(t *testing.T) {
 						Name: "col_d",
 					},
 				},
-				conds: []handler.Condition{
-					{
-						Name:  "id",
-						Value: 2,
-					},
-					{
-						Name:  "state",
-						Value: 3,
-					},
+				conds: []handler.NamespacedCondition{
+					handler.NewNamespacedCondition("id", 2),
+					handler.NewNamespacedCondition("state", 3),
 				},
 			},
 			want: want{
@@ -1210,7 +1172,7 @@ func TestNewCopyStatement(t *testing.T) {
 			executer: &wantExecuter{
 				params: []params{
 					{
-						query: "INSERT INTO my_table (state, id, col_c, col_d) SELECT $1, id, col_a, col_b FROM my_table AS copy_table WHERE copy_table.id = $2 AND copy_table.state = $3 ON CONFLICT () DO UPDATE SET (state, id, col_c, col_d) = ($1, EXCLUDED.id, EXCLUDED.col_a, EXCLUDED.col_b)",
+						query: "INSERT INTO my_table (state, id, col_c, col_d) SELECT $1, id, col_a, col_b FROM my_table AS copy_table WHERE (copy_table.id = $2) AND (copy_table.state = $3) ON CONFLICT () DO UPDATE SET (state, id, col_c, col_d) = ($1, EXCLUDED.id, EXCLUDED.col_a, EXCLUDED.col_b)",
 						args:  []interface{}{1, 2, 3},
 					},
 				},
@@ -1395,7 +1357,7 @@ func Test_columnsToQuery(t *testing.T) {
 	}
 }

-func Test_columnsToWhere(t *testing.T) {
+func Test_conditionsToWhere(t *testing.T) {
 	type args struct {
 		conds       []handler.Condition
 		paramOffset int
@@ -1421,10 +1383,7 @@
 			name: "no offset",
 			args: args{
 				conds: []handler.Condition{
-					{
-						Name:  "col1",
-						Value: "val1",
-					},
+					handler.NewCond("col1", "val1"),
 				},
 				paramOffset: 0,
 			},
@@ -1437,14 +1396,8 @@
 			name: "multiple cols",
 			args: args{
 				conds: []handler.Condition{
-					{
-						Name:  "col1",
-						Value: "val1",
-					},
-					{
-						Name:  "col2",
-						Value: "val2",
-					},
+					handler.NewCond("col1", "val1"),
+					handler.NewCond("col2", "val2"),
 				},
 				paramOffset: 0,
 			},
@@ -1457,10 +1410,7 @@
 			name: "2 offset",
 			args: args{
 				conds: []handler.Condition{
-					{
-						Name:  "col1",
-						Value: "val1",
-					},
+					handler.NewCond("col1", "val1"),
 				},
 				paramOffset: 2,
 			},
@@ -1469,6 +1419,54 @@
 				values: []interface{}{"val1"},
 			},
 		},
+		{
+			name: "less than",
+			args: args{
+				conds: []handler.Condition{
+					NewLessThanCond("col1", "val1"),
+				},
+			},
+			want: want{
+				wheres: []string{"(col1 < $1)"},
+				values: []interface{}{"val1"},
+			},
+		},
+		{
+			name: "is null",
+			args: args{
+				conds: []handler.Condition{
+					NewIsNullCond("col1"),
+				},
+			},
+			want: want{
+				wheres: []string{"(col1 IS NULL)"},
+				values: []interface{}{},
+			},
+		},
+		{
+			name: "text array contains",
+			args: args{
+				conds: []handler.Condition{
+					NewTextArrayContainsCond("col1", "val1"),
+				},
+			},
+			want: want{
+				wheres: []string{"(col1 @> $1)"},
+				values: []interface{}{database.StringArray{"val1"}},
+			},
+		},
+		{
+			name: "not",
+			args: args{
+				conds: []handler.Condition{
+					Not(handler.NewCond("col1", "val1")),
+				},
+			},
+			want: want{
+				wheres: []string{"(NOT (col1 = $1))"},
+				values: []interface{}{"val1"},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/zitadel/zitadel/internal/api/authz"
|
"github.com/zitadel/zitadel/internal/api/authz"
|
||||||
"github.com/zitadel/zitadel/internal/eventstore"
|
"github.com/zitadel/zitadel/internal/eventstore"
|
||||||
|
"github.com/zitadel/zitadel/internal/repository/pseudo"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -49,19 +50,20 @@ type NowFunc func() time.Time
 
 type ProjectionHandler struct {
 	Handler
 	ProjectionName             string
 	reduce                     Reduce
 	update                     Update
 	searchQuery                SearchQuery
 	triggerProjection          *time.Timer
 	lock                       Lock
 	unlock                     Unlock
 	requeueAfter               time.Duration
 	retryFailedAfter           time.Duration
 	retries                    int
 	concurrentInstances        int
 	handleActiveInstances      time.Duration
 	nowFunc                    NowFunc
+	reduceScheduledPseudoEvent bool
 }
 
 func NewProjectionHandler(
@@ -73,32 +75,35 @@ func NewProjectionHandler(
 	lock Lock,
 	unlock Unlock,
 	initialized <-chan bool,
+	reduceScheduledPseudoEvent bool,
 ) *ProjectionHandler {
 	concurrentInstances := int(config.ConcurrentInstances)
 	if concurrentInstances < 1 {
 		concurrentInstances = 1
 	}
 	h := &ProjectionHandler{
 		Handler:                    NewHandler(config.HandlerConfig),
 		ProjectionName:             config.ProjectionName,
 		reduce:                     reduce,
 		update:                     update,
 		searchQuery:                query,
 		lock:                       lock,
 		unlock:                     unlock,
 		requeueAfter:               config.RequeueEvery,
 		triggerProjection:          time.NewTimer(0), // first trigger is instant on startup
 		retryFailedAfter:           config.RetryFailedAfter,
 		retries:                    int(config.Retries),
 		concurrentInstances:        concurrentInstances,
 		handleActiveInstances:      config.HandleActiveInstances,
 		nowFunc:                    time.Now,
+		reduceScheduledPseudoEvent: reduceScheduledPseudoEvent,
 	}
 
 	go func() {
 		<-initialized
-		go h.subscribe(ctx)
+		if !h.reduceScheduledPseudoEvent {
+			go h.subscribe(ctx)
+		}
 		go h.schedule(ctx)
 	}()
 
@@ -158,6 +163,13 @@ func (h *ProjectionHandler) Process(ctx context.Context, events ...eventstore.Ev
 
 // FetchEvents checks the current sequences and filters for newer events
 func (h *ProjectionHandler) FetchEvents(ctx context.Context, instances ...string) ([]eventstore.Event, bool, error) {
+	if h.reduceScheduledPseudoEvent {
+		return h.fetchPseudoEvents(ctx, instances...)
+	}
+	return h.fetchDBEvents(ctx, instances...)
+}
+
+func (h *ProjectionHandler) fetchDBEvents(ctx context.Context, instances ...string) ([]eventstore.Event, bool, error) {
 	eventQuery, eventsLimit, err := h.searchQuery(ctx, instances)
 	if err != nil {
 		return nil, false, err
@@ -169,6 +181,10 @@ func (h *ProjectionHandler) FetchEvents(ctx context.Context, instances ...string
 	return events, int(eventsLimit) == len(events), err
 }
 
+func (h *ProjectionHandler) fetchPseudoEvents(ctx context.Context, instances ...string) ([]eventstore.Event, bool, error) {
+	return []eventstore.Event{pseudo.NewScheduledEvent(ctx, time.Now(), instances...)}, false, nil
+}
+
 func (h *ProjectionHandler) subscribe(ctx context.Context) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -342,6 +342,7 @@ func TestProjectionHandler_Process(t *testing.T) {
 				nil,
 				nil,
 				nil,
+				false,
 			)
 
 			index, err := h.Process(tt.args.ctx, tt.args.events...)
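For orientation, a minimal sketch (not part of the commit; the leading constructor arguments and their order are assumed from the struct fields above, not verified against the full signature): with the last argument set to true the handler never subscribes to stored events, and every schedule tick makes FetchEvents return a single pseudo.ScheduledEvent covering the active instances.

// Sketch only: config, reduce, update, query, lock, unlock and initialized stand for the
// usual handler dependencies and are assumptions, not values taken from this diff.
h := NewProjectionHandler(
	ctx,
	config,
	reduce, // receives the *pseudo.ScheduledEvent carrying the active instance IDs
	update,
	query,
	lock,
	unlock,
	initialized,
	true, // reduceScheduledPseudoEvent: skip subscribe(), rely on schedule() only
)
_ = h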
@@ -4,7 +4,6 @@ import (
 	"database/sql"
 	"encoding/json"
 	"errors"
 
 	"github.com/zitadel/logging"
 
 	"github.com/zitadel/zitadel/internal/eventstore"
@@ -62,11 +61,18 @@ func NewJSONCol(name string, value interface{}) Column {
 	return NewCol(name, marshalled)
 }
 
-type Condition Column
+type Condition func(param string) (string, interface{})
+
+type NamespacedCondition func(namespace string) Condition
 
 func NewCond(name string, value interface{}) Condition {
-	return Condition{
-		Name:  name,
-		Value: value,
+	return func(param string) (string, interface{}) {
+		return name + " = " + param, value
+	}
+}
+
+func NewNamespacedCondition(name string, value interface{}) NamespacedCondition {
+	return func(namespace string) Condition {
+		return NewCond(namespace+"."+name, value)
 	}
 }
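To make the new types concrete, a self-contained sketch (the example values are made up): a Condition now renders itself against a placeholder at statement build time, and a NamespacedCondition additionally defers the table alias, which is what the copy-table statements in the label policy projection further below rely on.

package main

import "fmt"

// Definitions copied from the hunk above so the sketch runs on its own.
type Condition func(param string) (string, interface{})
type NamespacedCondition func(namespace string) Condition

func NewCond(name string, value interface{}) Condition {
	return func(param string) (string, interface{}) {
		return name + " = " + param, value
	}
}

func NewNamespacedCondition(name string, value interface{}) NamespacedCondition {
	return func(namespace string) Condition {
		return NewCond(namespace+"."+name, value)
	}
}

func main() {
	clause, arg := NewCond("id", "123")("$1")
	fmt.Println(clause, arg) // id = $1 123

	clause, arg = NewNamespacedCondition("id", "123")("copy_table")("$1")
	fmt.Println(clause, arg) // copy_table.id = $1 123
}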
@@ -20,6 +20,7 @@ import (
 	mgmt "github.com/zitadel/zitadel/pkg/grpc/management"
 	object "github.com/zitadel/zitadel/pkg/grpc/object/v2alpha"
 	session "github.com/zitadel/zitadel/pkg/grpc/session/v2alpha"
+	"github.com/zitadel/zitadel/pkg/grpc/system"
 	user "github.com/zitadel/zitadel/pkg/grpc/user/v2alpha"
 )
 
@@ -29,6 +30,7 @@ type Client struct {
 	Mgmt      mgmt.ManagementServiceClient
 	UserV2    user.UserServiceClient
 	SessionV2 session.SessionServiceClient
+	System    system.SystemServiceClient
 }
 
 func newClient(cc *grpc.ClientConn) Client {
@@ -38,9 +40,36 @@ func newClient(cc *grpc.ClientConn) Client {
 		Mgmt:      mgmt.NewManagementServiceClient(cc),
 		UserV2:    user.NewUserServiceClient(cc),
 		SessionV2: session.NewSessionServiceClient(cc),
+		System:    system.NewSystemServiceClient(cc),
 	}
 }
 
+func (t *Tester) UseIsolatedInstance(iamOwnerCtx, systemCtx context.Context) (primaryDomain, instanceId string, authenticatedIamOwnerCtx context.Context) {
+	primaryDomain = randString(5) + ".integration"
+	instance, err := t.Client.System.CreateInstance(systemCtx, &system.CreateInstanceRequest{
+		InstanceName: "testinstance",
+		CustomDomain: primaryDomain,
+		Owner: &system.CreateInstanceRequest_Machine_{
+			Machine: &system.CreateInstanceRequest_Machine{
+				UserName:            "owner",
+				Name:                "owner",
+				PersonalAccessToken: &system.CreateInstanceRequest_PersonalAccessToken{},
+			},
+		},
+	})
+	if err != nil {
+		panic(err)
+	}
+	t.createClientConn(iamOwnerCtx, grpc.WithAuthority(primaryDomain))
+	instanceId = instance.GetInstanceId()
+	t.Users[instanceId] = map[UserType]User{
+		IAMOwner: {
+			Token: instance.GetPat(),
+		},
+	}
+	return primaryDomain, instanceId, t.WithInstanceAuthorization(iamOwnerCtx, IAMOwner, instanceId)
+}
+
 func (s *Tester) CreateHumanUser(ctx context.Context) *user.AddHumanUserResponse {
 	resp, err := s.Client.UserV2.AddHumanUser(ctx, &user.AddHumanUserRequest{
 		Organisation: &object.Organisation{

internal/integration/config/system-user-key.pem (new file, 27 lines)
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzi+FFSJL7f5yw4KTwzgMP34ePGycm/M+kT0M7V4Cgx5V3EaD
+IvTQKTLfBaEB45zb9LtjIXzDw0rXRoS2hO6th+CYQCz3KCvh09C0IzxZiB2IS3H/
+aT+5Bx9EFY+vnAkZjccbyG5YNRvmtOlnvIeIH7qZ0tEwkPfF5GEZNPJPtmy3UGV7
+iofdVQS1xRj73+aMw5rvH4D8IdyiAC3VekIbpt0Vj0SUX3DwKtog337BzTiPk3aX
+RF0sbFhQoqdJRI8NqgZjCwjq9yfI5tyxYswn+JGzHGdHvW3idODlmwEt5K2pasiR
+IWK2OGfq+w0EcltQHabuqEPgZlmhCkRdNfixBwIDAQABAoIBAA9jNoBkRdxmH/R9
+Wz+3gBqA9Aq4ZFuzJJk8QCm62V8ltWyyCnliYeKhPEm0QWrWOwghr/1AzW9Wt4g4
+wVJcabD5TwODF5L0626eZcM3bsscwR44TMJzEgD5EWC2j3mKqFCPaoBj08tq4KXh
+wW8tgjgz+eTk3cYD583qfTIZX1+SzSMBpetTBsssQtGhhOB/xPiuL7hi+fXmV2rh
+8mc9X6+wJ5u3zepsyK0vBeEDmurD4ZUIXFrZ0WCB/wNkSW9VKyoH+RC1asQAgqTz
+glJ/NPbDJSKGvSBQydoKkqoXx7MVJ8VObFddfgo4dtOoz6YCfUVBHt8qy+E5rz5y
+CICjL/kCgYEA9MnHntVVKNXtEFZPo02xgCwS3eG27ZwjYgJ1ZkCHM5BuL4MS7qbr
+743/POs1Ctaok0udHl1PFB4uAG0URnmkUnWzcoJYb6Plv03F0LRdsnfuhehfIxLP
+nWvxSm5n21H4ytfxm0BWY09JkLDnJZtXrgTILbuqb9Wy6TmAvUaF2YUCgYEA16Ec
+ywSaLVdqPaVpsTxi7XpRJAB2Isjp6RffNEecta4S0LL7s/IO3QXDH9SYpgmgCTah
+3aXhpT4hIFlpg3eBjVfbOwgqub8DgirnSQyQt99edUtHIK+K8nMdGxz6X6pfTKzK
+asSH7qPlt5tz1621vC0ocXSZR7zm99/FgwILwBsCgYBOsP8nJFV4By1qbxSy3qsN
+FR4LjiAMSoFlZHzxHhVYkjmZtH1FkwuNuwwuPT6T+WW/1DLyK/Tb9se7A1XdQgV9
+LLE/Qn/Dg+C7mvjYmuL0GHHpQkYzNDzh0m2DC/L/Il7kdn8I9anPyxFPHk9wW3vY
+SVlAum+T/BLDvuSP9DfbMQKBgCc1j7PG8XYfOB1fj7l/volqPYjrYI/wssAE7Dxo
+bTGIJrm2YhiVgmhkXNfT47IFfAlQ2twgBsjyZDmqqIoUWAVonV+9m29NMYkg3g+l
+bkdRIa74ckWaRgzSK8+7VDfDFjMuFFyXwhP9z460gLsORkaie4Et75Vg3yrhkNvC
+qnpTAoGBAMguDSWBbCewXnHlKGFpm+LH+OIvVKGEhtCSvfZojtNrg/JBeBebSL1n
+mmT1cONO+0O5bz7uVaRd3JdnH2JFevY698zFfhVsjVCrm+fz31i5cxAgC39G2Lfl
+YkTaa1AFLstnf348ZjuvBN3USUYZo3X3mxnS+uluVuRSGwIKsN0a
+-----END RSA PRIVATE KEY-----
@@ -4,6 +4,16 @@ Log:
 TLS:
   Enabled: false
 
+Telemetry:
+  Enabled: true
+  Endpoints:
+    - http://localhost:8081
+  Headers:
+    single-value: "single-value"
+    multi-value:
+      - "multi-value-1"
+      - "multi-value-2"
+
 FirstInstance:
   Org:
     Human:
@@ -31,7 +41,13 @@ Projections:
   Customizations:
     NotificationsQuotas:
       RequeueEvery: 1s
+    Telemetry:
+      RequeueEvery: 5s
 
 DefaultInstance:
   LoginPolicy:
     MfaInitSkipLifetime: "0"
 
+SystemAPIUsers:
+  - tester:
+      KeyData: "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF6aStGRlNKTDdmNXl3NEtUd3pnTQpQMzRlUEd5Y20vTStrVDBNN1Y0Q2d4NVYzRWFESXZUUUtUTGZCYUVCNDV6YjlMdGpJWHpEdzByWFJvUzJoTzZ0CmgrQ1lRQ3ozS0N2aDA5QzBJenhaaUIySVMzSC9hVCs1Qng5RUZZK3ZuQWtaamNjYnlHNVlOUnZtdE9sbnZJZUkKSDdxWjB0RXdrUGZGNUdFWk5QSlB0bXkzVUdWN2lvZmRWUVMxeFJqNzMrYU13NXJ2SDREOElkeWlBQzNWZWtJYgpwdDBWajBTVVgzRHdLdG9nMzM3QnpUaVBrM2FYUkYwc2JGaFFvcWRKUkk4TnFnWmpDd2pxOXlmSTV0eXhZc3duCitKR3pIR2RIdlczaWRPRGxtd0V0NUsycGFzaVJJV0syT0dmcSt3MEVjbHRRSGFidXFFUGdabG1oQ2tSZE5maXgKQndJREFRQUIKLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg=="
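As a rough orientation (not part of the diff, and viper's exact decoding behaviour is not shown here), the Telemetry block above corresponds to the TelemetryPusherConfig type added later in this commit, with Headers carried as an http.Header so multi-value keys become repeated header values on every telemetry request:

// Sketch of the decoded configuration; field names follow TelemetryPusherConfig,
// the literal values mirror the YAML above.
cfg := handlers.TelemetryPusherConfig{
	Enabled:   true,
	Endpoints: []string{"http://localhost:8081"},
	Headers: http.Header{
		"single-value": {"single-value"},
		"multi-value":  {"multi-value-1", "multi-value-2"},
	},
}
_ = cfg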
@@ -15,6 +15,7 @@ import (
 
 	"github.com/spf13/viper"
 	"github.com/zitadel/logging"
+	"github.com/zitadel/oidc/v2/pkg/client"
 	"github.com/zitadel/oidc/v2/pkg/oidc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
@@ -23,6 +24,7 @@ import (
 	"github.com/zitadel/zitadel/cmd"
 	"github.com/zitadel/zitadel/cmd/start"
 	"github.com/zitadel/zitadel/internal/api/authz"
+	http_util "github.com/zitadel/zitadel/internal/api/http"
 	z_oidc "github.com/zitadel/zitadel/internal/api/oidc"
 	"github.com/zitadel/zitadel/internal/command"
 	"github.com/zitadel/zitadel/internal/domain"
@@ -40,6 +42,8 @@ var (
 	cockroachYAML []byte
 	//go:embed config/postgres.yaml
 	postgresYAML []byte
+	//go:embed config/system-user-key.pem
+	systemUserKey []byte
 )
 
 // UserType provides constants that give
@@ -53,6 +57,12 @@ type UserType int
 const (
 	Unspecified UserType = iota
 	OrgOwner
+	IAMOwner
+	SystemUser // SystemUser is a user with access to the system service.
+)
+
+const (
+	FirstInstanceUsersKey = "first"
 )
 
 // User information with a Personal Access Token.
@@ -67,7 +77,7 @@ type Tester struct {
 
 	Instance     authz.Instance
 	Organisation *query.Org
-	Users        map[UserType]User
+	Users        map[string]map[UserType]User
 
 	Client   Client
 	WebAuthN *webauthn.Client
@@ -80,11 +90,12 @@ func (s *Tester) Host() string {
 	return fmt.Sprintf("%s:%d", s.Config.ExternalDomain, s.Config.Port)
 }
 
-func (s *Tester) createClientConn(ctx context.Context) {
+func (s *Tester) createClientConn(ctx context.Context, opts ...grpc.DialOption) {
 	target := s.Host()
-	cc, err := grpc.DialContext(ctx, target,
-		grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()),
-	)
+	cc, err := grpc.DialContext(ctx, target, append(opts,
+		grpc.WithBlock(),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	)...)
 	if err != nil {
 		s.Shutdown <- os.Interrupt
 		s.wg.Wait()
@@ -124,10 +135,10 @@ func (s *Tester) pollHealth(ctx context.Context) (err error) {
 }
 
 const (
-	SystemUser = "integration"
+	MachineUser = "integration"
 )
 
-func (s *Tester) createSystemUser(ctx context.Context) {
+func (s *Tester) createMachineUser(ctx context.Context, instanceId string) {
 	var err error
 
 	s.Instance, err = s.Queries.InstanceByHost(ctx, s.Host())
@@ -137,7 +148,7 @@ func (s *Tester) createSystemUser(ctx context.Context) {
 	s.Organisation, err = s.Queries.OrgByID(ctx, true, s.Instance.DefaultOrganisationID())
 	logging.OnError(err).Fatal("query organisation")
 
-	query, err := query.NewUserUsernameSearchQuery(SystemUser, query.TextEquals)
+	query, err := query.NewUserUsernameSearchQuery(MachineUser, query.TextEquals)
 	logging.OnError(err).Fatal("user query")
 	user, err := s.Queries.GetUser(ctx, true, true, query)
 
@@ -146,8 +157,8 @@ func (s *Tester) createSystemUser(ctx context.Context) {
 			ObjectRoot: models.ObjectRoot{
 				ResourceOwner: s.Organisation.ID,
 			},
-			Username:        SystemUser,
-			Name:            SystemUser,
+			Username:        MachineUser,
+			Name:            MachineUser,
 			Description:     "who cares?",
 			AccessTokenType: domain.OIDCTokenTypeJWT,
 		})
@@ -168,16 +179,43 @@ func (s *Tester) createSystemUser(ctx context.Context) {
 	_, err = s.Commands.AddPersonalAccessToken(ctx, pat)
 	logging.OnError(err).Fatal("add pat")
 
-	s.Users = map[UserType]User{
-		OrgOwner: {
-			User:  user,
-			Token: pat.Token,
-		},
+	if s.Users == nil {
+		s.Users = make(map[string]map[UserType]User)
+	}
+	if s.Users[instanceId] == nil {
+		s.Users[instanceId] = make(map[UserType]User)
+	}
+	s.Users[instanceId][OrgOwner] = User{
+		User:  user,
+		Token: pat.Token,
 	}
 }
 
-func (s *Tester) WithSystemAuthorization(ctx context.Context, u UserType) context.Context {
-	return metadata.AppendToOutgoingContext(ctx, "Authorization", fmt.Sprintf("Bearer %s", s.Users[u].Token))
+func (s *Tester) WithAuthorization(ctx context.Context, u UserType) context.Context {
+	return s.WithInstanceAuthorization(ctx, u, FirstInstanceUsersKey)
+}
+
+func (s *Tester) WithInstanceAuthorization(ctx context.Context, u UserType, instanceID string) context.Context {
+	if u == SystemUser {
+		s.ensureSystemUser()
+	}
+	return metadata.AppendToOutgoingContext(ctx, "Authorization", fmt.Sprintf("Bearer %s", s.Users[instanceID][u].Token))
+}
+
+func (s *Tester) ensureSystemUser() {
+	const ISSUER = "tester"
+	if s.Users[FirstInstanceUsersKey] == nil {
+		s.Users[FirstInstanceUsersKey] = make(map[UserType]User)
+	}
+	if _, ok := s.Users[FirstInstanceUsersKey][SystemUser]; ok {
+		return
+	}
+	audience := http_util.BuildOrigin(s.Host(), s.Server.Config.ExternalSecure)
+	signer, err := client.NewSignerFromPrivateKeyByte(systemUserKey, "")
+	logging.OnError(err).Fatal("system key signer")
+	jwt, err := client.SignedJWTProfileAssertion(ISSUER, []string{audience}, time.Hour, signer)
+	logging.OnError(err).Fatal("system key jwt")
+	s.Users[FirstInstanceUsersKey][SystemUser] = User{Token: jwt}
 }
 
 // Done send an interrupt signal to cleanly shutdown the server.
@@ -224,7 +262,11 @@ func NewTester(ctx context.Context) *Tester {
 	}
 	logging.OnError(err).Fatal()
 
-	tester := new(Tester)
+	tester := Tester{
+		Users: map[string]map[UserType]User{
+			FirstInstanceUsersKey: make(map[UserType]User),
+		},
+	}
 	tester.wg.Add(1)
 	go func(wg *sync.WaitGroup) {
 		logging.OnError(cmd.Execute()).Fatal()
@@ -237,10 +279,10 @@ func NewTester(ctx context.Context) *Tester {
 		logging.OnError(ctx.Err()).Fatal("waiting for integration tester server")
 	}
 	tester.createClientConn(ctx)
-	tester.createSystemUser(ctx)
+	tester.createMachineUser(ctx, FirstInstanceUsersKey)
 	tester.WebAuthN = webauthn.NewClient(tester.Config.WebAuthNName, tester.Config.ExternalDomain, "https://"+tester.Host())
 
-	return tester
+	return &tester
 }
 
 func Contexts(timeout time.Duration) (ctx, errCtx context.Context, cancel context.CancelFunc) {

internal/integration/rand.go (new file, 20 lines)
@@ -0,0 +1,20 @@
+package integration
+
+import (
+	"math/rand"
+	"time"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
+
+func randString(n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = letterRunes[rand.Intn(len(letterRunes))]
+	}
+	return string(b)
+}
@@ -21,10 +21,8 @@ func InitChannel(ctx context.Context, cfg Config) (channels.NotificationChannel,
 
 	logging.Debug("successfully initialized webhook json channel")
 	return channels.HandleMessageFunc(func(message channels.Message) error {
-
 		requestCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
 		defer cancel()
-
 		msg, ok := message.(*messages.JSON)
 		if !ok {
 			return errors.ThrowInternal(nil, "WEBH-K686U", "message is not JSON")
@@ -33,27 +31,24 @@ func InitChannel(ctx context.Context, cfg Config) (channels.NotificationChannel,
 		if err != nil {
 			return err
 		}
-
 		req, err := http.NewRequestWithContext(requestCtx, cfg.Method, cfg.CallURL, strings.NewReader(payload))
 		if err != nil {
 			return err
 		}
+		if cfg.Headers != nil {
+			req.Header = cfg.Headers
+		}
 		req.Header.Set("Content-Type", "application/json")
-
 		resp, err := http.DefaultClient.Do(req)
 		if err != nil {
 			return err
 		}
-
 		if err = resp.Body.Close(); err != nil {
 			return err
 		}
-
 		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
 			return errors.ThrowUnknown(fmt.Errorf("calling url %s returned %s", cfg.CallURL, resp.Status), "WEBH-LBxU0", "webhook didn't return a success status")
 		}
-
 		logging.WithFields("calling_url", cfg.CallURL, "method", cfg.Method).Debug("webhook called")
 		return nil
 	}), nil
@@ -1,12 +1,14 @@
 package webhook
 
 import (
+	"net/http"
 	"net/url"
 )
 
 type Config struct {
 	CallURL string
 	Method  string
+	Headers http.Header
 }
 
 func (w *Config) Validate() error {
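A small usage sketch (the call site is illustrative, not taken from this commit): callers such as the telemetry pusher hand extra headers to the channel through the extended Config, and InitChannel copies them onto the request before forcing the JSON content type.

// Sketch: a webhook config carrying custom headers.
cfg := webhook.Config{
	CallURL: "http://localhost:8081",
	Method:  http.MethodPost,
	Headers: http.Header{"single-value": {"single-value"}},
}
_ = cfg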
@@ -4,16 +4,15 @@ import (
 	"context"
 
 	"github.com/zitadel/zitadel/internal/eventstore"
-	"github.com/zitadel/zitadel/internal/repository/user"
 )
 
-func (n *NotificationQueries) IsAlreadyHandled(ctx context.Context, event eventstore.Event, data map[string]interface{}, eventTypes ...eventstore.EventType) (bool, error) {
+func (n *NotificationQueries) IsAlreadyHandled(ctx context.Context, event eventstore.Event, data map[string]interface{}, aggregateType eventstore.AggregateType, eventTypes ...eventstore.EventType) (bool, error) {
 	events, err := n.es.Filter(
 		ctx,
 		eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
 			InstanceID(event.Aggregate().InstanceID).
 			AddQuery().
-			AggregateTypes(user.AggregateType).
+			AggregateTypes(aggregateType).
 			AggregateIDs(event.Aggregate().ID).
 			SequenceGreater(event.Sequence()).
 			EventTypes(eventTypes...).

internal/notification/handlers/handlers_integration_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+//go:build integration
+
+package handlers_test
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/zitadel/zitadel/internal/integration"
+)
+
+var (
+	CTX       context.Context
+	SystemCTX context.Context
+	Tester    *integration.Tester
+)
+
+func TestMain(m *testing.M) {
+	os.Exit(func() int {
+		ctx, _, cancel := integration.Contexts(5 * time.Minute)
+		CTX = ctx
+		defer cancel()
+		Tester = integration.NewTester(ctx)
+		SystemCTX = Tester.WithAuthorization(ctx, integration.SystemUser)
+		defer Tester.Done()
+		return m.Run()
+	}())
+}
@@ -68,7 +68,7 @@ func (u *quotaNotifier) reduceNotificationDue(event eventstore.Event) (*handler.
 		return nil, errors.ThrowInvalidArgumentf(nil, "HANDL-DLxdE", "reduce.wrong.event.type %s", quota.NotificationDueEventType)
 	}
 	ctx := HandlerContext(event.Aggregate())
-	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, map[string]interface{}{"dueEventID": e.ID}, quota.NotifiedEventType)
+	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, map[string]interface{}{"dueEventID": e.ID}, quota.AggregateType, quota.NotifiedEventType)
 	if err != nil {
 		return nil, err
 	}

internal/notification/handlers/telemetry_pusher.go (new file, 150 lines)
@@ -0,0 +1,150 @@
+package handlers
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/zitadel/logging"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/api/call"
+	"github.com/zitadel/zitadel/internal/command"
+	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/eventstore"
+	"github.com/zitadel/zitadel/internal/eventstore/handler"
+	"github.com/zitadel/zitadel/internal/eventstore/handler/crdb"
+	"github.com/zitadel/zitadel/internal/notification/channels/webhook"
+	_ "github.com/zitadel/zitadel/internal/notification/statik"
+	"github.com/zitadel/zitadel/internal/notification/types"
+	"github.com/zitadel/zitadel/internal/query"
+	"github.com/zitadel/zitadel/internal/query/projection"
+	"github.com/zitadel/zitadel/internal/repository/milestone"
+	"github.com/zitadel/zitadel/internal/repository/pseudo"
+)
+
+const (
+	TelemetryProjectionTable = "projections.telemetry"
+)
+
+type TelemetryPusherConfig struct {
+	Enabled   bool
+	Endpoints []string
+	Headers   http.Header
+	Limit     uint64
+}
+
+type telemetryPusher struct {
+	crdb.StatementHandler
+	cfg                            TelemetryPusherConfig
+	commands                       *command.Commands
+	queries                        *NotificationQueries
+	metricSuccessfulDeliveriesJSON string
+	metricFailedDeliveriesJSON     string
+}
+
+func NewTelemetryPusher(
+	ctx context.Context,
+	telemetryCfg TelemetryPusherConfig,
+	handlerCfg crdb.StatementHandlerConfig,
+	commands *command.Commands,
+	queries *NotificationQueries,
+	metricSuccessfulDeliveriesJSON,
+	metricFailedDeliveriesJSON string,
+) *telemetryPusher {
+	p := new(telemetryPusher)
+	handlerCfg.ProjectionName = TelemetryProjectionTable
+	handlerCfg.Reducers = p.reducers()
+	p.cfg = telemetryCfg
+	p.StatementHandler = crdb.NewStatementHandler(ctx, handlerCfg)
+	p.commands = commands
+	p.queries = queries
+	p.metricSuccessfulDeliveriesJSON = metricSuccessfulDeliveriesJSON
+	p.metricFailedDeliveriesJSON = metricFailedDeliveriesJSON
+	projection.TelemetryPusherProjection = p
+	return p
+}
+
+func (t *telemetryPusher) reducers() []handler.AggregateReducer {
+	return []handler.AggregateReducer{{
+		Aggregate: pseudo.AggregateType,
+		EventRedusers: []handler.EventReducer{{
+			Event:  pseudo.ScheduledEventType,
+			Reduce: t.pushMilestones,
+		}},
+	}}
+}
+
+func (t *telemetryPusher) pushMilestones(event eventstore.Event) (*handler.Statement, error) {
+	ctx := call.WithTimestamp(context.Background())
+	scheduledEvent, ok := event.(*pseudo.ScheduledEvent)
+	if !ok {
+		return nil, errors.ThrowInvalidArgumentf(nil, "HANDL-lDTs5", "reduce.wrong.event.type %s", event.Type())
+	}
+
+	isReached, err := query.NewNotNullQuery(query.MilestoneReachedDateColID)
+	if err != nil {
+		return nil, err
+	}
+	isNotPushed, err := query.NewIsNullQuery(query.MilestonePushedDateColID)
+	if err != nil {
+		return nil, err
+	}
+	hasPrimaryDomain, err := query.NewNotNullQuery(query.MilestonePrimaryDomainColID)
+	if err != nil {
+		return nil, err
+	}
+	unpushedMilestones, err := t.queries.Queries.SearchMilestones(ctx, scheduledEvent.InstanceIDs, &query.MilestonesSearchQueries{
+		SearchRequest: query.SearchRequest{
+			Limit:         t.cfg.Limit,
+			SortingColumn: query.MilestoneReachedDateColID,
+			Asc:           true,
+		},
+		Queries: []query.SearchQuery{isReached, isNotPushed, hasPrimaryDomain},
+	})
+	if err != nil {
+		return nil, err
+	}
+	var errs int
+	for _, ms := range unpushedMilestones.Milestones {
+		if err = t.pushMilestone(ctx, scheduledEvent, ms); err != nil {
+			errs++
+			logging.Warnf("pushing milestone %+v failed: %s", *ms, err.Error())
+		}
+	}
+	if errs > 0 {
+		return nil, fmt.Errorf("pushing %d of %d milestones failed", errs, unpushedMilestones.Count)
+	}
+
+	return crdb.NewNoOpStatement(scheduledEvent), nil
+}
+
+func (t *telemetryPusher) pushMilestone(ctx context.Context, event *pseudo.ScheduledEvent, ms *query.Milestone) error {
+	ctx = authz.WithInstanceID(ctx, ms.InstanceID)
+	alreadyHandled, err := t.queries.IsAlreadyHandled(ctx, event, map[string]interface{}{"type": ms.Type.String()}, milestone.AggregateType, milestone.PushedEventType)
+	if err != nil {
+		return err
+	}
+	if alreadyHandled {
+		return nil
+	}
+	for _, endpoint := range t.cfg.Endpoints {
+		if err := types.SendJSON(
+			ctx,
+			webhook.Config{
+				CallURL: endpoint,
+				Method:  http.MethodPost,
+				Headers: t.cfg.Headers,
+			},
+			t.queries.GetFileSystemProvider,
+			t.queries.GetLogProvider,
+			ms,
+			event,
+			t.metricSuccessfulDeliveriesJSON,
+			t.metricFailedDeliveriesJSON,
+		).WithoutTemplate(); err != nil {
+			return err
+		}
+	}
+	return t.commands.MilestonePushed(ctx, ms.Type, t.cfg.Endpoints, ms.PrimaryDomain)
+}

(new file, 89 lines)
@@ -0,0 +1,89 @@
+//go:build integration
+
+package handlers_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/zitadel/zitadel/pkg/grpc/management"
+	"github.com/zitadel/zitadel/pkg/grpc/system"
+)
+
+func TestServer_TelemetryPushMilestones(t *testing.T) {
+	bodies := make(chan []byte, 0)
+	mockServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		body, err := io.ReadAll(r.Body)
+		if err != nil {
+			t.Error(err)
+		}
+		if r.Header.Get("single-value") != "single-value" {
+			t.Error("single-value header not set")
+		}
+		if reflect.DeepEqual(r.Header.Get("multi-value"), "multi-value-1,multi-value-2") {
+			t.Error("single-value header not set")
+		}
+		bodies <- body
+		w.WriteHeader(http.StatusOK)
+	}))
+	listener, err := net.Listen("tcp", "localhost:8081")
+	if err != nil {
+		t.Fatal(err)
+	}
+	mockServer.Listener = listener
+	mockServer.Start()
+	t.Cleanup(mockServer.Close)
+	primaryDomain, instanceID, iamOwnerCtx := Tester.UseIsolatedInstance(CTX, SystemCTX)
+	t.Log("testing against instance with primary domain", primaryDomain)
+	awaitMilestone(t, bodies, primaryDomain, "InstanceCreated")
+	project, err := Tester.Client.Mgmt.AddProject(iamOwnerCtx, &management.AddProjectRequest{Name: "integration"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	awaitMilestone(t, bodies, primaryDomain, "ProjectCreated")
+	if _, err = Tester.Client.Mgmt.AddOIDCApp(iamOwnerCtx, &management.AddOIDCAppRequest{
+		ProjectId: project.GetId(),
+		Name:      "integration",
+	}); err != nil {
+		t.Fatal(err)
+	}
+	awaitMilestone(t, bodies, primaryDomain, "ApplicationCreated")
+	// TODO: trigger and await milestone AuthenticationSucceededOnInstance
+	// TODO: trigger and await milestone AuthenticationSucceededOnApplication
+	if _, err = Tester.Client.System.RemoveInstance(SystemCTX, &system.RemoveInstanceRequest{InstanceId: instanceID}); err != nil {
+		t.Fatal(err)
+	}
+	awaitMilestone(t, bodies, primaryDomain, "InstanceDeleted")
+}
+
+func awaitMilestone(t *testing.T, bodies chan []byte, primaryDomain, expectMilestoneType string) {
+	for {
+		select {
+		case body := <-bodies:
+			plain := new(bytes.Buffer)
+			if err := json.Indent(plain, body, "", "  "); err != nil {
+				t.Fatal(err)
+			}
+			t.Log("received milestone", plain.String())
+			milestone := struct {
+				Type          string
+				PrimaryDomain string
+			}{}
+			if err := json.Unmarshal(body, &milestone); err != nil {
+				t.Error(err)
+			}
+			if milestone.Type == expectMilestoneType && milestone.PrimaryDomain == primaryDomain {
+				return
+			}
+		case <-time.After(60 * time.Second):
+			t.Fatalf("timed out waiting for milestone %s in domain %s", expectMilestoneType, primaryDomain)
+		}
+	}
+}
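Outside of the mock server in the test above, a telemetry receiver only needs to accept the POSTed JSON. A minimal sketch of such an endpoint (the decoded field names mirror what awaitMilestone reads; everything else, including the port, is an assumption for illustration):

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Only Type and PrimaryDomain are decoded here, matching the integration test;
		// the pushed payload may carry more fields that are not assumed in this sketch.
		var ms struct {
			Type          string
			PrimaryDomain string
		}
		if err := json.NewDecoder(r.Body).Decode(&ms); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("milestone %s reached on %s", ms.Type, ms.PrimaryDomain)
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8081", nil)) // matches the endpoint configured above
}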
@@ -337,7 +337,7 @@ func (u *userNotifier) reduceDomainClaimed(event eventstore.Event) (*handler.Sta
 		return nil, errors.ThrowInvalidArgumentf(nil, "HANDL-Drh5w", "reduce.wrong.event.type %s", user.UserDomainClaimedType)
 	}
 	ctx := HandlerContext(event.Aggregate())
-	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, nil,
+	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, nil, user.AggregateType,
 		user.UserDomainClaimedType, user.UserDomainClaimedSentType)
 	if err != nil {
 		return nil, err
@@ -465,7 +465,7 @@ func (u *userNotifier) reducePasswordChanged(event eventstore.Event) (*handler.S
 		return nil, errors.ThrowInvalidArgumentf(nil, "HANDL-Yko2z8", "reduce.wrong.event.type %s", user.HumanPasswordChangedType)
 	}
 	ctx := HandlerContext(event.Aggregate())
-	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, nil, user.HumanPasswordChangeSentType)
+	alreadyHandled, err := u.queries.IsAlreadyHandled(ctx, event, nil, user.AggregateType, user.HumanPasswordChangeSentType)
 	if err != nil {
 		return nil, err
 	}
@@ -594,5 +594,5 @@ func (u *userNotifier) checkIfCodeAlreadyHandledOrExpired(ctx context.Context, e
 	if event.CreationDate().Add(expiry).Before(time.Now().UTC()) {
 		return true, nil
 	}
-	return u.queries.IsAlreadyHandled(ctx, event, data, eventTypes...)
+	return u.queries.IsAlreadyHandled(ctx, event, data, user.AggregateType, eventTypes...)
 }
@@ -29,6 +29,8 @@ func Start(
 	ctx context.Context,
 	userHandlerCustomConfig projection.CustomConfig,
 	quotaHandlerCustomConfig projection.CustomConfig,
+	telemetryHandlerCustomConfig projection.CustomConfig,
+	telemetryCfg handlers.TelemetryPusherConfig,
 	externalPort uint16,
 	externalSecure bool,
 	commands *command.Commands,
@@ -74,4 +76,15 @@ func Start(
 		metricSuccessfulDeliveriesJSON,
 		metricFailedDeliveriesJSON,
 	).Start()
+	if telemetryCfg.Enabled {
+		handlers.NewTelemetryPusher(
+			ctx,
+			telemetryCfg,
+			projection.ApplyCustomConfig(telemetryHandlerCustomConfig),
+			commands,
+			q,
+			metricSuccessfulDeliveriesJSON,
+			metricFailedDeliveriesJSON,
+		).Start()
+	}
 }

internal/query/milestone.go (new file, 146 lines)
@@ -0,0 +1,146 @@
+package query
+
+import (
+	"context"
+	"database/sql"
+	"time"
+
+	sq "github.com/Masterminds/squirrel"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/api/call"
+	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/query/projection"
+	"github.com/zitadel/zitadel/internal/repository/milestone"
+	"github.com/zitadel/zitadel/internal/telemetry/tracing"
+)
+
+type Milestones struct {
+	SearchResponse
+	Milestones []*Milestone
+}
+
+type Milestone struct {
+	InstanceID    string
+	Type          milestone.Type
+	ReachedDate   time.Time
+	PushedDate    time.Time
+	PrimaryDomain string
+}
+
+type MilestonesSearchQueries struct {
+	SearchRequest
+	Queries []SearchQuery
+}
+
+func (q *MilestonesSearchQueries) toQuery(query sq.SelectBuilder) sq.SelectBuilder {
+	query = q.SearchRequest.toQuery(query)
+	for _, q := range q.Queries {
+		query = q.toQuery(query)
+	}
+	return query
+}
+
+var (
+	milestonesTable = table{
+		name:          projection.MilestonesProjectionTable,
+		instanceIDCol: projection.MilestoneColumnInstanceID,
+	}
+	MilestoneInstanceIDColID = Column{
+		name:  projection.MilestoneColumnInstanceID,
+		table: milestonesTable,
+	}
+	MilestoneTypeColID = Column{
+		name:  projection.MilestoneColumnType,
+		table: milestonesTable,
+	}
+	MilestonePrimaryDomainColID = Column{
+		name:  projection.MilestoneColumnPrimaryDomain,
+		table: milestonesTable,
+	}
+	MilestoneReachedDateColID = Column{
+		name:  projection.MilestoneColumnReachedDate,
+		table: milestonesTable,
+	}
+	MilestonePushedDateColID = Column{
+		name:  projection.MilestoneColumnPushedDate,
+		table: milestonesTable,
+	}
+)
+
+// SearchMilestones tries to defer the instanceID from the passed context if no instanceIDs are passed
+func (q *Queries) SearchMilestones(ctx context.Context, instanceIDs []string, queries *MilestonesSearchQueries) (_ *Milestones, err error) {
+	ctx, span := tracing.NewSpan(ctx)
+	defer func() { span.EndWithError(err) }()
+	query, scan := prepareMilestonesQuery(ctx, q.client)
+	if len(instanceIDs) == 0 {
+		instanceIDs = []string{authz.GetInstance(ctx).InstanceID()}
+	}
+	stmt, args, err := queries.toQuery(query).Where(sq.Eq{MilestoneInstanceIDColID.identifier(): instanceIDs}).ToSql()
+	if err != nil {
+		return nil, errors.ThrowInternal(err, "QUERY-A9i5k", "Errors.Query.SQLStatement")
+	}
+	rows, err := q.client.QueryContext(ctx, stmt, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = errors.ThrowInternal(closeErr, "QUERY-CK9mI", "Errors.Query.CloseRows")
+		}
+	}()
+	milestones, err := scan(rows)
+	if err != nil {
+		return nil, err
+	}
+	if err = rows.Err(); err != nil {
+		return nil, errors.ThrowInternal(err, "QUERY-asLsI", "Errors.Internal")
+	}
+	milestones.LatestSequence, err = q.latestSequence(ctx, milestonesTable)
+	return milestones, err
+}
+
+func prepareMilestonesQuery(ctx context.Context, db prepareDatabase) (sq.SelectBuilder, func(*sql.Rows) (*Milestones, error)) {
+	return sq.Select(
+			MilestoneInstanceIDColID.identifier(),
+			MilestonePrimaryDomainColID.identifier(),
+			MilestoneReachedDateColID.identifier(),
+			MilestonePushedDateColID.identifier(),
+			MilestoneTypeColID.identifier(),
+			countColumn.identifier(),
+		).
+			From(milestonesTable.identifier() + db.Timetravel(call.Took(ctx))).
+			PlaceholderFormat(sq.Dollar),
+		func(rows *sql.Rows) (*Milestones, error) {
+			milestones := make([]*Milestone, 0)
+			var count uint64
+			for rows.Next() {
+				m := new(Milestone)
+				reachedDate := sql.NullTime{}
+				pushedDate := sql.NullTime{}
+				primaryDomain := sql.NullString{}
+				err := rows.Scan(
+					&m.InstanceID,
+					&primaryDomain,
+					&reachedDate,
+					&pushedDate,
+					&m.Type,
+					&count,
+				)
+				if err != nil {
+					return nil, err
+				}
+				m.PrimaryDomain = primaryDomain.String
+				m.ReachedDate = reachedDate.Time
+				m.PushedDate = pushedDate.Time
+				milestones = append(milestones, m)
+			}
+			return &Milestones{
+				Milestones: milestones,
+				SearchResponse: SearchResponse{
+					Count: count,
+				},
+			}, nil
+		}
+}

internal/query/milestone_test.go (new file, 189 lines)
@@ -0,0 +1,189 @@
+package query
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"regexp"
+	"testing"
+)
+
+var (
+	expectedMilestoneQuery = regexp.QuoteMeta(`
+		SELECT projections.milestones.instance_id,
+			projections.milestones.primary_domain,
+			projections.milestones.reached_date,
+			projections.milestones.last_pushed_date,
+			projections.milestones.type,
+			COUNT(*) OVER ()
+		FROM projections.milestones AS OF SYSTEM TIME '-1 ms'
+	`)
+
+	milestoneCols = []string{
+		"instance_id",
+		"primary_domain",
+		"reached_date",
+		"last_pushed_date",
+		"type",
+		"ignore_client_ids",
+	}
+)
+
+func Test_MilestonesPrepare(t *testing.T) {
+	type want struct {
+		sqlExpectations sqlExpectation
+		err             checkErr
+	}
+	tests := []struct {
+		name    string
+		prepare interface{}
+		want    want
+		object  interface{}
+	}{
+		{
+			name:    "prepareMilestonesQuery no result",
+			prepare: prepareMilestonesQuery,
+			want: want{
+				sqlExpectations: mockQueries(
+					expectedMilestoneQuery,
+					nil,
+					nil,
+				),
+			},
+			object: &Milestones{Milestones: []*Milestone{}},
+		},
+		{
+			name:    "prepareMilestonesQuery",
+			prepare: prepareMilestonesQuery,
+			want: want{
+				sqlExpectations: mockQueries(
+					expectedMilestoneQuery,
+					milestoneCols,
+					[][]driver.Value{
+						{
+							"instance-id",
+							"primary.domain",
+							testNow,
+							testNow,
+							1,
+							1,
+						},
+					},
+				),
+			},
+			object: &Milestones{
+				SearchResponse: SearchResponse{
+					Count: 1,
+				},
+				Milestones: []*Milestone{
+					{
+						InstanceID:    "instance-id",
+						Type:          1,
+						ReachedDate:   testNow,
+						PushedDate:    testNow,
+						PrimaryDomain: "primary.domain",
+					},
+				},
+			},
+		},
+		{
+			name:    "prepareMilestonesQuery multiple result",
+			prepare: prepareMilestonesQuery,
+			want: want{
+				sqlExpectations: mockQueries(
+					expectedMilestoneQuery,
+					milestoneCols,
+					[][]driver.Value{
+						{
+							"instance-id",
+							"primary.domain",
+							testNow,
+							testNow,
+							1,
+							1,
+						},
+						{
+							"instance-id",
+							"primary.domain",
+							testNow,
+							testNow,
+							2,
+							2,
+						},
+						{
+							"instance-id",
+							"primary.domain",
+							testNow,
+							nil,
+							3,
+							3,
+						},
+						{
+							"instance-id",
+							"primary.domain",
+							nil,
+							nil,
+							4,
+							4,
+						},
+					},
+				),
+			},
+			object: &Milestones{
+				SearchResponse: SearchResponse{
+					Count: 4,
+				},
+				Milestones: []*Milestone{
+					{
+						InstanceID:    "instance-id",
+						Type:          1,
+						ReachedDate:   testNow,
+						PushedDate:    testNow,
+						PrimaryDomain: "primary.domain",
+					},
+					{
+						InstanceID:    "instance-id",
+						Type:          2,
+						ReachedDate:   testNow,
+						PushedDate:    testNow,
+						PrimaryDomain: "primary.domain",
+					},
+					{
+						InstanceID:    "instance-id",
+						Type:          3,
+						ReachedDate:   testNow,
+						PrimaryDomain: "primary.domain",
+					},
+					{
+						InstanceID:    "instance-id",
+						Type:          4,
+						PrimaryDomain: "primary.domain",
+					},
+				},
+			},
+		},
+		{
+			name:    "prepareMilestonesQuery sql err",
+			prepare: prepareMilestonesQuery,
+			want: want{
+				sqlExpectations: mockQueryErr(
+					expectedMilestoneQuery,
+					sql.ErrConnDone,
+				),
+				err: func(err error) (error, bool) {
+					if !errors.Is(err, sql.ErrConnDone) {
+						return fmt.Errorf("err should be sql.ErrConnDone got: %w", err), false
+					}
+					return nil, true
+				},
+			},
+			object: nil,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assertPrepare(t, tt.prepare, tt.object, tt.want.sqlExpectations, tt.want.err, defaultPrepareArgs...)
+		})
+	}
+}

internal/query/projection/assert.go (new file, 14 lines)
@@ -0,0 +1,14 @@
+package projection
+
+import (
+	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/eventstore"
+)
+
+func assertEvent[T eventstore.Event](event eventstore.Event) (T, error) {
+	e, ok := event.(T)
+	if !ok {
+		return e, errors.ThrowInvalidArgumentf(nil, "HANDL-1m9fS", "reduce.wrong.event.type %T", event)
+	}
+	return e, nil
+}

internal/query/projection/assert_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
+package projection
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/zitadel/zitadel/internal/eventstore"
+	"github.com/zitadel/zitadel/internal/repository/instance"
+)
+
+func Test_assertEvent(t *testing.T) {
+	type args struct {
+		event      eventstore.Event
+		assertFunc func(eventstore.Event) (eventstore.Event, error)
+	}
+	type testCase struct {
+		name    string
+		args    args
+		wantErr assert.ErrorAssertionFunc
+	}
+	tests := []testCase{
+		{
+			name: "correct event type",
+			args: args{
+				event: instance.NewInstanceAddedEvent(context.Background(), &instance.NewAggregate("instance-id").Aggregate, "instance-name"),
+				assertFunc: func(event eventstore.Event) (eventstore.Event, error) {
+					return assertEvent[*instance.InstanceAddedEvent](event)
+				},
+			},
+			wantErr: assert.NoError,
+		}, {
+			name: "wrong event type",
+			args: args{
+				event: instance.NewInstanceRemovedEvent(context.Background(), &instance.NewAggregate("instance-id").Aggregate, "instance-name", nil),
+				assertFunc: func(event eventstore.Event) (eventstore.Event, error) {
+					return assertEvent[*instance.InstanceAddedEvent](event)
+				},
+			},
+			wantErr: assert.Error,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := tt.args.assertFunc(tt.args.event)
+			if !tt.wantErr(t, err) {
+				return
+			}
+		})
+	}
+}
@ -14,12 +14,26 @@ func testEvent(
|
|||||||
eventType repository.EventType,
|
eventType repository.EventType,
|
||||||
aggregateType repository.AggregateType,
|
aggregateType repository.AggregateType,
|
||||||
data []byte,
|
data []byte,
|
||||||
|
) *repository.Event {
|
||||||
|
return timedTestEvent(eventType, aggregateType, data, time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
func toSystemEvent(event *repository.Event) *repository.Event {
|
||||||
|
event.EditorService = "SYSTEM"
|
||||||
|
return event
|
||||||
|
}
|
||||||
|
|
||||||
|
func timedTestEvent(
|
||||||
|
eventType repository.EventType,
|
||||||
|
aggregateType repository.AggregateType,
|
||||||
|
data []byte,
|
||||||
|
creationDate time.Time,
|
||||||
) *repository.Event {
|
) *repository.Event {
|
||||||
return &repository.Event{
|
return &repository.Event{
|
||||||
Sequence: 15,
|
Sequence: 15,
|
||||||
PreviousAggregateSequence: 10,
|
PreviousAggregateSequence: 10,
|
||||||
PreviousAggregateTypeSequence: 10,
|
PreviousAggregateTypeSequence: 10,
|
||||||
CreationDate: time.Now(),
|
CreationDate: creationDate,
|
||||||
Type: eventType,
|
Type: eventType,
|
||||||
AggregateType: aggregateType,
|
AggregateType: aggregateType,
|
||||||
Data: data,
|
Data: data,
|
||||||
|
@ -402,10 +402,10 @@ func (p *labelPolicyProjection) reduceActivated(event eventstore.Event) (*handle
|
|||||||
handler.NewCol(LabelPolicyDarkLogoURLCol, nil),
|
handler.NewCol(LabelPolicyDarkLogoURLCol, nil),
|
||||||
handler.NewCol(LabelPolicyDarkIconURLCol, nil),
|
handler.NewCol(LabelPolicyDarkIconURLCol, nil),
|
||||||
},
|
},
|
||||||
[]handler.Condition{
|
[]handler.NamespacedCondition{
|
||||||
handler.NewCond(LabelPolicyIDCol, event.Aggregate().ID),
|
handler.NewNamespacedCondition(LabelPolicyIDCol, event.Aggregate().ID),
|
||||||
handler.NewCond(LabelPolicyStateCol, domain.LabelPolicyStatePreview),
|
handler.NewNamespacedCondition(LabelPolicyStateCol, domain.LabelPolicyStatePreview),
|
||||||
handler.NewCond(LabelPolicyInstanceIDCol, event.Aggregate().InstanceID),
|
handler.NewNamespacedCondition(LabelPolicyInstanceIDCol, event.Aggregate().InstanceID),
|
||||||
}), nil
|
}), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,7 +170,7 @@ func TestLabelPolicyProjection_reduces(t *testing.T) {
|
|||||||
executer: &testExecuter{
|
executer: &testExecuter{
|
||||||
executions: []execution{
|
executions: []execution{
|
||||||
{
|
{
|
||||||
expectedStmt: "INSERT INTO projections.label_policies2 (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) SELECT $1, $2, $3, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url FROM projections.label_policies2 AS copy_table WHERE copy_table.id = $4 AND copy_table.state = $5 AND copy_table.instance_id = $6 ON CONFLICT (instance_id, id, state) DO UPDATE SET (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) = ($1, $2, $3, EXCLUDED.creation_date, EXCLUDED.resource_owner, EXCLUDED.instance_id, EXCLUDED.id, EXCLUDED.is_default, EXCLUDED.hide_login_name_suffix, EXCLUDED.font_url, EXCLUDED.watermark_disabled, EXCLUDED.should_error_popup, EXCLUDED.light_primary_color, EXCLUDED.light_warn_color, EXCLUDED.light_background_color, EXCLUDED.light_font_color, EXCLUDED.light_logo_url, EXCLUDED.light_icon_url, EXCLUDED.dark_primary_color, EXCLUDED.dark_warn_color, EXCLUDED.dark_background_color, EXCLUDED.dark_font_color, EXCLUDED.dark_logo_url, EXCLUDED.dark_icon_url)",
|
expectedStmt: "INSERT INTO projections.label_policies2 (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) SELECT $1, $2, $3, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url FROM projections.label_policies2 AS copy_table WHERE (copy_table.id = $4) AND (copy_table.state = $5) AND (copy_table.instance_id = $6) ON CONFLICT (instance_id, id, state) DO UPDATE SET (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) = ($1, $2, $3, EXCLUDED.creation_date, EXCLUDED.resource_owner, EXCLUDED.instance_id, EXCLUDED.id, EXCLUDED.is_default, EXCLUDED.hide_login_name_suffix, EXCLUDED.font_url, EXCLUDED.watermark_disabled, EXCLUDED.should_error_popup, EXCLUDED.light_primary_color, EXCLUDED.light_warn_color, EXCLUDED.light_background_color, EXCLUDED.light_font_color, EXCLUDED.light_logo_url, EXCLUDED.light_icon_url, EXCLUDED.dark_primary_color, EXCLUDED.dark_warn_color, EXCLUDED.dark_background_color, EXCLUDED.dark_font_color, EXCLUDED.dark_logo_url, EXCLUDED.dark_icon_url)",
expectedArgs: []interface{}{
	anyArg{},
	uint64(15),
@ -631,7 +631,7 @@ func TestLabelPolicyProjection_reduces(t *testing.T) {
executer: &testExecuter{
	executions: []execution{
		{
expectedStmt: "INSERT INTO projections.label_policies2 (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) SELECT $1, $2, $3, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url FROM projections.label_policies2 AS copy_table WHERE copy_table.id = $4 AND copy_table.state = $5 AND copy_table.instance_id = $6 ON CONFLICT (instance_id, id, state) DO UPDATE SET (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) = ($1, $2, $3, EXCLUDED.creation_date, EXCLUDED.resource_owner, EXCLUDED.instance_id, EXCLUDED.id, EXCLUDED.is_default, EXCLUDED.hide_login_name_suffix, EXCLUDED.font_url, EXCLUDED.watermark_disabled, EXCLUDED.should_error_popup, EXCLUDED.light_primary_color, EXCLUDED.light_warn_color, EXCLUDED.light_background_color, EXCLUDED.light_font_color, EXCLUDED.light_logo_url, EXCLUDED.light_icon_url, EXCLUDED.dark_primary_color, EXCLUDED.dark_warn_color, EXCLUDED.dark_background_color, EXCLUDED.dark_font_color, EXCLUDED.dark_logo_url, EXCLUDED.dark_icon_url)",
expectedStmt: "INSERT INTO projections.label_policies2 (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) SELECT $1, $2, $3, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url FROM projections.label_policies2 AS copy_table WHERE (copy_table.id = $4) AND (copy_table.state = $5) AND (copy_table.instance_id = $6) ON CONFLICT (instance_id, id, state) DO UPDATE SET (change_date, sequence, state, creation_date, resource_owner, instance_id, id, is_default, hide_login_name_suffix, font_url, watermark_disabled, should_error_popup, light_primary_color, light_warn_color, light_background_color, light_font_color, light_logo_url, light_icon_url, dark_primary_color, dark_warn_color, dark_background_color, dark_font_color, dark_logo_url, dark_icon_url) = ($1, $2, $3, EXCLUDED.creation_date, EXCLUDED.resource_owner, EXCLUDED.instance_id, EXCLUDED.id, EXCLUDED.is_default, EXCLUDED.hide_login_name_suffix, EXCLUDED.font_url, EXCLUDED.watermark_disabled, EXCLUDED.should_error_popup, EXCLUDED.light_primary_color, EXCLUDED.light_warn_color, EXCLUDED.light_background_color, EXCLUDED.light_font_color, EXCLUDED.light_logo_url, EXCLUDED.light_icon_url, EXCLUDED.dark_primary_color, EXCLUDED.dark_warn_color, EXCLUDED.dark_background_color, EXCLUDED.dark_font_color, EXCLUDED.dark_logo_url, EXCLUDED.dark_icon_url)",
expectedArgs: []interface{}{
	anyArg{},
	uint64(15),
295
internal/query/projection/milestones.go
Normal file
@ -0,0 +1,295 @@
package projection

import (
	"context"
	"strconv"
	"strings"

	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/eventstore/handler"
	"github.com/zitadel/zitadel/internal/eventstore/handler/crdb"
	"github.com/zitadel/zitadel/internal/repository/instance"
	"github.com/zitadel/zitadel/internal/repository/milestone"
	"github.com/zitadel/zitadel/internal/repository/project"
	"github.com/zitadel/zitadel/internal/repository/user"
)

const (
	MilestonesProjectionTable = "projections.milestones"

	MilestoneColumnInstanceID      = "instance_id"
	MilestoneColumnType            = "type"
	MilestoneColumnPrimaryDomain   = "primary_domain"
	MilestoneColumnReachedDate     = "reached_date"
	MilestoneColumnPushedDate      = "last_pushed_date"
	MilestoneColumnIgnoreClientIDs = "ignore_client_ids"
)

type milestoneProjection struct {
	crdb.StatementHandler
}

func newMilestoneProjection(ctx context.Context, config crdb.StatementHandlerConfig) *milestoneProjection {
	p := new(milestoneProjection)
	config.ProjectionName = MilestonesProjectionTable
	config.Reducers = p.reducers()
	config.InitCheck = crdb.NewMultiTableCheck(
		crdb.NewTable([]*crdb.Column{
			crdb.NewColumn(MilestoneColumnInstanceID, crdb.ColumnTypeText),
			crdb.NewColumn(MilestoneColumnType, crdb.ColumnTypeEnum),
			crdb.NewColumn(MilestoneColumnReachedDate, crdb.ColumnTypeTimestamp, crdb.Nullable()),
			crdb.NewColumn(MilestoneColumnPushedDate, crdb.ColumnTypeTimestamp, crdb.Nullable()),
			crdb.NewColumn(MilestoneColumnPrimaryDomain, crdb.ColumnTypeText, crdb.Nullable()),
			crdb.NewColumn(MilestoneColumnIgnoreClientIDs, crdb.ColumnTypeTextArray, crdb.Nullable()),
		},
			crdb.NewPrimaryKey(MilestoneColumnInstanceID, MilestoneColumnType),
		),
	)
	p.StatementHandler = crdb.NewStatementHandler(ctx, config)
	return p
}

func (p *milestoneProjection) reducers() []handler.AggregateReducer {
	return []handler.AggregateReducer{
		{
			Aggregate: instance.AggregateType,
			EventRedusers: []handler.EventReducer{
				{
					Event:  instance.InstanceAddedEventType,
					Reduce: p.reduceInstanceAdded,
				},
				{
					Event:  instance.InstanceDomainPrimarySetEventType,
					Reduce: p.reduceInstanceDomainPrimarySet,
				},
				{
					Event:  instance.InstanceRemovedEventType,
					Reduce: p.reduceInstanceRemoved,
				},
			},
		},
		{
			Aggregate: project.AggregateType,
			EventRedusers: []handler.EventReducer{
				{
					Event:  project.ProjectAddedType,
					Reduce: p.reduceProjectAdded,
				},
				{
					Event:  project.ApplicationAddedType,
					Reduce: p.reduceApplicationAdded,
				},
				{
					Event:  project.OIDCConfigAddedType,
					Reduce: p.reduceOIDCConfigAdded,
				},
				{
					Event:  project.APIConfigAddedType,
					Reduce: p.reduceAPIConfigAdded,
				},
			},
		},
		{
			Aggregate: user.AggregateType,
			EventRedusers: []handler.EventReducer{
				{
					// user.UserTokenAddedType is not emitted on creation of personal access tokens
					// PATs have no effect on milestone.AuthenticationSucceededOnApplication or milestone.AuthenticationSucceededOnInstance
					Event:  user.UserTokenAddedType,
					Reduce: p.reduceUserTokenAdded,
				},
			},
		},
		{
			Aggregate: milestone.AggregateType,
			EventRedusers: []handler.EventReducer{
				{
					Event:  milestone.PushedEventType,
					Reduce: p.reduceMilestonePushed,
				},
			},
		},
	}
}

func (p *milestoneProjection) reduceInstanceAdded(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*instance.InstanceAddedEvent](event)
	if err != nil {
		return nil, err
	}
	allTypes := milestone.AllTypes()
	statements := make([]func(eventstore.Event) crdb.Exec, 0, len(allTypes))
	for _, msType := range allTypes {
		createColumns := []handler.Column{
			handler.NewCol(MilestoneColumnInstanceID, e.Aggregate().InstanceID),
			handler.NewCol(MilestoneColumnType, msType),
		}
		if msType == milestone.InstanceCreated {
			createColumns = append(createColumns, handler.NewCol(MilestoneColumnReachedDate, event.CreationDate()))
		}
		statements = append(statements, crdb.AddCreateStatement(createColumns))
	}
	return crdb.NewMultiStatement(e, statements...), nil
}

func (p *milestoneProjection) reduceInstanceDomainPrimarySet(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*instance.DomainPrimarySetEvent](event)
	if err != nil {
		return nil, err
	}
	return crdb.NewUpdateStatement(
		e,
		[]handler.Column{
			handler.NewCol(MilestoneColumnPrimaryDomain, e.Domain),
		},
		[]handler.Condition{
			handler.NewCond(MilestoneColumnInstanceID, e.Aggregate().InstanceID),
			crdb.NewIsNullCond(MilestoneColumnPushedDate),
		},
	), nil
}

func (p *milestoneProjection) reduceProjectAdded(event eventstore.Event) (*handler.Statement, error) {
	if _, err := assertEvent[*project.ProjectAddedEvent](event); err != nil {
		return nil, err
	}
	return p.reduceReachedIfUserEventFunc(milestone.ProjectCreated)(event)
}

func (p *milestoneProjection) reduceApplicationAdded(event eventstore.Event) (*handler.Statement, error) {
	if _, err := assertEvent[*project.ApplicationAddedEvent](event); err != nil {
		return nil, err
	}
	return p.reduceReachedIfUserEventFunc(milestone.ApplicationCreated)(event)
}

func (p *milestoneProjection) reduceOIDCConfigAdded(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*project.OIDCConfigAddedEvent](event)
	if err != nil {
		return nil, err
	}
	return p.reduceAppConfigAdded(e, e.ClientID)
}

func (p *milestoneProjection) reduceAPIConfigAdded(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*project.APIConfigAddedEvent](event)
	if err != nil {
		return nil, err
	}
	return p.reduceAppConfigAdded(e, e.ClientID)
}

func (p *milestoneProjection) reduceUserTokenAdded(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*user.UserTokenAddedEvent](event)
	if err != nil {
		return nil, err
	}
	statements := []func(eventstore.Event) crdb.Exec{
		crdb.AddUpdateStatement(
			[]handler.Column{
				handler.NewCol(MilestoneColumnReachedDate, event.CreationDate()),
			},
			[]handler.Condition{
				handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
				handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnInstance),
				crdb.NewIsNullCond(MilestoneColumnReachedDate),
			},
		),
	}
	// We ignore authentications without app, for example JWT profile or PAT
	if e.ApplicationID != "" {
		statements = append(statements, crdb.AddUpdateStatement(
			[]handler.Column{
				handler.NewCol(MilestoneColumnReachedDate, event.CreationDate()),
			},
			[]handler.Condition{
				handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
				handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnApplication),
				crdb.Not(crdb.NewTextArrayContainsCond(MilestoneColumnIgnoreClientIDs, e.ApplicationID)),
				crdb.NewIsNullCond(MilestoneColumnReachedDate),
			},
		))
	}
	return crdb.NewMultiStatement(e, statements...), nil
}

func (p *milestoneProjection) reduceInstanceRemoved(event eventstore.Event) (*handler.Statement, error) {
	if _, err := assertEvent[*instance.InstanceRemovedEvent](event); err != nil {
		return nil, err
	}
	return p.reduceReachedFunc(milestone.InstanceDeleted)(event)
}

func (p *milestoneProjection) reduceMilestonePushed(event eventstore.Event) (*handler.Statement, error) {
	e, err := assertEvent[*milestone.PushedEvent](event)
	if err != nil {
		return nil, err
	}
	if e.MilestoneType != milestone.InstanceDeleted {
		return crdb.NewUpdateStatement(
			event,
			[]handler.Column{
				handler.NewCol(MilestoneColumnPushedDate, event.CreationDate()),
			},
			[]handler.Condition{
				handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
				handler.NewCond(MilestoneColumnType, e.MilestoneType),
			},
		), nil
	}
	return crdb.NewDeleteStatement(
		event,
		[]handler.Condition{
			handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
		},
	), nil
}

func (p *milestoneProjection) reduceReachedIfUserEventFunc(msType milestone.Type) func(event eventstore.Event) (*handler.Statement, error) {
	return func(event eventstore.Event) (*handler.Statement, error) {
		if p.isSystemEvent(event) {
			return crdb.NewNoOpStatement(event), nil
		}
		return p.reduceReachedFunc(msType)(event)
	}
}

func (p *milestoneProjection) reduceReachedFunc(msType milestone.Type) func(event eventstore.Event) (*handler.Statement, error) {
	return func(event eventstore.Event) (*handler.Statement, error) {
		return crdb.NewUpdateStatement(event, []handler.Column{
			handler.NewCol(MilestoneColumnReachedDate, event.CreationDate()),
		},
			[]handler.Condition{
				handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
				handler.NewCond(MilestoneColumnType, msType),
				crdb.NewIsNullCond(MilestoneColumnReachedDate),
			}), nil
	}
}

func (p *milestoneProjection) reduceAppConfigAdded(event eventstore.Event, clientID string) (*handler.Statement, error) {
	if !p.isSystemEvent(event) {
		return crdb.NewNoOpStatement(event), nil
	}
	return crdb.NewUpdateStatement(
		event,
		[]handler.Column{
			crdb.NewArrayAppendCol(MilestoneColumnIgnoreClientIDs, clientID),
		},
		[]handler.Condition{
			handler.NewCond(MilestoneColumnInstanceID, event.Aggregate().InstanceID),
			handler.NewCond(MilestoneColumnType, milestone.AuthenticationSucceededOnApplication),
			crdb.NewIsNullCond(MilestoneColumnReachedDate),
		},
	), nil
}

func (p *milestoneProjection) isSystemEvent(event eventstore.Event) bool {
	if userId, err := strconv.Atoi(event.EditorUser()); err == nil && userId > 0 {
		return false
	}
	lowerEditorService := strings.ToLower(event.EditorService())
	return lowerEditorService == "" ||
		lowerEditorService == "system" ||
		lowerEditorService == "system-api"
}
404
internal/query/projection/milestones_test.go
Normal file
@ -0,0 +1,404 @@
package projection

import (
	"testing"
	"time"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/errors"
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/eventstore/handler"
	"github.com/zitadel/zitadel/internal/eventstore/repository"
	"github.com/zitadel/zitadel/internal/repository/instance"
	"github.com/zitadel/zitadel/internal/repository/milestone"
	"github.com/zitadel/zitadel/internal/repository/project"
	"github.com/zitadel/zitadel/internal/repository/user"
)

func TestMilestonesProjection_reduces(t *testing.T) {
	type args struct {
		event func(t *testing.T) eventstore.Event
	}
	now := time.Now()
	tests := []struct {
		name   string
		args   args
		reduce func(event eventstore.Event) (*handler.Statement, error)
		want   wantReduce
	}{
		{
			name: "reduceInstanceAdded",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(instance.InstanceAddedEventType),
					instance.AggregateType,
					[]byte(`{}`),
					now,
				), instance.InstanceAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceInstanceAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("instance"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type, reached_date) VALUES ($1, $2, $3)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.InstanceCreated,
								now,
							},
						},
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.AuthenticationSucceededOnInstance,
							},
						},
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.ProjectCreated,
							},
						},
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.ApplicationCreated,
							},
						},
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.AuthenticationSucceededOnApplication,
							},
						},
						{
							expectedStmt: "INSERT INTO projections.milestones (instance_id, type) VALUES ($1, $2)",
							expectedArgs: []interface{}{
								"instance-id",
								milestone.InstanceDeleted,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceInstancePrimaryDomainSet",
			args: args{
				event: getEvent(testEvent(
					repository.EventType(instance.InstanceDomainPrimarySetEventType),
					instance.AggregateType,
					[]byte(`{"domain": "my.domain"}`),
				), instance.DomainPrimarySetEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceInstanceDomainPrimarySet,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("instance"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET primary_domain = $1 WHERE (instance_id = $2) AND (last_pushed_date IS NULL)",
							expectedArgs: []interface{}{
								"my.domain",
								"instance-id",
							},
						},
					},
				},
			},
		},
		{
			name: "reduceProjectAdded",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(project.ProjectAddedType),
					project.AggregateType,
					[]byte(`{}`),
					now,
				), project.ProjectAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceProjectAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.ProjectCreated,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceApplicationAdded",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(project.ApplicationAddedType),
					project.AggregateType,
					[]byte(`{}`),
					now,
				), project.ApplicationAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceApplicationAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.ApplicationCreated,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceOIDCConfigAdded user event",
			args: args{
				event: getEvent(testEvent(
					repository.EventType(project.OIDCConfigAddedType),
					project.AggregateType,
					[]byte(`{}`),
				), project.OIDCConfigAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceOIDCConfigAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer:         &testExecuter{},
			},
		},
		{
			name: "reduceOIDCConfigAdded system event",
			args: args{
				event: getEvent(toSystemEvent(testEvent(
					repository.EventType(project.OIDCConfigAddedType),
					project.AggregateType,
					[]byte(`{"clientId": "client-id"}`),
				)), project.OIDCConfigAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceOIDCConfigAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET ignore_client_ids = array_append(ignore_client_ids, $1) WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								"client-id",
								"instance-id",
								milestone.AuthenticationSucceededOnApplication,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceAPIConfigAdded user event",
			args: args{
				event: getEvent(testEvent(
					repository.EventType(project.APIConfigAddedType),
					project.AggregateType,
					[]byte(`{}`),
				), project.APIConfigAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceAPIConfigAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer:         &testExecuter{},
			},
		},
		{
			name: "reduceAPIConfigAdded system event",
			args: args{
				event: getEvent(toSystemEvent(testEvent(
					repository.EventType(project.APIConfigAddedType),
					project.AggregateType,
					[]byte(`{"clientId": "client-id"}`),
				)), project.APIConfigAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceAPIConfigAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("project"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET ignore_client_ids = array_append(ignore_client_ids, $1) WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								"client-id",
								"instance-id",
								milestone.AuthenticationSucceededOnApplication,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceUserTokenAdded",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(user.UserTokenAddedType),
					user.AggregateType,
					[]byte(`{"applicationId": "client-id"}`),
					now,
				), user.UserTokenAddedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceUserTokenAdded,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("user"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					// TODO: This can be optimized to only use one statement with OR
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.AuthenticationSucceededOnInstance,
							},
						},
						{
							expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (NOT (ignore_client_ids @> $4)) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.AuthenticationSucceededOnApplication,
								database.StringArray{"client-id"},
							},
						},
					},
				},
			},
		},
		{
			name: "reduceInstanceRemoved",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(instance.InstanceRemovedEventType),
					instance.AggregateType,
					[]byte(`{}`),
					now,
				), instance.InstanceRemovedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceInstanceRemoved,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("instance"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET reached_date = $1 WHERE (instance_id = $2) AND (type = $3) AND (reached_date IS NULL)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.InstanceDeleted,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceMilestonePushed normal milestone",
			args: args{
				event: getEvent(timedTestEvent(
					repository.EventType(milestone.PushedEventType),
					milestone.AggregateType,
					[]byte(`{"type": "ProjectCreated"}`),
					now,
				), milestone.PushedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceMilestonePushed,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("milestone"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "UPDATE projections.milestones SET last_pushed_date = $1 WHERE (instance_id = $2) AND (type = $3)",
							expectedArgs: []interface{}{
								now,
								"instance-id",
								milestone.ProjectCreated,
							},
						},
					},
				},
			},
		},
		{
			name: "reduceMilestonePushed instance deleted milestone",
			args: args{
				event: getEvent(testEvent(
					repository.EventType(milestone.PushedEventType),
					milestone.AggregateType,
					[]byte(`{"type": "InstanceDeleted"}`),
				), milestone.PushedEventMapper),
			},
			reduce: (&milestoneProjection{}).reduceMilestonePushed,
			want: wantReduce{
				aggregateType:    eventstore.AggregateType("milestone"),
				sequence:         15,
				previousSequence: 10,
				executer: &testExecuter{
					executions: []execution{
						{
							expectedStmt: "DELETE FROM projections.milestones WHERE (instance_id = $1)",
							expectedArgs: []interface{}{
								"instance-id",
							},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			event := baseEvent(t)
			got, err := tt.reduce(event)
			if !errors.IsErrorInvalidArgument(err) {
				t.Errorf("no wrong event mapping: %v, got: %v", err, got)
			}
			event = tt.args.event(t)
			got, err = tt.reduce(event)
			assertReduce(t, got, err, MilestonesProjectionTable, tt.want)
		})
	}
}
@ -64,8 +64,10 @@ var (
	NotificationPolicyProjection *notificationPolicyProjection
	NotificationsProjection      interface{}
	NotificationsQuotaProjection interface{}
	TelemetryPusherProjection    interface{}
	DeviceAuthProjection         *deviceAuthProjection
	SessionProjection            *sessionProjection
	MilestoneProjection          *milestoneProjection
)

type projection interface {
@ -143,6 +145,7 @@ func Create(ctx context.Context, sqlClient *database.DB, es *eventstore.Eventsto
	NotificationPolicyProjection = newNotificationPolicyProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["notification_policies"]))
	DeviceAuthProjection = newDeviceAuthProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["device_auth"]))
	SessionProjection = newSessionProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["sessions"]))
	MilestoneProjection = newMilestoneProjection(ctx, applyCustomConfig(projectionConfig, config.Customizations["milestones"]))
	newProjectionsList()
	return nil
}
@ -191,7 +194,7 @@ func applyCustomConfig(config crdb.StatementHandlerConfig, customConfig CustomCo
// as setup and start currently create them individually, we make sure we get the right one
// will be refactored when changing to new id based projections
//
// NotificationsProjection is not added here, because it is not statement based / has no proprietary projection table
// Event handlers NotificationsProjection, NotificationsQuotaProjection and TelemetryPusherProjection are not added here, because they do not reduce to database statements
func newProjectionsList() {
	projections = []projection{
		OrgProjection,
@ -240,5 +243,6 @@ func newProjectionsList() {
		NotificationPolicyProjection,
		DeviceAuthProjection,
		SessionProjection,
		MilestoneProjection,
	}
}
@ -66,6 +66,27 @@ func (q *NotNullQuery) comp() sq.Sqlizer {
	return sq.NotEq{q.Column.identifier(): nil}
}

type IsNullQuery struct {
	Column Column
}

func NewIsNullQuery(col Column) (*IsNullQuery, error) {
	if col.isZero() {
		return nil, ErrMissingColumn
	}
	return &IsNullQuery{
		Column: col,
	}, nil
}

func (q *IsNullQuery) toQuery(query sq.SelectBuilder) sq.SelectBuilder {
	return query.Where(q.comp())
}

func (q *IsNullQuery) comp() sq.Sqlizer {
	return sq.Eq{q.Column.identifier(): nil}
}

type orQuery struct {
	queries []SearchQuery
}
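IsNullQuery is the counterpart to the existing NotNullQuery and backs the IS NULL conditions seen in the milestone statements above. A rough usage sketch inside the query package, with the column variable name assumed rather than taken from this change:

	// hypothetical sketch: restrict a milestone search to rows that were never pushed
	q, err := NewIsNullQuery(MilestoneNotPushedDateCol) // column identifier assumed
	if err != nil {
		return nil, err
	}
	stmt = q.toQuery(stmt) // appends "last_pushed_date IS NULL" to the WHERE clause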
30
internal/repository/milestone/aggregate.go
Normal file
@ -0,0 +1,30 @@
package milestone

import (
	"context"

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/eventstore"
)

const (
	AggregateType    = "milestone"
	AggregateVersion = "v1"
)

type Aggregate struct {
	eventstore.Aggregate
}

func NewAggregate(ctx context.Context, id string) *Aggregate {
	instanceID := authz.GetInstance(ctx).InstanceID()
	return &Aggregate{
		Aggregate: eventstore.Aggregate{
			Type:          AggregateType,
			Version:       AggregateVersion,
			ID:            id,
			ResourceOwner: instanceID,
			InstanceID:    instanceID,
		},
	}
}
54
internal/repository/milestone/events.go
Normal file
@ -0,0 +1,54 @@
package milestone

import (
	"context"

	"github.com/zitadel/zitadel/internal/eventstore"
)

const (
	eventTypePrefix = eventstore.EventType("milestone.")
	PushedEventType = eventTypePrefix + "pushed"
)

type PushedEvent struct {
	*eventstore.BaseEvent `json:"-"`
	MilestoneType         Type     `json:"type"`
	ExternalDomain        string   `json:"externalDomain"`
	PrimaryDomain         string   `json:"primaryDomain"`
	Endpoints             []string `json:"endpoints"`
}

func (p *PushedEvent) Data() interface{} {
	return p
}

func (p *PushedEvent) UniqueConstraints() []*eventstore.EventUniqueConstraint {
	return nil
}

func (p *PushedEvent) SetBaseEvent(b *eventstore.BaseEvent) {
	p.BaseEvent = b
}

var PushedEventMapper = eventstore.GenericEventMapper[PushedEvent]

func NewPushedEvent(
	ctx context.Context,
	aggregate *Aggregate,
	msType Type,
	endpoints []string,
	externalDomain, primaryDomain string,
) *PushedEvent {
	return &PushedEvent{
		BaseEvent: eventstore.NewBaseEventForPush(
			ctx,
			&aggregate.Aggregate,
			PushedEventType,
		),
		MilestoneType:  msType,
		Endpoints:      endpoints,
		ExternalDomain: externalDomain,
		PrimaryDomain:  primaryDomain,
	}
}
9
internal/repository/milestone/eventstore.go
Normal file
@ -0,0 +1,9 @@
package milestone

import (
	"github.com/zitadel/zitadel/internal/eventstore"
)

func RegisterEventMappers(es *eventstore.Eventstore) {
	es.RegisterFilterEventMapper(AggregateType, PushedEventType, PushedEventMapper)
}
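The registration itself would be wired into the eventstore setup alongside the other repositories; that call site is not shown in this section, so the following line is only a sketch of the assumed wiring:

	milestone.RegisterEventMappers(es) // assumed call during eventstore setup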
59
internal/repository/milestone/type.go
Normal file
@ -0,0 +1,59 @@
//go:generate stringer -type Type

package milestone

import (
	"fmt"
	"strings"
)

type Type int

const (
	unknown Type = iota

	InstanceCreated
	AuthenticationSucceededOnInstance
	ProjectCreated
	ApplicationCreated
	AuthenticationSucceededOnApplication
	InstanceDeleted

	typesCount
)

func AllTypes() []Type {
	types := make([]Type, typesCount-1)
	for i := Type(1); i < typesCount; i++ {
		types[i-1] = i
	}
	return types
}

func (t *Type) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%s"`, t.String())), nil
}

func (t *Type) UnmarshalJSON(data []byte) error {
	*t = typeFromString(strings.Trim(string(data), `"`))
	return nil
}

func typeFromString(t string) Type {
	switch t {
	case InstanceCreated.String():
		return InstanceCreated
	case AuthenticationSucceededOnInstance.String():
		return AuthenticationSucceededOnInstance
	case ProjectCreated.String():
		return ProjectCreated
	case ApplicationCreated.String():
		return ApplicationCreated
	case AuthenticationSucceededOnApplication.String():
		return AuthenticationSucceededOnApplication
	case InstanceDeleted.String():
		return InstanceDeleted
	default:
		return unknown
	}
}
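The custom (un)marshalers keep the milestone type human readable in event payloads ("ProjectCreated" instead of 3), which is also the form the projection tests above feed into PushedEventMapper. A small standard-library sketch of the round trip:

	// illustrative sketch: Type serializes as its name, not its integer value
	t := milestone.ProjectCreated
	b, _ := json.Marshal(&t) // `"ProjectCreated"`
	var parsed milestone.Type
	_ = parsed.UnmarshalJSON([]byte(`"InstanceDeleted"`)) // parsed == milestone.InstanceDeleted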
30
internal/repository/milestone/type_string.go
Normal file
@ -0,0 +1,30 @@
// Code generated by "stringer -type Type"; DO NOT EDIT.

package milestone

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[unknown-0]
	_ = x[InstanceCreated-1]
	_ = x[AuthenticationSucceededOnInstance-2]
	_ = x[ProjectCreated-3]
	_ = x[ApplicationCreated-4]
	_ = x[AuthenticationSucceededOnApplication-5]
	_ = x[InstanceDeleted-6]
	_ = x[typesCount-7]
}

const _Type_name = "unknownInstanceCreatedAuthenticationSucceededOnInstanceProjectCreatedApplicationCreatedAuthenticationSucceededOnApplicationInstanceDeletedtypesCount"

var _Type_index = [...]uint8{0, 7, 22, 55, 69, 87, 123, 138, 148}

func (i Type) String() string {
	if i < 0 || i >= Type(len(_Type_index)-1) {
		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
21
internal/repository/pseudo/aggregate.go
Normal file
@ -0,0 +1,21 @@
package pseudo

import "github.com/zitadel/zitadel/internal/eventstore"

const (
	AggregateType    = "pseudo"
	AggregateVersion = "v1"
)

type Aggregate struct {
	eventstore.Aggregate
}

func NewAggregate() *Aggregate {
	return &Aggregate{
		Aggregate: eventstore.Aggregate{
			Type:    AggregateType,
			Version: AggregateVersion,
		},
	}
}
40
internal/repository/pseudo/events.go
Normal file
@ -0,0 +1,40 @@
// Package pseudo contains virtual events, that are not stored in the eventstore.
package pseudo

import (
	"context"
	"time"

	"github.com/zitadel/zitadel/internal/eventstore"
)

const (
	eventTypePrefix    = eventstore.EventType("pseudo.")
	ScheduledEventType = eventTypePrefix + "timestamp"
)

var _ eventstore.Event = (*ScheduledEvent)(nil)

type ScheduledEvent struct {
	*eventstore.BaseEvent `json:"-"`
	Timestamp             time.Time `json:"-"`
	InstanceIDs           []string  `json:"-"`
}

// NewScheduledEvent returns an event that can be processed by event handlers like any other event.
// It receives the current timestamp and an ID list of instances that are active and should be processed.
func NewScheduledEvent(
	ctx context.Context,
	timestamp time.Time,
	instanceIDs ...string,
) *ScheduledEvent {
	return &ScheduledEvent{
		BaseEvent: eventstore.NewBaseEventForPush(
			ctx,
			&NewAggregate().Aggregate,
			ScheduledEventType,
		),
		Timestamp:   timestamp,
		InstanceIDs: instanceIDs,
	}
}
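Because a ScheduledEvent is never stored, a scheduler can synthesize one per tick and hand it to the registered handlers exactly like a projected event. A minimal, hypothetical sketch (the ticker loop and activeInstanceIDs are assumptions, not part of this diff):

	// hypothetical sketch: emit a pseudo event for the currently active instances
	evt := pseudo.NewScheduledEvent(ctx, time.Now(), activeInstanceIDs...)
	// handlers registered for pseudo.ScheduledEventType now reduce evt
	// the same way they reduce events read from the eventstore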