Merge branch 'main' into next

# Conflicts:
#	internal/api/grpc/admin/integration_test/server_test.go
#	internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go
#	internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go
#	internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go
#	internal/api/grpc/user/v2/integration_test/query_test.go
This commit is contained in:
Livio Spring 2024-11-12 13:56:05 +01:00
commit 9a05e671fb
No known key found for this signature in database
GPG Key ID: 26BB1C2FA5952CF0
232 changed files with 5764 additions and 1900 deletions

View File

@ -8,25 +8,24 @@ services:
network_mode: service:db network_mode: service:db
command: sleep infinity command: sleep infinity
environment: environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db' ZITADEL_DATABASE_POSTGRES_HOST: db
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432' ZITADEL_DATABASE_POSTGRES_PORT: 5432
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
- 'ZITADEL_EXTERNALSECURE=false' ZITADEL_EXTERNALSECURE: false
db: db:
image: postgres:latest image: postgres:latest
restart: unless-stopped restart: unless-stopped
volumes: volumes:
- postgres-data:/var/lib/postgresql/data - postgres-data:/var/lib/postgresql/data
environment: environment:
PGUSER: postgres
POSTGRES_PASSWORD: postgres POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: postgres
volumes: volumes:
postgres-data: postgres-data:

View File

@ -1,6 +1,7 @@
name: 📄 Documentation name: 📄 Documentation
description: Create an issue for missing or wrong documentation. description: Create an issue for missing or wrong documentation.
labels: ["docs"] labels: ["docs"]
type: task
body: body:
- type: markdown - type: markdown
attributes: attributes:

View File

@ -1,6 +1,7 @@
name: 💡 Proposal / Feature request name: 💡 Proposal / Feature request
description: "Create an issue for a feature request/proposal." description: "Create an issue for a feature request/proposal."
labels: ["enhancement"] labels: ["enhancement"]
type: enhancement
body: body:
- type: markdown - type: markdown
attributes: attributes:

View File

@ -1,54 +0,0 @@
name: 🛠️ Improvement
description: "Create a new issue for an improvement in ZITADEL"
labels: ["improvement"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this improvement request
- type: checkboxes
id: preflight
attributes:
label: Preflight Checklist
options:
- label:
I could not find a solution in the existing issues, docs, nor discussions
required: true
- label:
I have joined the [ZITADEL chat](https://zitadel.com/chat)
- type: textarea
id: problem
attributes:
label: Describe your problem
description: Please describe the problem this improvement is supposed to solve.
placeholder: Describe the problem you have
validations:
required: true
- type: textarea
id: solution
attributes:
label: Describe your ideal solution
description: Which solution do you propose?
placeholder: As a [type of user], I want [some goal] so that [some reason].
validations:
required: true
- type: input
id: version
attributes:
label: Version
description: Which version of ZITADEL are you using?
- type: dropdown
id: environment
attributes:
label: Environment
description: How do you use ZITADEL?
options:
- ZITADEL Cloud
- Self-hosted
validations:
required: true
- type: textarea
id: additional
attributes:
label: Additional Context
description: Please add any other infos that could be useful.

View File

@ -36,6 +36,10 @@ jobs:
--health-timeout 5s --health-timeout 5s
--health-retries 5 --health-retries 5
--health-start-period 10s --health-start-period 10s
cache:
image: redis:latest
ports:
- 6379:6379
steps: steps:
- -
uses: actions/checkout@v4 uses: actions/checkout@v4

1
.gitignore vendored
View File

@ -87,4 +87,5 @@ go.work.sum
load-test/node_modules load-test/node_modules
load-test/yarn-error.log load-test/yarn-error.log
load-test/dist load-test/dist
load-test/output/*
.vercel .vercel

View File

@ -2,7 +2,7 @@ go_bin := "$$(go env GOPATH)/bin"
gen_authopt_path := "$(go_bin)/protoc-gen-authoption" gen_authopt_path := "$(go_bin)/protoc-gen-authoption"
gen_zitadel_path := "$(go_bin)/protoc-gen-zitadel" gen_zitadel_path := "$(go_bin)/protoc-gen-zitadel"
now := $(shell date --rfc-3339=seconds | sed 's/ /T/') now := $(shell date '+%Y-%m-%dT%T%z' | sed -E 's/.([0-9]{2})([0-9]{2})$$/-\1:\2/')
VERSION ?= development-$(now) VERSION ?= development-$(now)
COMMIT_SHA ?= $(shell git rev-parse HEAD) COMMIT_SHA ?= $(shell git rev-parse HEAD)
ZITADEL_IMAGE ?= zitadel:local ZITADEL_IMAGE ?= zitadel:local
@ -63,12 +63,12 @@ endif
.PHONY: core_grpc_dependencies .PHONY: core_grpc_dependencies
core_grpc_dependencies: core_grpc_dependencies:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.35.1 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions
go install github.com/envoyproxy/protoc-gen-validate@v1.0.4 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions go install github.com/envoyproxy/protoc-gen-validate@v1.1.0 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions
go install github.com/bufbuild/buf/cmd/buf@v1.34.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions go install github.com/bufbuild/buf/cmd/buf@v1.45.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions
.PHONY: core_api .PHONY: core_api
core_api: core_api_generator core_grpc_dependencies core_api: core_api_generator core_grpc_dependencies
@ -113,7 +113,7 @@ core_unit_test:
.PHONY: core_integration_db_up .PHONY: core_integration_db_up
core_integration_db_up: core_integration_db_up:
docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} cache
.PHONY: core_integration_db_down .PHONY: core_integration_db_down
core_integration_db_down: core_integration_db_down:

View File

@ -185,34 +185,136 @@ Database:
# Caches are EXPERIMENTAL. The following config may have breaking changes in the future. # Caches are EXPERIMENTAL. The following config may have breaking changes in the future.
# If no config is provided, caching is disabled by default. # If no config is provided, caching is disabled by default.
# Caches: Caches:
# Connectors are reused by caches. # Connectors are reused by caches.
# Connectors: Connectors:
# Memory connector works with local server memory. # Memory connector works with local server memory.
# It is the simplest (and probably fastest) cache implementation. # It is the simplest (and probably fastest) cache implementation.
# Unsuitable for deployments with multiple containers, # Unsuitable for deployments with multiple containers,
# as each container's cache may hold a different state of the same object. # as each container's cache may hold a different state of the same object.
# Memory: Memory:
# Enabled: true Enabled: false
# AutoPrune removes invalidated or expired object from the cache. # AutoPrune removes invalidated or expired object from the cache.
# AutoPrune: AutoPrune:
# Interval: 15m Interval: 1m
# TimeOut: 30s TimeOut: 5s
Postgres:
Enabled: false
AutoPrune:
Interval: 15m
TimeOut: 30s
Redis:
Enabled: false
# The network type, either tcp or unix.
# Default is tcp.
# Network string
# host:port address.
Addr: localhost:6379
# ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName: ZITADEL_cache
# Use the specified Username to authenticate the current connection
# with one of the connections defined in the ACL list when connecting
# to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
Username: zitadel
# Optional password. Must match the password specified in the
# requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
# or the User Password when connecting to a Redis 6.0 instance, or greater,
# that is using the Redis ACL system.
Password: ""
# Each ZITADEL cache uses an incremental DB namespace.
# This option offsets the first DB so it doesn't conflict with other databases on the same server.
# Note that ZITADEL uses FLUSHDB command to truncate a cache.
# This can have destructive consequences when overlapping DB namespaces are used.
DBOffset: 10
# Maximum number of retries before giving up.
# Default is 3 retries; -1 (not 0) disables retries.
MaxRetries: 3
# Minimum backoff between each retry.
# Default is 8 milliseconds; -1 disables backoff.
MinRetryBackoff: 8ms
# Maximum backoff between each retry.
# Default is 512 milliseconds; -1 disables backoff.
MaxRetryBackoff: 512ms
# Dial timeout for establishing new connections.
# Default is 5 seconds.
DialTimeout: 1s
# Timeout for socket reads. If reached, commands will fail
# with a timeout instead of blocking. Supported values:
# - `0` - default timeout (3 seconds).
# - `-1` - no timeout (block indefinitely).
# - `-2` - disables SetReadDeadline calls completely.
ReadTimeout: 100ms
# Timeout for socket writes. If reached, commands will fail
# with a timeout instead of blocking. Supported values:
# - `0` - default timeout (3 seconds).
# - `-1` - no timeout (block indefinitely).
# - `-2` - disables SetWriteDeadline calls completely.
WriteTimeout: 100ms
# Type of connection pool.
# true for FIFO pool, false for LIFO pool.
# Note that FIFO has slightly higher overhead compared to LIFO,
# but it helps closing idle connections faster reducing the pool size.
PoolFIFO: false
# Base number of socket connections.
# Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
# If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize,
# you can limit it through MaxActiveConns
PoolSize: 20
# Amount of time client waits for connection if all connections
# are busy before returning an error.
# Default is ReadTimeout + 1 second.
PoolTimeout: 100ms
# Minimum number of idle connections which is useful when establishing
# new connection is slow.
# Default is 0. the idle connections are not closed by default.
MinIdleConns: 5
# Maximum number of idle connections.
# Default is 0. the idle connections are not closed by default.
MaxIdleConns: 10
# Maximum number of connections allocated by the pool at a given time.
# When zero, there is no limit on the number of connections in the pool.
MaxActiveConns: 40
# ConnMaxIdleTime is the maximum amount of time a connection may be idle.
# Should be less than server's timeout.
# Expired connections may be closed lazily before reuse.
# If d <= 0, connections are not closed due to a connection's idle time.
# Default is 30 minutes. -1 disables idle timeout check.
ConnMaxIdleTime: 30m
# ConnMaxLifetime is the maximum amount of time a connection may be reused.
# Expired connections may be closed lazily before reuse.
# If <= 0, connections are not closed due to a connection's age.
# Default is to not close idle connections.
ConnMaxLifetime: -1
# Enable TLS server authentication using the default system bundle.
EnableTLS: false
# Disable set-lib on connect. Default is false.
DisableIndentity: false
# Add suffix to client name. Default is empty.
IdentitySuffix: ""
# Instance caches auth middleware instances, gettable by domain or ID. # Instance caches auth middleware instances, gettable by domain or ID.
# Instance: Instance:
# Connector must be enabled above. # Connector must be enabled above.
# When connector is empty, this cache will be disabled. # When connector is empty, this cache will be disabled.
# Connector: "memory" Connector: ""
# MaxAge: 1h MaxAge: 1h
# LastUsage: 10m LastUsage: 10m
# # Log enables cache-specific logging. Default to error log to stderr when omitted.
# Log enables cache-specific logging. Default to error log to stdout when omitted. Log:
# Log: Level: error
# Level: debug AddSource: true
# AddSource: true Formatter:
# Formatter: Format: text
# Format: text # Milestones caches instance milestone state, gettable by instance ID
Milestones:
Connector: ""
MaxAge: 1h
LastUsage: 10m
Log:
Level: error
AddSource: true
Formatter:
Format: text
Machine: Machine:
# Cloud-hosted VMs need to specify their metadata endpoint so that the machine can be uniquely identified. # Cloud-hosted VMs need to specify their metadata endpoint so that the machine can be uniquely identified.
@ -411,6 +513,7 @@ OIDC:
DefaultLoginURLV2: "/login?authRequest=" # ZITADEL_OIDC_DEFAULTLOGINURLV2 DefaultLoginURLV2: "/login?authRequest=" # ZITADEL_OIDC_DEFAULTLOGINURLV2
DefaultLogoutURLV2: "/logout?post_logout_redirect=" # ZITADEL_OIDC_DEFAULTLOGOUTURLV2 DefaultLogoutURLV2: "/logout?post_logout_redirect=" # ZITADEL_OIDC_DEFAULTLOGOUTURLV2
PublicKeyCacheMaxAge: 24h # ZITADEL_OIDC_PUBLICKEYCACHEMAXAGE PublicKeyCacheMaxAge: 24h # ZITADEL_OIDC_PUBLICKEYCACHEMAXAGE
DefaultBackChannelLogoutLifetime: 15m # ZITADEL_OIDC_DEFAULTBACKCHANNELLOGOUTLIFETIME
SAML: SAML:
ProviderConfig: ProviderConfig:
@ -921,7 +1024,7 @@ DefaultInstance:
PreHeader: Verify email PreHeader: Verify email
Subject: Verify email Subject: Verify email
Greeting: Hello {{.DisplayName}}, Greeting: Hello {{.DisplayName}},
Text: A new email has been added. Please use the button below to verify your email. (Code {{.Code}}) If you din't add a new email, please ignore this email. Text: A new email has been added. Please use the button below to verify your email. (Code {{.Code}}) If you didn't add a new email, please ignore this email.
ButtonText: Verify email ButtonText: Verify email
- MessageTextType: VerifyPhone - MessageTextType: VerifyPhone
Language: en Language: en

View File

@ -25,7 +25,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz" "github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache" "github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
crypto_db "github.com/zitadel/zitadel/internal/crypto/database" crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
@ -72,7 +72,7 @@ type ProjectionsConfig struct {
EncryptionKeys *encryption.EncryptionKeyConfig EncryptionKeys *encryption.EncryptionKeyConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser SystemAPIUsers map[string]*internal_authz.SystemAPIUser
Eventstore *eventstore.Config Eventstore *eventstore.Config
Caches *cache.CachesConfig Caches *connector.CachesConfig
Admin admin_es.Config Admin admin_es.Config
Auth auth_es.Config Auth auth_es.Config
@ -128,13 +128,16 @@ func projections(
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, client)
logging.OnError(err).Fatal("unable to start caches")
queries, err := query.StartQueries( queries, err := query.StartQueries(
ctx, ctx,
es, es,
esV4.Querier, esV4.Querier,
client, client,
client, client,
config.Caches, cacheConnectors,
config.Projections, config.Projections,
config.SystemDefaults, config.SystemDefaults,
keys.IDPConfig, keys.IDPConfig,
@ -161,8 +164,9 @@ func projections(
DisplayName: config.WebAuthNName, DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure, ExternalSecure: config.ExternalSecure,
} }
commands, err := command.StartCommands( commands, err := command.StartCommands(ctx,
es, es,
cacheConnectors,
config.SystemDefaults, config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings,
staticStorage, staticStorage,
@ -199,6 +203,7 @@ func projections(
ctx, ctx,
config.Projections.Customizations["notifications"], config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"], config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"], config.Projections.Customizations["telemetry"],
*config.Telemetry, *config.Telemetry,
config.ExternalDomain, config.ExternalDomain,
@ -212,6 +217,8 @@ func projections(
keys.User, keys.User,
keys.SMTP, keys.SMTP,
keys.SMS, keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
) )
config.Auth.Spooler.Client = client config.Auth.Spooler.Client = client

View File

@ -9,6 +9,7 @@ import (
"golang.org/x/text/language" "golang.org/x/text/language"
"github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/crypto"
@ -64,7 +65,9 @@ func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error
return err return err
} }
cmd, err := command.StartCommands(mig.es, cmd, err := command.StartCommands(ctx,
mig.es,
connector.Connectors{},
mig.defaults, mig.defaults,
mig.zitadelRoles, mig.zitadelRoles,
nil, nil,

118
cmd/setup/36.go Normal file
View File

@ -0,0 +1,118 @@
package setup
import (
"context"
_ "embed"
"errors"
"fmt"
"slices"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/repository/milestone"
)
var (
//go:embed 36.sql
getProjectedMilestones string
)
// FillV3Milestones is the setup step that reads milestone state from the
// projections.milestones table and re-creates it as milestone events on the
// eventstore (see Execute).
type FillV3Milestones struct {
	dbClient   *database.DB
	eventstore *eventstore.Eventstore
}

// instanceMilestone is one projected milestone row, grouped per instance
// by getProjectedMilestones.
type instanceMilestone struct {
	Type    milestone.Type
	Reached time.Time
	Pushed  *time.Time // nil when the milestone was reached but never pushed
}
// Execute loads the currently projected milestones and re-creates them as
// events, one push per instance.
func (mig *FillV3Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
	milestonesByInstance, err := mig.getProjectedMilestones(ctx)
	if err != nil {
		return err
	}
	return mig.pushEventsByInstance(ctx, milestonesByInstance)
}
// getProjectedMilestones queries the projections.milestones table (embedded
// query in 36.sql) and returns the reached milestones grouped by instance ID.
// When the projection table does not exist (SQL state 42P01), it returns
// nil, nil so the migration becomes a no-op, e.g. on fresh installations.
func (mig *FillV3Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
	type row struct {
		InstanceID string
		Type       milestone.Type
		Reached    time.Time
		Pushed     *time.Time
	}

	// the Query error is intentionally ignored here; pgx surfaces it
	// through CollectRows below.
	rows, _ := mig.dbClient.Pool.Query(ctx, getProjectedMilestones)
	scanned, err := pgx.CollectRows(rows, pgx.RowToStructByPos[row])
	var pgError *pgconn.PgError
	// catch ERROR: relation "projections.milestones" does not exist
	if errors.As(err, &pgError) && pgError.SQLState() == "42P01" {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("milestones get: %w", err)
	}
	milestoneMap := make(map[string][]instanceMilestone)
	for _, s := range scanned {
		milestoneMap[s.InstanceID] = append(milestoneMap[s.InstanceID], instanceMilestone{
			Type:    s.Type,
			Reached: s.Reached,
			Pushed:  s.Pushed,
		})
	}
	return milestoneMap, nil
}
// pushEventsByInstance creates the v2 milestone events, instance by instance.
// Batching per instance prevents pushing 6*N(instance) events in a single push.
func (mig *FillV3Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
	// keep a deterministic order by instance ID.
	order := make([]string, 0, len(milestoneMap))
	for k := range milestoneMap {
		order = append(order, k)
	}
	slices.Sort(order)

	for i, instanceID := range order {
		logging.WithFields("instance_id", instanceID, "migration", mig.String(), "progress", fmt.Sprintf("%d/%d", i+1, len(order))).Info("filter existing milestone events")

		// because each Push runs in a separate TX, we need to make sure that events
		// from a partially executed migration are pushed again.
		model := command.NewMilestonesReachedWriteModel(instanceID)
		if err := mig.eventstore.FilterToQueryReducer(ctx, model); err != nil {
			return fmt.Errorf("milestones filter: %w", err)
		}
		if model.InstanceCreated {
			logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("milestone events already migrated")
			continue // This instance was migrated, skip
		}

		logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("push milestone events")
		aggregate := milestone.NewInstanceAggregate(instanceID)
		// each milestone yields a reached event plus, when already pushed, a pushed event.
		cmds := make([]eventstore.Command, 0, len(milestoneMap[instanceID])*2)
		for _, m := range milestoneMap[instanceID] {
			cmds = append(cmds, milestone.NewReachedEventWithDate(ctx, aggregate, m.Type, &m.Reached))
			if m.Pushed != nil {
				cmds = append(cmds, milestone.NewPushedEventWithDate(ctx, aggregate, m.Type, nil, "", m.Pushed))
			}
		}

		if _, err := mig.eventstore.Push(ctx, cmds...); err != nil {
			return fmt.Errorf("milestones push: %w", err)
		}
	}
	return nil
}
// String returns the unique identifier of this setup step.
func (mig *FillV3Milestones) String() string {
	const stepName = "36_fill_v3_milestones"
	return stepName
}

4
cmd/setup/36.sql Normal file
View File

@ -0,0 +1,4 @@
-- All reached milestones from the projection, ordered by instance_id so the
-- migration (cmd/setup/36.go) can group and push events per instance.
SELECT instance_id, type, reached_date, last_pushed_date
FROM projections.milestones
WHERE reached_date IS NOT NULL
ORDER BY instance_id, reached_date;

27
cmd/setup/37.go Normal file
View File

@ -0,0 +1,27 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 37.sql
addBackChannelLogoutURI string
)
// Apps7OIDConfigsBackChannelLogoutURI is the setup step that adds the
// back_channel_logout_uri column to projections.apps7_oidc_configs
// (embedded DDL in 37.sql).
type Apps7OIDConfigsBackChannelLogoutURI struct {
	dbClient *database.DB
}
// Execute runs the embedded DDL statement (37.sql) against the database.
func (mig *Apps7OIDConfigsBackChannelLogoutURI) Execute(ctx context.Context, _ eventstore.Event) error {
	if _, err := mig.dbClient.ExecContext(ctx, addBackChannelLogoutURI); err != nil {
		return err
	}
	return nil
}
// String returns the unique identifier of this setup step.
func (mig *Apps7OIDConfigsBackChannelLogoutURI) String() string {
	const stepName = "37_apps7_oidc_configs_add_back_channel_logout_uri"
	return stepName
}

1
cmd/setup/37.sql Normal file
View File

@ -0,0 +1 @@
-- Add the back-channel logout URI column; idempotent via IF EXISTS / IF NOT EXISTS.
ALTER TABLE IF EXISTS projections.apps7_oidc_configs ADD COLUMN IF NOT EXISTS back_channel_logout_uri TEXT;

28
cmd/setup/38.go Normal file
View File

@ -0,0 +1,28 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 38.sql
backChannelLogoutCurrentState string
)
// BackChannelLogoutNotificationStart seeds a current-state row for the
// back-channel logout notification projection (embedded SQL in 38.sql),
// using the position of the event passed to Execute.
type BackChannelLogoutNotificationStart struct {
	dbClient *database.DB
	esClient *eventstore.Eventstore // NOTE(review): unused in the visible code — confirm whether it is needed
}
// Execute inserts the projection's current state (38.sql), parameterized with
// the sequence, creation time and position of the triggering event e.
func (mig *BackChannelLogoutNotificationStart) Execute(ctx context.Context, e eventstore.Event) error {
	seq, createdAt, pos := e.Sequence(), e.CreatedAt(), e.Position()
	_, err := mig.dbClient.ExecContext(ctx, backChannelLogoutCurrentState, seq, createdAt, pos)
	return err
}
// String returns the unique identifier of this setup step.
// NOTE(review): the trailing underscore looks unintentional, but executed
// migration names must stay stable — do not change without a follow-up migration.
func (mig *BackChannelLogoutNotificationStart) String() string {
	const stepName = "38_back_channel_logout_notification_start_"
	return stepName
}

20
cmd/setup/38.sql Normal file
View File

@ -0,0 +1,20 @@
-- Seed projections.current_states for the back-channel logout notification
-- projection: one row per existing instance (any instance with an
-- 'instance.added' event). Parameters: $1 = sequence, $2 = event date,
-- $3 = position of the event that triggered this setup step.
-- Existing rows are kept as-is (ON CONFLICT DO NOTHING).
INSERT INTO projections.current_states (
    instance_id
    , projection_name
    , last_updated
    , sequence
    , event_date
    , position
    , filter_offset
)
SELECT instance_id
    , 'projections.notifications_back_channel_logout'
    , now()
    , $1
    , $2
    , $3
    , 0
FROM eventstore.events2
WHERE aggregate_type = 'instance'
AND event_type = 'instance.added'
ON CONFLICT DO NOTHING;

View File

@ -15,7 +15,7 @@ import (
internal_authz "github.com/zitadel/zitadel/internal/api/authz" internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/oidc" "github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/ui/login" "github.com/zitadel/zitadel/internal/api/ui/login"
"github.com/zitadel/zitadel/internal/cache" "github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook" "github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
@ -31,7 +31,7 @@ import (
type Config struct { type Config struct {
ForMirror bool ForMirror bool
Database database.Config Database database.Config
Caches *cache.CachesConfig Caches *connector.CachesConfig
SystemDefaults systemdefaults.SystemDefaults SystemDefaults systemdefaults.SystemDefaults
InternalAuthZ internal_authz.Config InternalAuthZ internal_authz.Config
ExternalDomain string ExternalDomain string
@ -122,6 +122,9 @@ type Steps struct {
s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid
s34AddCacheSchema *AddCacheSchema s34AddCacheSchema *AddCacheSchema
s35AddPositionToIndexEsWm *AddPositionToIndexEsWm s35AddPositionToIndexEsWm *AddPositionToIndexEsWm
s36FillV2Milestones *FillV3Milestones
s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI
s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart
} }
func MustNewSteps(v *viper.Viper) *Steps { func MustNewSteps(v *viper.Viper) *Steps {

View File

@ -3,6 +3,7 @@ package setup
import ( import (
"context" "context"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
@ -31,8 +32,9 @@ func (mig *externalConfigChange) Check(lastRun map[string]interface{}) bool {
} }
func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error { func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error {
cmd, err := command.StartCommands( cmd, err := command.StartCommands(ctx,
mig.es, mig.es,
connector.Connectors{},
mig.defaults, mig.defaults,
nil, nil,
nil, nil,

View File

@ -22,6 +22,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz" "github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database" cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database" "github.com/zitadel/zitadel/internal/database"
@ -165,6 +166,9 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient} steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient}
steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient} steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient}
steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient} steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient}
steps.s36FillV2Milestones = &FillV3Milestones{dbClient: queryDBClient, eventstore: eventstoreClient}
steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient}
steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient}
err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil) err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections") logging.OnError(err).Fatal("unable to start projections")
@ -209,6 +213,8 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s30FillFieldsForOrgDomainVerified, steps.s30FillFieldsForOrgDomainVerified,
steps.s34AddCacheSchema, steps.s34AddCacheSchema,
steps.s35AddPositionToIndexEsWm, steps.s35AddPositionToIndexEsWm,
steps.s36FillV2Milestones,
steps.s38BackChannelLogoutNotificationStart,
} { } {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed") mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
} }
@ -225,6 +231,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s27IDPTemplate6SAMLNameIDFormat, steps.s27IDPTemplate6SAMLNameIDFormat,
steps.s32AddAuthSessionID, steps.s32AddAuthSessionID,
steps.s33SMSConfigs3TwilioAddVerifyServiceSid, steps.s33SMSConfigs3TwilioAddVerifyServiceSid,
steps.s37Apps7OIDConfigsBackChannelLogoutURI,
} { } {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed") mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
} }
@ -340,13 +347,17 @@ func initProjections(
} }
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
logging.OnError(err).Fatal("unable to start caches")
queries, err := query.StartQueries( queries, err := query.StartQueries(
ctx, ctx,
eventstoreClient, eventstoreClient,
eventstoreV4.Querier, eventstoreV4.Querier,
queryDBClient, queryDBClient,
projectionDBClient, projectionDBClient,
config.Caches, cacheConnectors,
config.Projections, config.Projections,
config.SystemDefaults, config.SystemDefaults,
keys.IDPConfig, keys.IDPConfig,
@ -388,8 +399,9 @@ func initProjections(
permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) { permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID) return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
} }
commands, err := command.StartCommands( commands, err := command.StartCommands(ctx,
eventstoreClient, eventstoreClient,
cacheConnectors,
config.SystemDefaults, config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings,
staticStorage, staticStorage,
@ -421,6 +433,7 @@ func initProjections(
ctx, ctx,
config.Projections.Customizations["notifications"], config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"], config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"], config.Projections.Customizations["telemetry"],
*config.Telemetry, *config.Telemetry,
config.ExternalDomain, config.ExternalDomain,
@ -434,6 +447,8 @@ func initProjections(
keys.User, keys.User,
keys.SMTP, keys.SMTP,
keys.SMS, keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
) )
for _, p := range notify_handler.Projections() { for _, p := range notify_handler.Projections() {
err := migration.Migrate(ctx, eventstoreClient, p) err := migration.Migrate(ctx, eventstoreClient, p)

View File

@ -18,7 +18,7 @@ import (
"github.com/zitadel/zitadel/internal/api/ui/console" "github.com/zitadel/zitadel/internal/api/ui/console"
"github.com/zitadel/zitadel/internal/api/ui/login" "github.com/zitadel/zitadel/internal/api/ui/login"
auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing" auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
"github.com/zitadel/zitadel/internal/cache" "github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook" "github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/network" "github.com/zitadel/zitadel/internal/config/network"
@ -49,7 +49,7 @@ type Config struct {
HTTP1HostHeader string HTTP1HostHeader string
WebAuthNName string WebAuthNName string
Database database.Config Database database.Config
Caches *cache.CachesConfig Caches *connector.CachesConfig
Tracing tracing.Config Tracing tracing.Config
Metrics metrics.Config Metrics metrics.Config
Profiler profiler.Config Profiler profiler.Config

View File

@ -69,6 +69,7 @@ import (
"github.com/zitadel/zitadel/internal/authz" "github.com/zitadel/zitadel/internal/authz"
authz_repo "github.com/zitadel/zitadel/internal/authz/repository" authz_repo "github.com/zitadel/zitadel/internal/authz/repository"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore" authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command" "github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/crypto" "github.com/zitadel/zitadel/internal/crypto"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database" cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
@ -177,6 +178,10 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
})) }))
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC) sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
if err != nil {
return fmt.Errorf("unable to start caches: %w", err)
}
queries, err := query.StartQueries( queries, err := query.StartQueries(
ctx, ctx,
@ -184,7 +189,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
eventstoreV4.Querier, eventstoreV4.Querier,
queryDBClient, queryDBClient,
projectionDBClient, projectionDBClient,
config.Caches, cacheConnectors,
config.Projections, config.Projections,
config.SystemDefaults, config.SystemDefaults,
keys.IDPConfig, keys.IDPConfig,
@ -222,8 +227,9 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
DisplayName: config.WebAuthNName, DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure, ExternalSecure: config.ExternalSecure,
} }
commands, err := command.StartCommands( commands, err := command.StartCommands(ctx,
eventstoreClient, eventstoreClient,
cacheConnectors,
config.SystemDefaults, config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings, config.InternalAuthZ.RolePermissionMappings,
storage, storage,
@ -269,6 +275,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
ctx, ctx,
config.Projections.Customizations["notifications"], config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"], config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"], config.Projections.Customizations["telemetry"],
*config.Telemetry, *config.Telemetry,
config.ExternalDomain, config.ExternalDomain,
@ -282,6 +289,8 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
keys.User, keys.User,
keys.SMTP, keys.SMTP,
keys.SMS, keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
) )
notification.Start(ctx) notification.Start(ctx)

View File

@ -155,7 +155,7 @@
> >
<cnsl-form-field class="lifetime-form-field" label="Password Check Lifetime" required="true"> <cnsl-form-field class="lifetime-form-field" label="Password Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label> <cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="passwordCheckLifetime" formControlName="passwordCheckLifetime" /> <input cnslInput type="number" name="passwordCheckLifetime" formControlName="passwordCheckLifetime" min="1" step="1" />
</cnsl-form-field> </cnsl-form-field>
</cnsl-card> </cnsl-card>
<cnsl-card <cnsl-card
@ -164,7 +164,14 @@
> >
<cnsl-form-field class="lifetime-form-field" label="external Login Check Lifetime" required="true"> <cnsl-form-field class="lifetime-form-field" label="external Login Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label> <cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="externalLoginCheckLifetime" formControlName="externalLoginCheckLifetime" /> <input
cnslInput
type="number"
name="externalLoginCheckLifetime"
formControlName="externalLoginCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field> </cnsl-form-field>
</cnsl-card> </cnsl-card>
<cnsl-card <cnsl-card
@ -173,7 +180,7 @@
> >
<cnsl-form-field class="lifetime-form-field" label="MFA Init Skip Lifetime" required="true"> <cnsl-form-field class="lifetime-form-field" label="MFA Init Skip Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label> <cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="mfaInitSkipLifetime" formControlName="mfaInitSkipLifetime" /> <input cnslInput type="number" name="mfaInitSkipLifetime" formControlName="mfaInitSkipLifetime" min="0" step="1" />
</cnsl-form-field> </cnsl-form-field>
</cnsl-card> </cnsl-card>
<cnsl-card <cnsl-card
@ -182,16 +189,30 @@
> >
<cnsl-form-field class="lifetime-form-field" label="Second Factor Check Lifetime" required="true"> <cnsl-form-field class="lifetime-form-field" label="Second Factor Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label> <cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="secondFactorCheckLifetime" formControlName="secondFactorCheckLifetime" /> <input
cnslInput
type="number"
name="secondFactorCheckLifetime"
formControlName="secondFactorCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field> </cnsl-form-field>
</cnsl-card> </cnsl-card>
<cnsl-card <cnsl-card
[title]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.TITLE' | translate" [title]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.TITLE' | translate"
[description]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.DESCRIPTION' | translate" [description]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.DESCRIPTION' | translate"
> >
<cnsl-form-field class="lifetime-form-field" label="Multi Factor Check Lifetime" required="true"> <cnsl-form-field class="lifetime-form-field" label="Multi-factor Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label> <cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="multiFactorCheckLifetime" formControlName="multiFactorCheckLifetime" /> <input
cnslInput
type="number"
name="multiFactorCheckLifetime"
formControlName="multiFactorCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field> </cnsl-form-field>
</cnsl-card> </cnsl-card>
</form> </form>

View File

@ -760,7 +760,7 @@
"3": "Deleted" "3": "Deleted"
}, },
"DIALOG": { "DIALOG": {
"MFA_DELETE_TITLE": "Remove Secondfactor", "MFA_DELETE_TITLE": "Remove Second factor",
"MFA_DELETE_DESCRIPTION": "You are about to delete a second factor. Are you sure?", "MFA_DELETE_DESCRIPTION": "You are about to delete a second factor. Are you sure?",
"ADD_MFA_TITLE": "Add Second Factor", "ADD_MFA_TITLE": "Add Second Factor",
"ADD_MFA_DESCRIPTION": "Select one of the following options." "ADD_MFA_DESCRIPTION": "Select one of the following options."
@ -773,9 +773,9 @@
"IDPNAME": "IDP Name", "IDPNAME": "IDP Name",
"USERDISPLAYNAME": "External Name", "USERDISPLAYNAME": "External Name",
"EXTERNALUSERID": "External User ID", "EXTERNALUSERID": "External User ID",
"EMPTY": "No external IDP found", "EMPTY": "No external IdP found",
"DIALOG": { "DIALOG": {
"DELETE_TITLE": "Remove IDP", "DELETE_TITLE": "Remove IdP",
"DELETE_DESCRIPTION": "You are about to delete an Identity Provider from a user. Do you really want to continue?" "DELETE_DESCRIPTION": "You are about to delete an Identity Provider from a user. Do you really want to continue?"
} }
}, },
@ -1691,7 +1691,7 @@
"username": "Username", "username": "Username",
"tempUsername": "Temp username", "tempUsername": "Temp username",
"otp": "One-time password", "otp": "One-time password",
"verifyUrl": "Verify One-time-password URL", "verifyUrl": "Verify One-time password URL",
"expiry": "Expiry", "expiry": "Expiry",
"applicationName": "Application name" "applicationName": "Application name"
}, },
@ -2154,7 +2154,7 @@
"PREFERREDLANGUAGEATTRIBUTE": "Preferred language attribute", "PREFERREDLANGUAGEATTRIBUTE": "Preferred language attribute",
"PREFERREDUSERNAMEATTRIBUTE": "Preferred username attribute", "PREFERREDUSERNAMEATTRIBUTE": "Preferred username attribute",
"PROFILEATTRIBUTE": "Profile attribute", "PROFILEATTRIBUTE": "Profile attribute",
"IDPDISPLAYNAMMAPPING": "IDP Display Name Mapping", "IDPDISPLAYNAMMAPPING": "IdP Display Name Mapping",
"USERNAMEMAPPING": "Username Mapping", "USERNAMEMAPPING": "Username Mapping",
"DATES": "Dates", "DATES": "Dates",
"CREATIONDATE": "Created At", "CREATIONDATE": "Created At",
@ -2162,13 +2162,13 @@
"DEACTIVATE": "Deactivate", "DEACTIVATE": "Deactivate",
"ACTIVATE": "Activate", "ACTIVATE": "Activate",
"DELETE": "Delete", "DELETE": "Delete",
"DELETE_TITLE": "Delete IDP", "DELETE_TITLE": "Delete IdP",
"DELETE_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?", "DELETE_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?",
"REMOVE_WARN_TITLE": "Remove IDP", "REMOVE_WARN_TITLE": "Remove IdP",
"REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. This will remove the selection of the available IDP for your users and already registered users won't be able to login again. Are you sure to continue?", "REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. This will remove the selection of the available IdP for your users and already registered users won't be able to login again. Are you sure to continue?",
"DELETE_SELECTION_TITLE": "Delete IDP", "DELETE_SELECTION_TITLE": "Delete IdP",
"DELETE_SELECTION_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?", "DELETE_SELECTION_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?",
"EMPTY": "No IDP available", "EMPTY": "No IdP available",
"OIDC": { "OIDC": {
"GENERAL": "General Information", "GENERAL": "General Information",
"TITLE": "OIDC Configuration", "TITLE": "OIDC Configuration",

View File

@ -80,7 +80,7 @@ Use the function that reflects your log level.
### Example ### Example
```js ```js
logger.info("This is an info log.") logger.log("This is an info log.")
logger.warn("This is a warn log.") logger.warn("This is a warn log.")

View File

@ -92,7 +92,7 @@ Some secrets cannot be hashed because they need to be used in their raw form. Th
- Federation - Federation
- Client Secrets of Identity Providers (IdPs) - Client Secrets of Identity Providers (IdPs)
- Multi Factor Authentication - Multi-factor Authentication
- TOTP Seed Values - TOTP Seed Values
- Validation Secrets - Validation Secrets
- Verifying contact information like email, phone numbers - Verifying contact information like email, phone numbers

View File

@ -144,4 +144,4 @@ The storage layer of ZITADEL is responsible for multiple tasks. For example:
- Backup and restore operation for disaster recovery purpose - Backup and restore operation for disaster recovery purpose
ZITADEL currently supports PostgreSQL and CockroachDB. ZITADEL currently supports PostgreSQL and CockroachDB.
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide on using one of them. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide on using one of them.

View File

@ -11,7 +11,7 @@ Since the storage layer takes the heavy lifting of making sure that data in sync
Depending on your project's needs our general recommendation is to run ZITADEL and ZITADEL's storage layer across multiple availability zones in the same region or if you need higher guarantees run the storage layer across multiple regions. Depending on your project's needs our general recommendation is to run ZITADEL and ZITADEL's storage layer across multiple availability zones in the same region or if you need higher guarantees run the storage layer across multiple regions.
Consult the [CockroachDB documentation](https://www.cockroachlabs.com/docs/) for more details or use the [CockroachCloud Service](https://www.cockroachlabs.com/docs/cockroachcloud/create-an-account.html) Consult the [CockroachDB documentation](https://www.cockroachlabs.com/docs/) for more details or use the [CockroachCloud Service](https://www.cockroachlabs.com/docs/cockroachcloud/create-an-account.html)
Alternatively you can run ZITADEL also with Postgres which is [Enterprise Supported](/docs/support/software-release-cycles-support#partially-supported). Alternatively you can run ZITADEL also with Postgres which is [Enterprise Supported](/docs/support/software-release-cycles-support#partially-supported).
Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it. Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-postgresql) before you decide to use it.
## Scalability ## Scalability

View File

@ -36,5 +36,5 @@ Possible conditions for the Execution:
## Further reading ## Further reading
- [Actions v2 example execution locally](/apis/actionsv2/execution-local) - [Actions v2 reference](/apis/actions/v3/usage)
- [Actions v2 reference](/apis/actionsv2/introduction#action) - [Actions v2 example execution locally](/apis/actions/v3/testing-locally)

View File

@ -14,7 +14,7 @@ curl --request GET \
``` ```
Response Example: Response Example:
The relevant part for the list is the second factor and multi factor list. The relevant part for the list is the second factor and multi-factor list.
```bash ```bash
{ {

View File

@ -30,7 +30,7 @@ ZITADEL supports different Methods:
### Start TOTP Registration ### Start TOTP Registration
The user has selected to setup Time-based One-Time-Password (TOTP). The user has selected to setup Time-based One-Time Password (TOTP).
To show the user the QR to register TOTP with his Authenticator App like Google/Microsoft Authenticator or Authy you have to start the registration on the ZITADEL API. To show the user the QR to register TOTP with his Authenticator App like Google/Microsoft Authenticator or Authy you have to start the registration on the ZITADEL API.
Generate the QR Code with the URI from the response. Generate the QR Code with the URI from the response.
For users that do not have a QR Code reader make sure to also show the secret, to enable manual configuration. For users that do not have a QR Code reader make sure to also show the secret, to enable manual configuration.
@ -485,7 +485,7 @@ You have successfully registered a new U2F to the user.
### Check User ### Check User
To be able to check the Universal-Second-Factor (U2F) you need a user check and a webAuthN challenge. To be able to check the Universal Second Factor (U2F) you need a user check and a webAuthN challenge.
In the create session request you can check for the user and directly initiate the webAuthN challenge. In the create session request you can check for the user and directly initiate the webAuthN challenge.
For U2F you can choose between "USER_VERIFICATION_REQUIREMENT_PREFERRED" and "USER_VERIFICATION_REQUIREMENT_DISCOURAGED" for the challenge. For U2F you can choose between "USER_VERIFICATION_REQUIREMENT_PREFERRED" and "USER_VERIFICATION_REQUIREMENT_DISCOURAGED" for the challenge.

View File

@ -12,7 +12,7 @@ You need to give a user the [manager role](/docs/guides/manage/console/managers)
If you like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview). If you like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview).
## Request Events ## Request Events
Call the [ListEvents](/apis/resources/admin) enpoint in the Administration API to get all the events you need. Call the [ListEvents](/apis/resources/admin) endpoint in the Administration API to get all the events you need.
To further restrict your result you can add the following filters: To further restrict your result you can add the following filters:
- sequence - sequence
- editor user id - editor user id
@ -139,10 +139,10 @@ curl --request POST \
The following example shows you how you could use the events search to find out the failed login attempts of your users. The following example shows you how you could use the events search to find out the failed login attempts of your users.
You have to include all the event types that tell you that a login attempt has failed. You have to include all the event types that tell you that a login attempt has failed.
In this case this are the following events: In this case these are the following events:
- Password verification failed - Password verification failed
- One-time-password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc) - One-time password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc)
- Universal-Second-Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc) - Universal Second Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc)
- Passwordless/Passkey check failed (FaceID, WindowsHello, FingerPrint, etc) - Passwordless/Passkey check failed (FaceID, WindowsHello, FingerPrint, etc)
```bash ```bash

View File

@ -178,10 +178,10 @@ Multifactors:
- U2F (Universal Second Factor) with PIN, e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey - U2F (Universal Second Factor) with PIN, e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey
Secondfactors (2FA): Second factors (2FA):
- Time-based One Time Password (TOTP), Authenticator Apps like Google/Microsoft Authenticator, Authy, etc. - Time-based One Time Password (TOTP), Authenticator Apps like Google/Microsoft Authenticator, Authy, etc.
- Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey - Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardware tokens like Yubikey
- One Time Password with Email (Email OTP) - One Time Password with Email (Email OTP)
- One Time Password with SMS (SMS OTP) - One Time Password with SMS (SMS OTP)
@ -195,9 +195,9 @@ Configure the different lifetimes checks for the login process:
- **Password Check Lifetime** specifies after which period a user has to reenter his password during the login process - **Password Check Lifetime** specifies after which period a user has to reenter his password during the login process
- **External Login Check Lifetime** specifies after which period a user will be redirected to the IDP during the login process - **External Login Check Lifetime** specifies after which period a user will be redirected to the IDP during the login process
- **Multifactor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi Factor during the login process (value 0 will deactivate the prompt) - **Multi-factor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi-factor during the login process (value 0 will deactivate the prompt)
- **Second Factor Check Lifetime** specifies after which period a user has to revalidate the 2-Factor during the login process - **Second Factor Check Lifetime** specifies after which period a user has to revalidate the 2-Factor during the login process
- **Multifactor Login Check Lifetime** specifies after which period a user has to revalidate the Multi Factor during the login process - **Multi-factor Login Check Lifetime** specifies after which period a user has to revalidate the Multi-factor during the login process
## Identity Providers ## Identity Providers

View File

@ -16,7 +16,7 @@ The following scripts don't include:
- Global policies - Global policies
- IAM members - IAM members
- Global IDPs - Global IDPs
- Global second/multi factors - Global second factor / multi-factors
- Machine keys - Machine keys
- Personal Access Tokens - Personal Access Tokens
- Application keys - Application keys

View File

@ -173,7 +173,7 @@ In case the hashes can't be transferred directly, you always have the option to
If your legacy system receives the passwords in clear text (eg, login form) you could also directly create users via ZITADEL API. If your legacy system receives the passwords in clear text (eg, login form) you could also directly create users via ZITADEL API.
We will explain this pattern in more detail in this guide. We will explain this pattern in more detail in this guide.
### One-time-passwords (OTP) ### One-time passwords (OTP)
You can pass the OTP secret when creating users: You can pass the OTP secret when creating users:

View File

@ -75,12 +75,6 @@ Data location refers to a region, consisting of one or many countries or territo
We can not guarantee that during transit the data will only remain within this region. We take measures, as outlined in our [privacy policy](../policies/privacy-policy), to protect your data in transit and in rest. We can not guarantee that during transit the data will only remain within this region. We take measures, as outlined in our [privacy policy](../policies/privacy-policy), to protect your data in transit and in rest.
The following regions will be available when using our cloud service. This list is for informational purposes and will be updated in due course, please refer to our website for all available regions at this time.
- **Global**: All available cloud regions offered by our cloud provider
- **Switzerland**: Exclusively on Swiss region
- **GDPR safe countries**: Hosting location is within any of the EU member states and [Adequate Countries](https://ec.europa.eu/info/law/law-topic/data-protection/international-dimension-data-protection/adequacy-decisions_en) as recognized by the European Commission under the GDPR
## Backup ## Backup
Our backup strategy executes daily full backups and differential backups on much higher frequency. Our backup strategy executes daily full backups and differential backups on much higher frequency.

View File

@ -51,6 +51,9 @@ By executing the commands below, you will download the following file:
# Download the docker compose example configuration. # Download the docker compose example configuration.
wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose-sa.yaml -O docker-compose.yaml wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose-sa.yaml -O docker-compose.yaml
# create the machine key directory
mkdir machinekey
# Run the database and application containers. # Run the database and application containers.
docker compose up --detach docker compose up --detach

View File

@ -1,27 +1,27 @@
version: '3.8'
services: services:
zitadel: zitadel:
# The user should have the permission to write to ./machinekey
user: "${UID:-1000}"
restart: 'always' restart: 'always'
networks: networks:
- 'zitadel' - 'zitadel'
image: 'ghcr.io/zitadel/zitadel:latest' image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled' command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment: environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db' ZITADEL_DATABASE_POSTGRES_HOST: db
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432' ZITADEL_DATABASE_POSTGRES_PORT: 5432
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
- 'ZITADEL_EXTERNALSECURE=false' ZITADEL_EXTERNALSECURE: false
- 'ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH=/machinekey/zitadel-admin-sa.json' ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH: /machinekey/zitadel-admin-sa.json
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME=zitadel-admin-sa' ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME: zitadel-admin-sa
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME=Admin' ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME: Admin
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE=1' ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE: 1
depends_on: depends_on:
db: db:
condition: 'service_healthy' condition: 'service_healthy'
@ -34,12 +34,12 @@ services:
restart: 'always' restart: 'always'
image: postgres:16-alpine image: postgres:16-alpine
environment: environment:
- POSTGRES_USER=postgres PGUSER: postgres
- POSTGRES_PASSWORD=postgres POSTGRES_PASSWORD: postgres
networks: networks:
- 'zitadel' - 'zitadel'
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"] test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
interval: '10s' interval: '10s'
timeout: '30s' timeout: '30s'
retries: 5 retries: 5

View File

@ -1,5 +1,3 @@
version: '3.8'
services: services:
zitadel: zitadel:
restart: 'always' restart: 'always'
@ -8,16 +6,16 @@ services:
image: 'ghcr.io/zitadel/zitadel:latest' image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled' command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment: environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db' ZITADEL_DATABASE_POSTGRES_HOST: db
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432' ZITADEL_DATABASE_POSTGRES_PORT: 5432
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel' ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel' ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel' ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres' ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable' ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
- 'ZITADEL_EXTERNALSECURE=false' ZITADEL_EXTERNALSECURE: false
depends_on: depends_on:
db: db:
condition: 'service_healthy' condition: 'service_healthy'
@ -28,9 +26,8 @@ services:
restart: 'always' restart: 'always'
image: postgres:16-alpine image: postgres:16-alpine
environment: environment:
- POSTGRES_USER=postgres PGUSER: postgres
- POSTGRES_PASSWORD=postgres POSTGRES_PASSWORD: postgres
- POSTGRES_DB=zitadel
networks: networks:
- 'zitadel' - 'zitadel'
healthcheck: healthcheck:

View File

@ -14,7 +14,7 @@ Choose your platform and run ZITADEL with the most minimal configuration possibl
## Prerequisites ## Prerequisites
- For test environments, ZITADEL does not need many resources, 1 CPU and 512MB memory are more than enough. (With more CPU, the password hashing might be faster) - For test environments, ZITADEL does not need many resources, 1 CPU and 512MB memory are more than enough. (With more CPU, the password hashing might be faster)
- A PostgreSQL or CockroachDB as the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use PostgreSQL. - A PostgreSQL or CockroachDB as the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide to use PostgreSQL.
## Releases ## Releases

View File

@ -1,7 +1,6 @@
## ZITADEL with Postgres ## ZITADEL with Postgres
If you want to use a PostgreSQL database you can [overwrite the default configuration](../configure/configure.mdx). If you want to use a PostgreSQL database you can [overwrite the default configuration](../configure/configure.mdx).
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it.
Currently versions >= 14 are supported. Currently versions >= 14 are supported.

View File

@ -109,17 +109,16 @@ but in the Projections.Customizations.Telemetry section
## Database ## Database
### Prefer CockroachDB ### Prefer PostgreSQL
ZITADEL supports [CockroachDB](https://www.cockroachlabs.com/) and [PostgreSQL](https://www.postgresql.org/). ZITADEL supports [CockroachDB](https://www.cockroachlabs.com/) and [PostgreSQL](https://www.postgresql.org/).
We recommend using CockroachDB, We recommend using PostgreSQL, as it is the better choice when you want to prioritize performance and latency.
as horizontal scaling is much easier than with PostgreSQL.
Also, if you are concerned about multi-regional data locality, However, if [multi-regional data locality](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html) is a critical requirement, CockroachDB might be a suitable option.
[the way to go is with CockroachDB](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html).
The indexes for the database are optimized using load tests from [ZITADEL Cloud](https://zitadel.com), The indexes for the database are optimized using load tests from [ZITADEL Cloud](https://zitadel.com),
which runs with CockroachDB. which runs with PostgreSQL.
If you identify problems with your Postgresql during load tests that indicate that the indexes are not optimized, If you identify problems with your CockroachDB during load tests that indicate that the indexes are not optimized,
please create an issue in our [github repository](https://github.com/zitadel/zitadel). please create an issue in our [github repository](https://github.com/zitadel/zitadel).
### Configure ZITADEL ### Configure ZITADEL
@ -128,7 +127,7 @@ Depending on your environment, you maybe would want to tweak some settings about
```yaml ```yaml
Database: Database:
cockroach: postgres:
Host: localhost Host: localhost
Port: 26257 Port: 26257
Database: zitadel Database: zitadel
@ -140,6 +139,7 @@ Database:
Options: "" Options: ""
``` ```
You also might want to configure how [projections](/concepts/eventstore/implementation#projections) are computed. These are the default values: You also might want to configure how [projections](/concepts/eventstore/implementation#projections) are computed. These are the default values:
```yaml ```yaml

View File

@ -7,19 +7,19 @@ services:
service: zitadel-init service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment: environment:
- ZITADEL_EXTERNALPORT=80 ZITADEL_EXTERNALPORT: 80
- ZITADEL_EXTERNALSECURE=false ZITADEL_EXTERNALSECURE: false
- ZITADEL_TLS_ENABLED=false ZITADEL_TLS_ENABLED: false
# database configuration # database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db ZITADEL_DATABASE_POSTGRES_HOST: db
- ZITADEL_DATABASE_POSTGRES_PORT=5432 ZITADEL_DATABASE_POSTGRES_PORT: 5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks: networks:
- 'zitadel' - 'zitadel'
depends_on: depends_on:
@ -33,19 +33,19 @@ services:
service: zitadel-init service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment: environment:
- ZITADEL_EXTERNALPORT=443 ZITADEL_EXTERNALPORT: 443
- ZITADEL_EXTERNALSECURE=true ZITADEL_EXTERNALSECURE: true
- ZITADEL_TLS_ENABLED=false ZITADEL_TLS_ENABLED: false
# database configuration # database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db ZITADEL_DATABASE_POSTGRES_HOST: db
- ZITADEL_DATABASE_POSTGRES_PORT=5432 ZITADEL_DATABASE_POSTGRES_PORT: 5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks: networks:
- 'zitadel' - 'zitadel'
depends_on: depends_on:
@ -59,21 +59,21 @@ services:
service: zitadel-init service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml' command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment: environment:
- ZITADEL_EXTERNALPORT=443 ZITADEL_EXTERNALPORT: 443
- ZITADEL_EXTERNALSECURE=true ZITADEL_EXTERNALSECURE: true
- ZITADEL_TLS_ENABLED=true ZITADEL_TLS_ENABLED: true
- ZITADEL_TLS_CERTPATH=/etc/certs/selfsigned.crt ZITADEL_TLS_CERTPATH: /etc/certs/selfsigned.crt
- ZITADEL_TLS_KEYPATH=/etc/certs/selfsigned.key ZITADEL_TLS_KEYPATH: /etc/certs/selfsigned.key
# database configuration # database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db ZITADEL_DATABASE_POSTGRES_HOST: db
- ZITADEL_DATABASE_POSTGRES_PORT=5432 ZITADEL_DATABASE_POSTGRES_PORT: 5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
volumes: volumes:
- ./selfsigned.crt:/etc/certs/selfsigned.crt - ./selfsigned.crt:/etc/certs/selfsigned.crt
- ./selfsigned.key:/etc/certs/selfsigned.key - ./selfsigned.key:/etc/certs/selfsigned.key
@ -96,22 +96,22 @@ services:
# Using an external domain other than localhost proofs, that the proxy configuration works. # Using an external domain other than localhost proofs, that the proxy configuration works.
# If ZITADEL can't resolve a requests original host to this domain, # If ZITADEL can't resolve a requests original host to this domain,
# it will return a 404 Instance not found error. # it will return a 404 Instance not found error.
- ZITADEL_EXTERNALDOMAIN=127.0.0.1.sslip.io ZITADEL_EXTERNALDOMAIN: 127.0.0.1.sslip.io
# In case something doesn't work as expected, # In case something doesn't work as expected,
# it can be handy to be able to read the access logs. # it can be handy to be able to read the access logs.
- ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED=true ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED: true
# For convenience, ZITADEL should not ask to change the initial admin users password. # For convenience, ZITADEL should not ask to change the initial admin users password.
- ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED: false
# database configuration # database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db ZITADEL_DATABASE_POSTGRES_HOST: db
- ZITADEL_DATABASE_POSTGRES_PORT=5432 ZITADEL_DATABASE_POSTGRES_PORT: 5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks: networks:
- 'zitadel' - 'zitadel'
healthcheck: healthcheck:
@ -125,10 +125,10 @@ services:
restart: 'always' restart: 'always'
image: postgres:16-alpine image: postgres:16-alpine
environment: environment:
- POSTGRES_USER=root PGUSER: root
- POSTGRES_PASSWORD=postgres POSTGRES_PASSWORD: postgres
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"] test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
interval: 5s interval: 5s
timeout: 60s timeout: 60s
retries: 10 retries: 10

View File

@ -6068,9 +6068,9 @@ http-parser-js@>=0.5.1:
integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q== integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==
http-proxy-middleware@^2.0.3: http-proxy-middleware@^2.0.3:
version "2.0.6" version "2.0.7"
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6"
integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==
dependencies: dependencies:
"@types/http-proxy" "^1.17.8" "@types/http-proxy" "^1.17.8"
http-proxy "^1.18.1" http-proxy "^1.18.1"

39
go.mod
View File

@ -10,6 +10,7 @@ require (
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.24.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.24.0
github.com/Masterminds/squirrel v1.5.4 github.com/Masterminds/squirrel v1.5.4
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b
github.com/alicebob/miniredis/v2 v2.33.0
github.com/benbjohnson/clock v1.3.5 github.com/benbjohnson/clock v1.3.5
github.com/boombuler/barcode v1.0.2 github.com/boombuler/barcode v1.0.2
github.com/brianvoe/gofakeit/v6 v6.28.0 github.com/brianvoe/gofakeit/v6 v6.28.0
@ -34,7 +35,7 @@ require (
github.com/gorilla/websocket v1.4.1 github.com/gorilla/websocket v1.4.1
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
github.com/h2non/gock v1.2.0 github.com/h2non/gock v1.2.0
github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/improbable-eng/grpc-web v0.15.0 github.com/improbable-eng/grpc-web v0.15.0
@ -52,7 +53,8 @@ require (
github.com/pashagolub/pgxmock/v4 v4.3.0 github.com/pashagolub/pgxmock/v4 v4.3.0
github.com/pquerna/otp v1.4.0 github.com/pquerna/otp v1.4.0
github.com/rakyll/statik v0.1.7 github.com/rakyll/statik v0.1.7
github.com/rs/cors v1.11.0 github.com/redis/go-redis/v9 v9.7.0
github.com/rs/cors v1.11.1
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
github.com/sony/sonyflake v1.2.0 github.com/sony/sonyflake v1.2.0
github.com/spf13/cobra v1.8.1 github.com/spf13/cobra v1.8.1
@ -62,29 +64,29 @@ require (
github.com/ttacon/libphonenumber v1.2.1 github.com/ttacon/libphonenumber v1.2.1
github.com/twilio/twilio-go v1.22.2 github.com/twilio/twilio-go v1.22.2
github.com/zitadel/logging v0.6.1 github.com/zitadel/logging v0.6.1
github.com/zitadel/oidc/v3 v3.28.1 github.com/zitadel/oidc/v3 v3.32.0
github.com/zitadel/passwap v0.6.0 github.com/zitadel/passwap v0.6.0
github.com/zitadel/saml v0.2.0 github.com/zitadel/saml v0.2.0
github.com/zitadel/schema v1.3.0 github.com/zitadel/schema v1.3.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel v1.29.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
go.opentelemetry.io/otel/exporters/prometheus v0.50.0 go.opentelemetry.io/otel/exporters/prometheus v0.50.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0
go.opentelemetry.io/otel/metric v1.28.0 go.opentelemetry.io/otel/metric v1.29.0
go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/sdk v1.29.0
go.opentelemetry.io/otel/sdk/metric v1.28.0 go.opentelemetry.io/otel/sdk/metric v1.29.0
go.opentelemetry.io/otel/trace v1.28.0 go.opentelemetry.io/otel/trace v1.29.0
go.uber.org/mock v0.4.0 go.uber.org/mock v0.4.0
golang.org/x/crypto v0.27.0 golang.org/x/crypto v0.27.0
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/net v0.26.0 golang.org/x/net v0.28.0
golang.org/x/oauth2 v0.22.0 golang.org/x/oauth2 v0.23.0
golang.org/x/sync v0.8.0 golang.org/x/sync v0.8.0
golang.org/x/text v0.18.0 golang.org/x/text v0.19.0
google.golang.org/api v0.187.0 google.golang.org/api v0.187.0
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd
google.golang.org/grpc v1.65.0 google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2 google.golang.org/protobuf v1.34.2
sigs.k8s.io/yaml v1.4.0 sigs.k8s.io/yaml v1.4.0
@ -94,8 +96,10 @@ require (
cloud.google.com/go/auth v0.6.1 // indirect cloud.google.com/go/auth v0.6.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
github.com/crewjam/httperr v0.2.0 // indirect github.com/crewjam/httperr v0.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/go-ini/ini v1.67.0 // indirect github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
@ -121,11 +125,12 @@ require (
github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
github.com/zenazn/goji v1.0.1 // indirect github.com/zenazn/goji v1.0.1 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
) )
require ( require (
@ -197,7 +202,7 @@ require (
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/sys v0.25.0 golang.org/x/sys v0.25.0
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect

82
go.sum
View File

@ -56,6 +56,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA=
github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0=
github.com/amdonov/xmlsig v0.1.0 h1:i0iQ3neKLmUhcfIRgiiR3eRPKgXZj+n5lAfqnfKoeXI= github.com/amdonov/xmlsig v0.1.0 h1:i0iQ3neKLmUhcfIRgiiR3eRPKgXZj+n5lAfqnfKoeXI=
github.com/amdonov/xmlsig v0.1.0/go.mod h1:jTR/jO0E8fSl/cLvMesP+RjxyV4Ux4WL1Ip64ZnQpA0= github.com/amdonov/xmlsig v0.1.0/go.mod h1:jTR/jO0E8fSl/cLvMesP+RjxyV4Ux4WL1Ip64ZnQpA0=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@ -80,13 +84,17 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMcJ4= github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMcJ4=
github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
@ -127,6 +135,8 @@ github.com/descope/virtualwebauthn v1.0.2/go.mod h1:iJvinjD1iZYqQ09J5lF0+795OdDb
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg= github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg=
@ -354,8 +364,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
@ -620,6 +630,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -628,8 +640,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys= github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys=
@ -719,12 +731,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8= github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8=
github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y=
github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow=
github.com/zitadel/oidc/v3 v3.28.1 h1:PsbFm5CzEMQq9HBXUNJ8yvnWmtVYxpwV5Cinj7TTsHo= github.com/zitadel/oidc/v3 v3.32.0 h1:Mw0EPZRC6h+OXAuT0Uk2BZIjJQNHLqUpaJCm6c3IByc=
github.com/zitadel/oidc/v3 v3.28.1/go.mod h1:WmDFu3dZ9YNKrIoZkmxjGG8QyUR4PbbhsVVSY+rpojM= github.com/zitadel/oidc/v3 v3.32.0/go.mod h1:DyE/XClysRK/ozFaZSqlYamKVnTh4l6Ln25ihSNI03w=
github.com/zitadel/passwap v0.6.0 h1:m9F3epFC0VkBXu25rihSLGyHvWiNlCzU5kk8RoI+SXQ= github.com/zitadel/passwap v0.6.0 h1:m9F3epFC0VkBXu25rihSLGyHvWiNlCzU5kk8RoI+SXQ=
github.com/zitadel/passwap v0.6.0/go.mod h1:kqAiJ4I4eZvm3Y6oAk6hlEqlZZOkjMHraGXF90GG7LI= github.com/zitadel/passwap v0.6.0/go.mod h1:kqAiJ4I4eZvm3Y6oAk6hlEqlZZOkjMHraGXF90GG7LI=
github.com/zitadel/saml v0.2.0 h1:vv7r+Xz43eAPCb+fImMaospD+TWRZQDkb78AbSJRcL4= github.com/zitadel/saml v0.2.0 h1:vv7r+Xz43eAPCb+fImMaospD+TWRZQDkb78AbSJRcL4=
@ -742,24 +756,24 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -857,13 +871,13 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -934,8 +948,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@ -983,10 +997,10 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View File

@ -114,7 +114,15 @@ func WithConsole(ctx context.Context, projectID, appID string) context.Context {
i.projectID = projectID i.projectID = projectID
i.appID = appID i.appID = appID
//i.clientID = clientID return context.WithValue(ctx, instanceKey, i)
}
func WithConsoleClientID(ctx context.Context, clientID string) context.Context {
i, ok := ctx.Value(instanceKey).(*instance)
if !ok {
i = new(instance)
}
i.clientID = clientID
return context.WithValue(ctx, instanceKey, i) return context.WithValue(ctx, instanceKey, i)
} }

View File

@ -10,6 +10,7 @@ import (
"net/http/cookiejar" "net/http/cookiejar"
"net/url" "net/url"
"testing" "testing"
"time"
"github.com/muhlemmer/gu" "github.com/muhlemmer/gu"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -70,28 +71,34 @@ func awaitPubOrgRegDisallowed(t *testing.T, ctx context.Context, cc *integration
// awaitGetSSRGetResponse cuts the CSRF token from the response body if it exists // awaitGetSSRGetResponse cuts the CSRF token from the response body if it exists
func awaitGetSSRGetResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int) string { func awaitGetSSRGetResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int) string {
var csrfToken []byte var csrfToken []byte
await(t, ctx, func(tt *assert.CollectT) { retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
resp, err := client.Get(parsedURL.String()) require.EventuallyWithT(t,
require.NoError(tt, err) func(tt *assert.CollectT) {
body, err := io.ReadAll(resp.Body) resp, err := client.Get(parsedURL.String())
require.NoError(tt, err) require.NoError(tt, err)
searchField := `<input type="hidden" name="gorilla.csrf.Token" value="` body, err := io.ReadAll(resp.Body)
_, after, hasCsrfToken := bytes.Cut(body, []byte(searchField)) require.NoError(tt, err)
if hasCsrfToken { searchField := `<input type="hidden" name="gorilla.csrf.Token" value="`
csrfToken, _, _ = bytes.Cut(after, []byte(`">`)) _, after, hasCsrfToken := bytes.Cut(body, []byte(searchField))
} if hasCsrfToken {
assert.Equal(tt, resp.StatusCode, expectCode) csrfToken, _, _ = bytes.Cut(after, []byte(`">`))
}) }
assert.Equal(tt, resp.StatusCode, expectCode)
}, retryDuration, tick, "awaiting successful get SSR get response failed",
)
return string(csrfToken) return string(csrfToken)
} }
// awaitPostFormResponse needs a valid CSRF token to make it to the actual endpoint implementation and get the expected status code // awaitPostFormResponse needs a valid CSRF token to make it to the actual endpoint implementation and get the expected status code
func awaitPostFormResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int, csrfToken string) { func awaitPostFormResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int, csrfToken string) {
await(t, ctx, func(tt *assert.CollectT) { retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
resp, err := client.PostForm(parsedURL.String(), url.Values{ require.EventuallyWithT(t,
"gorilla.csrf.Token": {csrfToken}, func(tt *assert.CollectT) {
}) resp, err := client.PostForm(parsedURL.String(), url.Values{
require.NoError(tt, err) "gorilla.csrf.Token": {csrfToken},
assert.Equal(tt, resp.StatusCode, expectCode) })
}) require.NoError(tt, err)
assert.Equal(tt, resp.StatusCode, expectCode)
}, retryDuration, tick, "awaiting successful Post Form failed",
)
} }

View File

@ -51,7 +51,7 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
require.Equal(ttt, language.Make(defaultLang.Language), language.English) require.Equal(ttt, language.Make(defaultLang.Language), language.English)
}) })
tt.Run("the discovery endpoint returns all supported languages", func(ttt *testing.T) { tt.Run("the discovery endpoint returns all supported languages", func(ttt *testing.T) {
awaitDiscoveryEndpoint(ttt, instance.Domain, supportedLanguagesStr, nil) awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, supportedLanguagesStr, nil)
}) })
}) })
t.Run("restricting the default language fails", func(tt *testing.T) { t.Run("restricting the default language fails", func(tt *testing.T) {
@ -92,10 +92,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
require.Condition(tt, contains(supported.GetLanguages(), supportedLanguagesStr)) require.Condition(tt, contains(supported.GetLanguages(), supportedLanguagesStr))
}) })
t.Run("the disallowed language is not listed in the discovery endpoint", func(tt *testing.T) { t.Run("the disallowed language is not listed in the discovery endpoint", func(tt *testing.T) {
awaitDiscoveryEndpoint(tt, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()}) awaitDiscoveryEndpoint(tt, ctx, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()})
}) })
t.Run("the login ui is rendered in the default language", func(tt *testing.T) { t.Run("the login ui is rendered in the default language", func(tt *testing.T) {
awaitLoginUILanguage(tt, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort") awaitLoginUILanguage(tt, ctx, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort")
}) })
t.Run("preferred languages are not restricted by the supported languages", func(tt *testing.T) { t.Run("preferred languages are not restricted by the supported languages", func(tt *testing.T) {
tt.Run("change user profile", func(ttt *testing.T) { tt.Run("change user profile", func(ttt *testing.T) {
@ -153,10 +153,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
t.Run("allowing the language makes it usable again", func(tt *testing.T) { t.Run("allowing the language makes it usable again", func(tt *testing.T) {
tt.Run("the previously disallowed language is listed in the discovery endpoint again", func(ttt *testing.T) { tt.Run("the previously disallowed language is listed in the discovery endpoint again", func(ttt *testing.T) {
awaitDiscoveryEndpoint(ttt, instance.Domain, []string{disallowedLanguage.String()}, nil) awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, []string{disallowedLanguage.String()}, nil)
}) })
tt.Run("the login ui is rendered in the previously disallowed language", func(ttt *testing.T) { tt.Run("the login ui is rendered in the previously disallowed language", func(ttt *testing.T) {
awaitLoginUILanguage(ttt, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña") awaitLoginUILanguage(ttt, ctx, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña")
}) })
}) })
} }
@ -164,36 +164,36 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
func setAndAwaitAllowedLanguages(ctx context.Context, cc *integration.Client, t *testing.T, selectLanguages []string) { func setAndAwaitAllowedLanguages(ctx context.Context, cc *integration.Client, t *testing.T, selectLanguages []string) {
_, err := cc.Admin.SetRestrictions(ctx, &admin.SetRestrictionsRequest{AllowedLanguages: &admin.SelectLanguages{List: selectLanguages}}) _, err := cc.Admin.SetRestrictions(ctx, &admin.SetRestrictionsRequest{AllowedLanguages: &admin.SelectLanguages{List: selectLanguages}})
require.NoError(t, err) require.NoError(t, err)
awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
defer awaitCancel() require.EventuallyWithT(t,
await(t, awaitCtx, func(tt *assert.CollectT) { func(tt *assert.CollectT) {
restrictions, getErr := cc.Admin.GetRestrictions(awaitCtx, &admin.GetRestrictionsRequest{}) restrictions, getErr := cc.Admin.GetRestrictions(ctx, &admin.GetRestrictionsRequest{})
expectLanguages := selectLanguages expectLanguages := selectLanguages
if len(selectLanguages) == 0 { if len(selectLanguages) == 0 {
expectLanguages = nil expectLanguages = nil
} }
assert.NoError(tt, getErr) assert.NoError(tt, getErr)
assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages()) assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages())
}) }, retryDuration, tick, "awaiting successful GetAllowedLanguages failed",
)
} }
func setAndAwaitDefaultLanguage(ctx context.Context, cc *integration.Client, t *testing.T, lang language.Tag) { func setAndAwaitDefaultLanguage(ctx context.Context, cc *integration.Client, t *testing.T, lang language.Tag) {
_, err := cc.Admin.SetDefaultLanguage(ctx, &admin.SetDefaultLanguageRequest{Language: lang.String()}) _, err := cc.Admin.SetDefaultLanguage(ctx, &admin.SetDefaultLanguageRequest{Language: lang.String()})
require.NoError(t, err) require.NoError(t, err)
awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
defer awaitCancel() require.EventuallyWithT(t, func(tt *assert.CollectT) {
await(t, awaitCtx, func(tt *assert.CollectT) { defaultLang, getErr := cc.Admin.GetDefaultLanguage(ctx, &admin.GetDefaultLanguageRequest{})
defaultLang, getErr := cc.Admin.GetDefaultLanguage(awaitCtx, &admin.GetDefaultLanguageRequest{})
assert.NoError(tt, getErr) assert.NoError(tt, getErr)
assert.Equal(tt, lang.String(), defaultLang.GetLanguage()) assert.Equal(tt, lang.String(), defaultLang.GetLanguage())
}) }, retryDuration, tick, "awaiting successful GetDefaultLanguage failed",
)
} }
func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notContainsUILocales []string) { func awaitDiscoveryEndpoint(t *testing.T, ctx context.Context, domain string, containsUILocales, notContainsUILocales []string) {
awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
defer awaitCancel() require.EventuallyWithT(t, func(tt *assert.CollectT) {
await(t, awaitCtx, func(tt *assert.CollectT) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
require.NoError(tt, err) require.NoError(tt, err)
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
require.NoError(tt, err) require.NoError(tt, err)
@ -213,14 +213,14 @@ func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notC
if notContainsUILocales != nil { if notContainsUILocales != nil {
assert.Condition(tt, not(contains(doc.UILocalesSupported, notContainsUILocales))) assert.Condition(tt, not(contains(doc.UILocalesSupported, notContainsUILocales)))
} }
}) }, retryDuration, tick, "awaiting successful call to Discovery endpoint failed",
)
} }
func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) { func awaitLoginUILanguage(t *testing.T, ctx context.Context, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) {
awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
defer awaitCancel() require.EventuallyWithT(t, func(tt *assert.CollectT) {
await(t, awaitCtx, func(tt *assert.CollectT) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
req.Header.Set("Accept-Language", acceptLanguage.String()) req.Header.Set("Accept-Language", acceptLanguage.String())
require.NoError(tt, err) require.NoError(tt, err)
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
@ -232,7 +232,8 @@ func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.T
}() }()
require.NoError(tt, err) require.NoError(tt, err)
assert.Containsf(tt, string(body), containsText, "login ui language is in "+expectLang.String()) assert.Containsf(tt, string(body), containsText, "login ui language is in "+expectLang.String())
}) }, retryDuration, tick, "awaiting successful LoginUI in specific language failed",
)
} }
// We would love to use assert.Contains here, but it doesn't work with slices of strings // We would love to use assert.Contains here, but it doesn't work with slices of strings

View File

@ -9,7 +9,6 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/zitadel/internal/integration" "github.com/zitadel/zitadel/internal/integration"
admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin" admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin"
@ -34,23 +33,6 @@ func TestMain(m *testing.M) {
}()) }())
} }
func await(t *testing.T, ctx context.Context, cb func(*assert.CollectT)) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(
t,
func(tt *assert.CollectT) {
defer func() {
// Panics are not recovered and don't mark the test as failed, so we need to do that ourselves
assert.Nil(tt, recover(), "panic in await callback")
}()
cb(tt)
},
retryDuration,
tick,
"awaiting successful callback failed",
)
}
var _ assert.TestingT = (*noopAssertionT)(nil) var _ assert.TestingT = (*noopAssertionT)(nil)
type noopAssertionT struct{} type noopAssertionT struct{}

View File

@ -19,6 +19,7 @@ func systemFeaturesToCommand(req *feature_pb.SetSystemFeaturesRequest) *command.
ImprovedPerformance: improvedPerformanceListToDomain(req.ImprovedPerformance), ImprovedPerformance: improvedPerformanceListToDomain(req.ImprovedPerformance),
OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination, OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
DisableUserTokenEvent: req.DisableUserTokenEvent, DisableUserTokenEvent: req.DisableUserTokenEvent,
EnableBackChannelLogout: req.EnableBackChannelLogout,
} }
} }
@ -34,6 +35,7 @@ func systemFeaturesToPb(f *query.SystemFeatures) *feature_pb.GetSystemFeaturesRe
ImprovedPerformance: featureSourceToImprovedPerformanceFlagPb(&f.ImprovedPerformance), ImprovedPerformance: featureSourceToImprovedPerformanceFlagPb(&f.ImprovedPerformance),
OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination), OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent), DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
} }
} }
@ -50,6 +52,7 @@ func instanceFeaturesToCommand(req *feature_pb.SetInstanceFeaturesRequest) *comm
DebugOIDCParentError: req.DebugOidcParentError, DebugOIDCParentError: req.DebugOidcParentError,
OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination, OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
DisableUserTokenEvent: req.DisableUserTokenEvent, DisableUserTokenEvent: req.DisableUserTokenEvent,
EnableBackChannelLogout: req.EnableBackChannelLogout,
} }
} }
@ -67,6 +70,7 @@ func instanceFeaturesToPb(f *query.InstanceFeatures) *feature_pb.GetInstanceFeat
DebugOidcParentError: featureSourceToFlagPb(&f.DebugOIDCParentError), DebugOidcParentError: featureSourceToFlagPb(&f.DebugOIDCParentError),
OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination), OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent), DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
} }
} }

View File

@ -80,6 +80,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
Level: feature.LevelSystem, Level: feature.LevelSystem,
Value: true, Value: true,
}, },
EnableBackChannelLogout: query.FeatureSource[bool]{
Level: feature.LevelSystem,
Value: true,
},
} }
want := &feature_pb.GetSystemFeaturesResponse{ want := &feature_pb.GetSystemFeaturesResponse{
Details: &object.Details{ Details: &object.Details{
@ -123,6 +127,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
Enabled: false, Enabled: false,
Source: feature_pb.Source_SOURCE_UNSPECIFIED, Source: feature_pb.Source_SOURCE_UNSPECIFIED,
}, },
EnableBackChannelLogout: &feature_pb.FeatureFlag{
Enabled: true,
Source: feature_pb.Source_SOURCE_SYSTEM,
},
} }
got := systemFeaturesToPb(arg) got := systemFeaturesToPb(arg)
assert.Equal(t, want, got) assert.Equal(t, want, got)
@ -140,6 +148,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
WebKey: gu.Ptr(true), WebKey: gu.Ptr(true),
DebugOidcParentError: gu.Ptr(true), DebugOidcParentError: gu.Ptr(true),
OidcSingleV1SessionTermination: gu.Ptr(true), OidcSingleV1SessionTermination: gu.Ptr(true),
EnableBackChannelLogout: gu.Ptr(true),
} }
want := &command.InstanceFeatures{ want := &command.InstanceFeatures{
LoginDefaultOrg: gu.Ptr(true), LoginDefaultOrg: gu.Ptr(true),
@ -152,6 +161,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
WebKey: gu.Ptr(true), WebKey: gu.Ptr(true),
DebugOIDCParentError: gu.Ptr(true), DebugOIDCParentError: gu.Ptr(true),
OIDCSingleV1SessionTermination: gu.Ptr(true), OIDCSingleV1SessionTermination: gu.Ptr(true),
EnableBackChannelLogout: gu.Ptr(true),
} }
got := instanceFeaturesToCommand(arg) got := instanceFeaturesToCommand(arg)
assert.Equal(t, want, got) assert.Equal(t, want, got)
@ -200,6 +210,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
Level: feature.LevelInstance, Level: feature.LevelInstance,
Value: true, Value: true,
}, },
EnableBackChannelLogout: query.FeatureSource[bool]{
Level: feature.LevelInstance,
Value: true,
},
} }
want := &feature_pb.GetInstanceFeaturesResponse{ want := &feature_pb.GetInstanceFeaturesResponse{
Details: &object.Details{ Details: &object.Details{
@ -251,6 +265,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
Enabled: false, Enabled: false,
Source: feature_pb.Source_SOURCE_UNSPECIFIED, Source: feature_pb.Source_SOURCE_UNSPECIFIED,
}, },
EnableBackChannelLogout: &feature_pb.FeatureFlag{
Enabled: true,
Source: feature_pb.Source_SOURCE_INSTANCE,
},
} }
got := instanceFeaturesToPb(arg) got := instanceFeaturesToPb(arg)
assert.Equal(t, want, got) assert.Equal(t, want, got)

View File

@ -57,6 +57,7 @@ func AddOIDCAppRequestToDomain(req *mgmt_pb.AddOIDCAppRequest) *domain.OIDCApp {
ClockSkew: req.ClockSkew.AsDuration(), ClockSkew: req.ClockSkew.AsDuration(),
AdditionalOrigins: req.AdditionalOrigins, AdditionalOrigins: req.AdditionalOrigins,
SkipNativeAppSuccessPage: req.SkipNativeAppSuccessPage, SkipNativeAppSuccessPage: req.SkipNativeAppSuccessPage,
BackChannelLogoutURI: req.GetBackChannelLogoutUri(),
} }
} }
@ -108,6 +109,7 @@ func UpdateOIDCAppConfigRequestToDomain(app *mgmt_pb.UpdateOIDCAppConfigRequest)
ClockSkew: app.ClockSkew.AsDuration(), ClockSkew: app.ClockSkew.AsDuration(),
AdditionalOrigins: app.AdditionalOrigins, AdditionalOrigins: app.AdditionalOrigins,
SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage, SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
BackChannelLogoutURI: app.BackChannelLogoutUri,
} }
} }

View File

@ -19,14 +19,12 @@ import (
"github.com/zitadel/zitadel/pkg/grpc/object/v2" "github.com/zitadel/zitadel/pkg/grpc/object/v2"
oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2" oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2"
"github.com/zitadel/zitadel/pkg/grpc/session/v2" "github.com/zitadel/zitadel/pkg/grpc/session/v2"
"github.com/zitadel/zitadel/pkg/grpc/user/v2"
) )
var ( var (
CTX context.Context CTX context.Context
Instance *integration.Instance Instance *integration.Instance
Client oidc_pb.OIDCServiceClient Client oidc_pb.OIDCServiceClient
User *user.AddHumanUserResponse
) )
const ( const (
@ -44,7 +42,6 @@ func TestMain(m *testing.M) {
Client = Instance.Client.OIDCv2 Client = Instance.Client.OIDCv2
CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner) CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner)
User = Instance.CreateHumanUser(CTX)
return m.Run() return m.Run()
}()) }())
} }

View File

@ -19,14 +19,12 @@ import (
object "github.com/zitadel/zitadel/pkg/grpc/object/v2beta" object "github.com/zitadel/zitadel/pkg/grpc/object/v2beta"
oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2beta" oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2beta"
session "github.com/zitadel/zitadel/pkg/grpc/session/v2beta" session "github.com/zitadel/zitadel/pkg/grpc/session/v2beta"
"github.com/zitadel/zitadel/pkg/grpc/user/v2"
) )
var ( var (
CTX context.Context CTX context.Context
Instance *integration.Instance Instance *integration.Instance
Client oidc_pb.OIDCServiceClient Client oidc_pb.OIDCServiceClient
User *user.AddHumanUserResponse
) )
const ( const (
@ -44,7 +42,6 @@ func TestMain(m *testing.M) {
Client = Instance.Client.OIDCv2beta Client = Instance.Client.OIDCv2beta
CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner) CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner)
User = Instance.CreateHumanUser(CTX)
return m.Run() return m.Run()
}()) }())
} }

View File

@ -22,7 +22,7 @@ func OrgQueriesToModel(queries []*org_pb.OrgQuery) (_ []query.SearchQuery, err e
func OrgQueryToModel(apiQuery *org_pb.OrgQuery) (query.SearchQuery, error) { func OrgQueryToModel(apiQuery *org_pb.OrgQuery) (query.SearchQuery, error) {
switch q := apiQuery.Query.(type) { switch q := apiQuery.Query.(type) {
case *org_pb.OrgQuery_DomainQuery: case *org_pb.OrgQuery_DomainQuery:
return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
case *org_pb.OrgQuery_NameQuery: case *org_pb.OrgQuery_NameQuery:
return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name) return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name)
case *org_pb.OrgQuery_StateQuery: case *org_pb.OrgQuery_StateQuery:

View File

@ -15,6 +15,7 @@ import (
"google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/timestamppb"
"github.com/zitadel/zitadel/internal/integration" "github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/management"
"github.com/zitadel/zitadel/pkg/grpc/object/v2" "github.com/zitadel/zitadel/pkg/grpc/object/v2"
"github.com/zitadel/zitadel/pkg/grpc/org/v2" "github.com/zitadel/zitadel/pkg/grpc/org/v2"
) )
@ -214,6 +215,46 @@ func TestServer_ListOrganizations(t *testing.T) {
}, },
}, },
}, },
{
name: "list org by domain (non primary), ok",
args: args{
CTX,
&org.ListOrganizationsRequest{},
func(ctx context.Context, request *org.ListOrganizationsRequest) ([]orgAttr, error) {
orgs := make([]orgAttr, 1)
name := fmt.Sprintf("ListOrgs-%s", gofakeit.AppName())
orgResp := Instance.CreateOrganization(ctx, name, gofakeit.Email())
orgs[0] = orgAttr{
ID: orgResp.GetOrganizationId(),
Name: name,
Details: orgResp.GetDetails(),
}
domain := gofakeit.DomainName()
_, err := Instance.Client.Mgmt.AddOrgDomain(integration.SetOrgID(ctx, orgResp.GetOrganizationId()), &management.AddOrgDomainRequest{
Domain: domain,
})
if err != nil {
return nil, err
}
request.Queries = []*org.SearchQuery{
OrganizationDomainQuery(domain),
}
return orgs, nil
},
},
want: &org.ListOrganizationsResponse{
Details: &object.ListDetails{
TotalResult: 1,
Timestamp: timestamppb.Now(),
},
SortingColumn: 0,
Result: []*org.Organization{
{
State: org.OrganizationState_ORGANIZATION_STATE_ACTIVE,
},
},
},
},
{ {
name: "list org by inactive state, ok", name: "list org by inactive state, ok",
args: args{ args: args{

View File

@ -57,7 +57,7 @@ func orgQueriesToQuery(ctx context.Context, queries []*org.SearchQuery) (_ []que
func orgQueryToQuery(ctx context.Context, orgQuery *org.SearchQuery) (query.SearchQuery, error) { func orgQueryToQuery(ctx context.Context, orgQuery *org.SearchQuery) (query.SearchQuery, error) {
switch q := orgQuery.Query.(type) { switch q := orgQuery.Query.(type) {
case *org.SearchQuery_DomainQuery: case *org.SearchQuery_DomainQuery:
return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain) return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
case *org.SearchQuery_NameQuery: case *org.SearchQuery_NameQuery:
return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name) return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name)
case *org.SearchQuery_StateQuery: case *org.SearchQuery_StateQuery:

View File

@ -61,6 +61,7 @@ func AppOIDCConfigToPb(app *query.OIDCApp) *app_pb.App_OidcConfig {
AdditionalOrigins: app.AdditionalOrigins, AdditionalOrigins: app.AdditionalOrigins,
AllowedOrigins: app.AllowedOrigins, AllowedOrigins: app.AllowedOrigins,
SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage, SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
BackChannelLogoutUri: app.BackChannelLogoutURI,
}, },
} }
} }

View File

@ -62,10 +62,10 @@ func TestServer_ExecutionTarget(t *testing.T) {
changedRequest := &action.GetTargetRequest{Id: targetCreated.GetDetails().GetId()} changedRequest := &action.GetTargetRequest{Id: targetCreated.GetDetails().GetId()}
// replace original request with different targetID // replace original request with different targetID
urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusOK, changedRequest) urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusOK, changedRequest)
targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, false)
instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod)) targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, false)
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
// expected response from the GetTarget // expected response from the GetTarget
expectedResponse := &action.GetTargetResponse{ expectedResponse := &action.GetTargetResponse{
@ -119,10 +119,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
} }
// after request with different targetID, return changed response // after request with different targetID, return changed response
targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusOK, changedResponse) targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusOK, changedResponse)
targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, false)
instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, false)
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
return func() { return func() {
closeRequest() closeRequest()
closeResponse() closeResponse()
@ -161,12 +160,10 @@ func TestServer_ExecutionTarget(t *testing.T) {
wantRequest := &middleware.ContextInfoRequest{FullMethod: fullMethod, InstanceID: instance.ID(), OrgID: orgID, ProjectID: projectID, UserID: userID, Request: request} wantRequest := &middleware.ContextInfoRequest{FullMethod: fullMethod, InstanceID: instance.ID(), OrgID: orgID, ProjectID: projectID, UserID: userID, Request: request}
urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusInternalServerError, &action.GetTargetRequest{Id: "notchanged"}) urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusInternalServerError, &action.GetTargetRequest{Id: "notchanged"})
targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, true) targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, true)
instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId())) waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
// GetTarget with used target // GetTarget with used target
request.Id = targetRequest.GetDetails().GetId() request.Id = targetRequest.GetDetails().GetId()
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod))
return func() { return func() {
closeRequest() closeRequest()
}, nil }, nil
@ -233,10 +230,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
} }
// after request with different targetID, return changed response // after request with different targetID, return changed response
targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusInternalServerError, changedResponse) targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusInternalServerError, changedResponse)
targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, true)
instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod)) targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, true)
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
return func() { return func() {
closeResponse() closeResponse()
}, nil }, nil
@ -277,7 +273,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
} }
} }
func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition) { func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition, targets []*action.ExecutionTargetType) {
instance.SetExecution(ctx, t, condition, targets)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) { require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{ got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{
@ -290,11 +288,58 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in
if !assert.NoError(ttt, err) { if !assert.NoError(ttt, err) {
return return
} }
assert.Len(ttt, got.GetResult(), 1) if !assert.Len(ttt, got.GetResult(), 1) {
return
}
gotTargets := got.GetResult()[0].GetExecution().GetTargets()
// always first check length, otherwise its failed anyway
if assert.Len(ttt, gotTargets, len(targets)) {
for i := range targets {
assert.EqualExportedValues(ttt, targets[i].GetType(), gotTargets[i].GetType())
}
}
}, retryDuration, tick, "timeout waiting for expected execution result") }, retryDuration, tick, "timeout waiting for expected execution result")
return return
} }
func waitForTarget(ctx context.Context, t *testing.T, instance *integration.Instance, endpoint string, ty domain.TargetType, interrupt bool) *action.CreateTargetResponse {
resp := instance.CreateTarget(ctx, t, "", endpoint, ty, interrupt)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.SearchTargets(ctx, &action.SearchTargetsRequest{
Filters: []*action.TargetSearchFilter{
{Filter: &action.TargetSearchFilter_InTargetIdsFilter{
InTargetIdsFilter: &action.InTargetIDsFilter{TargetIds: []string{resp.GetDetails().GetId()}},
}},
},
})
if !assert.NoError(ttt, err) {
return
}
if !assert.Len(ttt, got.GetResult(), 1) {
return
}
config := got.GetResult()[0].GetConfig()
assert.Equal(ttt, config.GetEndpoint(), endpoint)
switch ty {
case domain.TargetTypeWebhook:
if !assert.NotNil(ttt, config.GetRestWebhook()) {
return
}
assert.Equal(ttt, interrupt, config.GetRestWebhook().GetInterruptOnError())
case domain.TargetTypeAsync:
assert.NotNil(ttt, config.GetRestAsync())
case domain.TargetTypeCall:
if !assert.NotNil(ttt, config.GetRestCall()) {
return
}
assert.Equal(ttt, interrupt, config.GetRestCall().GetInterruptOnError())
}
}, retryDuration, tick, "timeout waiting for expected execution result")
return resp
}
func conditionRequestFullMethod(fullMethod string) *action.Condition { func conditionRequestFullMethod(fullMethod string) *action.Condition {
return &action.Condition{ return &action.Condition{
ConditionType: &action.Condition_Request{ ConditionType: &action.Condition_Request{

View File

@ -216,14 +216,16 @@ func TestServer_GetTarget(t *testing.T) {
err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want) err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want)
require.NoError(t, err) require.NoError(t, err)
} }
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 2*time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) { require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req) got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req)
if tt.wantErr { if tt.wantErr {
assert.Error(ttt, err, "Error: "+err.Error()) assert.Error(ttt, err, "Error: "+err.Error())
return return
} }
assert.NoError(ttt, err) if !assert.NoError(ttt, err) {
return
}
wantTarget := tt.want.GetTarget() wantTarget := tt.want.GetTarget()
gotTarget := got.GetTarget() gotTarget := got.GetTarget()

View File

@ -36,7 +36,7 @@ func TestMain(m *testing.M) {
} }
func TestServer_Feature_Disabled(t *testing.T) { func TestServer_Feature_Disabled(t *testing.T) {
instance, iamCtx := createInstance(t, false) instance, iamCtx, _ := createInstance(t, false)
client := instance.Client.WebKeyV3Alpha client := instance.Client.WebKeyV3Alpha
t.Run("CreateWebKey", func(t *testing.T) { t.Run("CreateWebKey", func(t *testing.T) {
@ -62,18 +62,18 @@ func TestServer_Feature_Disabled(t *testing.T) {
} }
func TestServer_ListWebKeys(t *testing.T) { func TestServer_ListWebKeys(t *testing.T) {
instance, iamCtx := createInstance(t, true) instance, iamCtx, creationDate := createInstance(t, true)
// After the feature is first enabled, we can expect 2 generated keys with the default config. // After the feature is first enabled, we can expect 2 generated keys with the default config.
checkWebKeyListState(iamCtx, t, instance, 2, "", &webkey.WebKey_Rsa{ checkWebKeyListState(iamCtx, t, instance, 2, "", &webkey.WebKey_Rsa{
Rsa: &webkey.WebKeyRSAConfig{ Rsa: &webkey.WebKeyRSAConfig{
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
}, },
}) }, creationDate)
} }
func TestServer_CreateWebKey(t *testing.T) { func TestServer_CreateWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true) instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha client := instance.Client.WebKeyV3Alpha
_, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{ _, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{
@ -93,11 +93,11 @@ func TestServer_CreateWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
}, },
}) }, creationDate)
} }
func TestServer_ActivateWebKey(t *testing.T) { func TestServer_ActivateWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true) instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha client := instance.Client.WebKeyV3Alpha
resp, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{ resp, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{
@ -122,11 +122,11 @@ func TestServer_ActivateWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
}, },
}) }, creationDate)
} }
func TestServer_DeleteWebKey(t *testing.T) { func TestServer_DeleteWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true) instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha client := instance.Client.WebKeyV3Alpha
keyIDs := make([]string, 2) keyIDs := make([]string, 2)
@ -178,11 +178,12 @@ func TestServer_DeleteWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048, Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256, Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
}, },
}) }, creationDate)
} }
func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context) { func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context, *timestamppb.Timestamp) {
instance := integration.NewInstance(CTX) instance := integration.NewInstance(CTX)
creationDate := timestamppb.Now()
iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner) iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
if enableFeature { if enableFeature {
@ -203,7 +204,7 @@ func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, co
} }
}, retryDuration, tick) }, retryDuration, tick)
return instance, iamCTX return instance, iamCTX, creationDate
} }
func assertFeatureDisabledError(t *testing.T, err error) { func assertFeatureDisabledError(t *testing.T, err error) {
@ -214,7 +215,7 @@ func assertFeatureDisabledError(t *testing.T, err error) {
assert.Contains(t, s.Message(), "WEBKEY-Ohx6E") assert.Contains(t, s.Message(), "WEBKEY-Ohx6E")
} }
func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any) { func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any, creationDate *timestamppb.Timestamp) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
assert.EventuallyWithT(t, func(collect *assert.CollectT) { assert.EventuallyWithT(t, func(collect *assert.CollectT) {
@ -227,8 +228,8 @@ func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integrati
var gotActiveKeyID string var gotActiveKeyID string
for _, key := range list { for _, key := range list {
integration.AssertResourceDetails(t, &resource_object.Details{ integration.AssertResourceDetails(t, &resource_object.Details{
Created: timestamppb.Now(), Created: creationDate,
Changed: timestamppb.Now(), Changed: creationDate,
Owner: &object.Owner{ Owner: &object.Owner{
Type: object.OwnerType_OWNER_TYPE_INSTANCE, Type: object.OwnerType_OWNER_TYPE_INSTANCE,
Id: instance.ID(), Id: instance.ID(),

View File

@ -190,7 +190,6 @@ func TestServer_GetUserByID(t *testing.T) {
func TestServer_GetUserByID_Permission(t *testing.T) { func TestServer_GetUserByID_Permission(t *testing.T) {
t.Parallel() t.Parallel()
timeNow := time.Now().UTC()
newOrgOwnerEmail := gofakeit.Email() newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail) newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
newUserID := newOrg.CreatedAdmins[0].GetUserId() newUserID := newOrg.CreatedAdmins[0].GetUserId()
@ -237,7 +236,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
}, },
}, },
Details: &object.Details{ Details: &object.Details{
ChangeDate: timestamppb.New(timeNow), ChangeDate: timestamppb.Now(),
ResourceOwner: newOrg.GetOrganizationId(), ResourceOwner: newOrg.GetOrganizationId(),
}, },
}, },
@ -275,7 +274,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
}, },
}, },
Details: &object.Details{ Details: &object.Details{
ChangeDate: timestamppb.New(timeNow), ChangeDate: timestamppb.Now(),
ResourceOwner: newOrg.GetOrganizationId(), ResourceOwner: newOrg.GetOrganizationId(),
}, },
}, },
@ -303,24 +302,29 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got, err := Client.GetUserByID(tt.args.ctx, tt.args.req) retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
if tt.wantErr { require.EventuallyWithT(t, func(ttt *assert.CollectT) {
require.Error(t, err) got, err := Client.GetUserByID(tt.args.ctx, tt.args.req)
return if tt.wantErr {
} assert.Error(ttt, err)
require.NoError(t, err) return
}
if !assert.NoError(ttt, err) {
return
}
tt.want.User.UserId = tt.args.req.GetUserId() tt.want.User.UserId = tt.args.req.GetUserId()
tt.want.User.Username = newOrgOwnerEmail tt.want.User.Username = newOrgOwnerEmail
tt.want.User.PreferredLoginName = newOrgOwnerEmail tt.want.User.PreferredLoginName = newOrgOwnerEmail
tt.want.User.LoginNames = []string{newOrgOwnerEmail} tt.want.User.LoginNames = []string{newOrgOwnerEmail}
if human := tt.want.User.GetHuman(); human != nil { if human := tt.want.User.GetHuman(); human != nil {
human.Email.Email = newOrgOwnerEmail human.Email.Email = newOrgOwnerEmail
} }
// details tested in GetUserByID // details tested in GetUserByID
tt.want.User.Details = got.User.GetDetails() tt.want.User.Details = got.User.GetDetails()
assert.Equal(t, tt.want.User, got.User) assert.Equal(ttt, tt.want.User, got.User)
}, retryDuration, tick, "timeout waiting for expected user result")
}) })
} }
} }

View File

@ -2447,7 +2447,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG, OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG,
}) })
require.NoError(t, err) require.NoError(t, err)
idpLink, err := Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ _, err = Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
IdpId: provider.GetId(), IdpId: provider.GetId(),
UserId: "external-id", UserId: "external-id",
UserName: "displayName", UserName: "displayName",
@ -2639,25 +2639,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var got *user.ListAuthenticationMethodTypesResponse retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
var err error require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
for { require.NoError(ttt, err)
got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) {
if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) { return
break
} }
select { assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
case <-CTX.Done(): integration.AssertListDetails(ttt, tt.want, got)
t.Fatal(CTX.Err(), err) }, retryDuration, tick, "timeout waiting for expected auth methods result")
case <-time.After(time.Second):
t.Log("retrying ListAuthenticationMethodTypes")
continue
}
}
require.NoError(t, err)
assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult())
require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
}) })
} }
} }

View File

@ -2454,7 +2454,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG, OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG,
}) })
require.NoError(t, err) require.NoError(t, err)
idpLink, err := Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{ _, err = Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
IdpId: provider.GetId(), IdpId: provider.GetId(),
UserId: "external-id", UserId: "external-id",
UserName: "displayName", UserName: "displayName",
@ -2527,25 +2527,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var got *user.ListAuthenticationMethodTypesResponse retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
var err error require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
for { require.NoError(ttt, err)
got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req) if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) {
if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) { return
break
} }
select { assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
case <-CTX.Done(): integration.AssertListDetails(ttt, tt.want, got)
t.Fatal(CTX.Err(), err) }, retryDuration, tick, "timeout waiting for expected auth methods result")
case <-time.After(time.Second):
t.Log("retrying ListAuthenticationMethodTypes")
continue
}
}
require.NoError(t, err)
assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult())
require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
}) })
} }
} }

View File

@ -215,18 +215,18 @@ func (o *OPStorage) TerminateSession(ctx context.Context, userID, clientID strin
logging.Error("no user agent id") logging.Error("no user agent id")
return zerrors.ThrowPreconditionFailed(nil, "OIDC-fso7F", "no user agent id") return zerrors.ThrowPreconditionFailed(nil, "OIDC-fso7F", "no user agent id")
} }
userIDs, err := o.repo.UserSessionUserIDsByAgentID(ctx, userAgentID) sessions, err := o.repo.UserSessionsByAgentID(ctx, userAgentID)
if err != nil { if err != nil {
logging.WithError(err).Error("error retrieving user sessions") logging.WithError(err).Error("error retrieving user sessions")
return err return err
} }
if len(userIDs) == 0 { if len(sessions) == 0 {
return nil return nil
} }
data := authz.CtxData{ data := authz.CtxData{
UserID: userID, UserID: userID,
} }
err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, userIDs) err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, sessions)
logging.OnError(err).Error("error signing out") logging.OnError(err).Error("error signing out")
return err return err
} }
@ -278,18 +278,18 @@ func (o *OPStorage) terminateV1Session(ctx context.Context, userID, sessionID st
if err != nil { if err != nil {
return err return err
} }
return o.command.HumansSignOut(ctx, userAgentID, []string{userID}) return o.command.HumansSignOut(ctx, userAgentID, []command.HumanSignOutSession{{ID: sessionID, UserID: userID}})
} }
// otherwise we search for all active sessions within the same user agent of the current session id // otherwise we search for all active sessions within the same user agent of the current session id
userAgentID, userIDs, err := o.repo.ActiveUserIDsBySessionID(ctx, sessionID) userAgentID, sessions, err := o.repo.ActiveUserSessionsBySessionID(ctx, sessionID)
if err != nil { if err != nil {
logging.WithError(err).Error("error retrieving user sessions") logging.WithError(err).Error("error retrieving user sessions")
return err return err
} }
if len(userIDs) == 0 { if len(sessions) == 0 {
return nil return nil
} }
return o.command.HumansSignOut(ctx, userAgentID, userIDs) return o.command.HumansSignOut(ctx, userAgentID, sessions)
} }
func (o *OPStorage) RevokeToken(ctx context.Context, token, userID, clientID string) (err *oidc.Error) { func (o *OPStorage) RevokeToken(ctx context.Context, token, userID, clientID string) (err *oidc.Error) {
@ -588,6 +588,7 @@ func (s *Server) authResponseToken(authReq *AuthRequest, authorizer op.Authorize
authReq.UserID, authReq.UserID,
authReq.UserOrgID, authReq.UserOrgID,
client.client.ClientID, client.client.ClientID,
client.client.BackChannelLogoutURI,
scope, scope,
authReq.Audience, authReq.Audience,
authReq.AuthMethods(), authReq.AuthMethods(),

View File

@ -348,7 +348,7 @@ func (o *OPStorage) getSigningKey(ctx context.Context) (op.SigningKey, error) {
return nil, err return nil, err
} }
if len(keys.Keys) > 0 { if len(keys.Keys) > 0 {
return o.privateKeyToSigningKey(selectSigningKey(keys.Keys)) return PrivateKeyToSigningKey(SelectSigningKey(keys.Keys), o.encAlg)
} }
var position float64 var position float64
if keys.State != nil { if keys.State != nil {
@ -377,8 +377,8 @@ func (o *OPStorage) ensureIsLatestKey(ctx context.Context, position float64) (bo
return position >= maxSequence, nil return position >= maxSequence, nil
} }
func (o *OPStorage) privateKeyToSigningKey(key query.PrivateKey) (_ op.SigningKey, err error) { func PrivateKeyToSigningKey(key query.PrivateKey, algorithm crypto.EncryptionAlgorithm) (_ op.SigningKey, err error) {
keyData, err := crypto.Decrypt(key.Key(), o.encAlg) keyData, err := crypto.Decrypt(key.Key(), algorithm)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -430,7 +430,7 @@ func (o *OPStorage) getMaxKeySequence(ctx context.Context) (float64, error) {
) )
} }
func selectSigningKey(keys []query.PrivateKey) query.PrivateKey { func SelectSigningKey(keys []query.PrivateKey) query.PrivateKey {
return keys[len(keys)-1] return keys[len(keys)-1]
} }

View File

@ -42,6 +42,7 @@ type Config struct {
DefaultLoginURLV2 string DefaultLoginURLV2 string
DefaultLogoutURLV2 string DefaultLogoutURLV2 string
PublicKeyCacheMaxAge time.Duration PublicKeyCacheMaxAge time.Duration
DefaultBackChannelLogoutLifetime time.Duration
} }
type EndpointConfig struct { type EndpointConfig struct {

View File

@ -46,7 +46,7 @@ type Server struct {
} }
func endpoints(endpointConfig *EndpointConfig) op.Endpoints { func endpoints(endpointConfig *EndpointConfig) op.Endpoints {
// some defaults. The new Server will disable enpoints that are nil. // some defaults. The new Server will disable endpoints that are nil.
endpoints := op.Endpoints{ endpoints := op.Endpoints{
Authorization: op.NewEndpoint("/oauth/v2/authorize"), Authorization: op.NewEndpoint("/oauth/v2/authorize"),
Token: op.NewEndpoint("/oauth/v2/token"), Token: op.NewEndpoint("/oauth/v2/token"),
@ -167,6 +167,7 @@ func (s *Server) EndSession(ctx context.Context, r *op.Request[oidc.EndSessionRe
func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales oidc.Locales) *oidc.DiscoveryConfiguration { func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales oidc.Locales) *oidc.DiscoveryConfiguration {
issuer := op.IssuerFromContext(ctx) issuer := op.IssuerFromContext(ctx)
backChannelLogoutSupported := authz.GetInstance(ctx).Features().EnableBackChannelLogout
return &oidc.DiscoveryConfiguration{ return &oidc.DiscoveryConfiguration{
Issuer: issuer, Issuer: issuer,
@ -199,6 +200,8 @@ func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales o
CodeChallengeMethodsSupported: op.CodeChallengeMethods(s.Provider()), CodeChallengeMethodsSupported: op.CodeChallengeMethods(s.Provider()),
UILocalesSupported: supportedUILocales, UILocalesSupported: supportedUILocales,
RequestParameterSupported: s.Provider().RequestObjectSupported(), RequestParameterSupported: s.Provider().RequestObjectSupported(),
BackChannelLogoutSupported: backChannelLogoutSupported,
BackChannelLogoutSessionSupported: backChannelLogoutSupported,
} }
} }

View File

@ -60,12 +60,19 @@ func (s *Server) accessTokenResponseFromSession(ctx context.Context, client op.C
return resp, err return resp, err
} }
// signerFunc is a getter function that allows add-hoc retrieval of the instance's signer. // SignerFunc is a getter function that allows add-hoc retrieval of the instance's signer.
type signerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error) type SignerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error)
// getSignerOnce returns a function which retrieves the instance's signer from the database once. func (s *Server) getSignerOnce() SignerFunc {
return GetSignerOnce(s.query.GetActiveSigningWebKey, s.Provider().Storage().SigningKey)
}
// GetSignerOnce returns a function which retrieves the instance's signer from the database once.
// Repeated calls of the returned function return the same results. // Repeated calls of the returned function return the same results.
func (s *Server) getSignerOnce() signerFunc { func GetSignerOnce(
getActiveSigningWebKey func(ctx context.Context) (*jose.JSONWebKey, error),
getSigningKey func(ctx context.Context) (op.SigningKey, error),
) SignerFunc {
var ( var (
once sync.Once once sync.Once
signer jose.Signer signer jose.Signer
@ -79,7 +86,7 @@ func (s *Server) getSignerOnce() signerFunc {
if authz.GetFeatures(ctx).WebKey { if authz.GetFeatures(ctx).WebKey {
var webKey *jose.JSONWebKey var webKey *jose.JSONWebKey
webKey, err = s.query.GetActiveSigningWebKey(ctx) webKey, err = getActiveSigningWebKey(ctx)
if err != nil { if err != nil {
return return
} }
@ -88,7 +95,7 @@ func (s *Server) getSignerOnce() signerFunc {
} }
var signingKey op.SigningKey var signingKey op.SigningKey
signingKey, err = s.Provider().Storage().SigningKey(ctx) signingKey, err = getSigningKey(ctx)
if err != nil { if err != nil {
return return
} }
@ -126,7 +133,7 @@ func (s *Server) getUserInfo(userID, projectID string, projectRoleAssertion, use
} }
} }
func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey signerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) { func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey SignerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) {
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
@ -170,7 +177,7 @@ func timeToOIDCExpiresIn(exp time.Time) uint64 {
return uint64(time.Until(exp) / time.Second) return uint64(time.Until(exp) / time.Second)
} }
func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner signerFunc) (_ string, err error) { func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner SignerFunc) (_ string, err error) {
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()

View File

@ -35,6 +35,7 @@ func (s *Server) ClientCredentialsExchange(ctx context.Context, r *op.ClientRequ
client.userID, client.userID,
client.resourceOwner, client.resourceOwner,
client.clientID, client.clientID,
"", // backChannelLogoutURI not needed for service user session
scope, scope,
domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope), domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
[]domain.UserAuthMethodType{domain.UserAuthMethodTypePassword}, []domain.UserAuthMethodType{domain.UserAuthMethodTypePassword},

View File

@ -75,6 +75,7 @@ func (s *Server) codeExchangeV1(ctx context.Context, client *Client, req *oidc.A
authReq.UserID, authReq.UserID,
authReq.UserOrgID, authReq.UserOrgID,
client.client.ClientID, client.client.ClientID,
client.client.BackChannelLogoutURI,
scope, scope,
authReq.Audience, authReq.Audience,
authReq.AuthMethods(), authReq.AuthMethods(),

View File

@ -288,6 +288,7 @@ func (s *Server) createExchangeAccessToken(
userID, userID,
resourceOwner, resourceOwner,
client.client.ClientID, client.client.ClientID,
client.client.BackChannelLogoutURI,
scope, scope,
audience, audience,
authMethods, authMethods,
@ -315,7 +316,7 @@ func (s *Server) createExchangeJWT(
client *Client, client *Client,
getUserInfo userInfoFunc, getUserInfo userInfoFunc,
roleAssertion bool, roleAssertion bool,
getSigner signerFunc, getSigner SignerFunc,
userID, userID,
resourceOwner string, resourceOwner string,
audience, audience,
@ -333,6 +334,7 @@ func (s *Server) createExchangeJWT(
userID, userID,
resourceOwner, resourceOwner,
client.client.ClientID, client.client.ClientID,
client.client.BackChannelLogoutURI,
scope, scope,
audience, audience,
authMethods, authMethods,

View File

@ -45,6 +45,7 @@ func (s *Server) JWTProfile(ctx context.Context, r *op.Request[oidc.JWTProfileGr
client.userID, client.userID,
client.resourceOwner, client.resourceOwner,
client.clientID, client.clientID,
"", // backChannelLogoutURI not needed for service user session
scope, scope,
domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope), domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
[]domain.UserAuthMethodType{domain.UserAuthMethodTypePrivateKey}, []domain.UserAuthMethodType{domain.UserAuthMethodTypePrivateKey},

View File

@ -54,6 +54,7 @@ func (s *Server) refreshTokenV1(ctx context.Context, client *Client, r *op.Clien
refreshToken.UserID, refreshToken.UserID,
refreshToken.ResourceOwner, refreshToken.ResourceOwner,
refreshToken.ClientID, refreshToken.ClientID,
"", // backChannelLogoutURI is not in refresh token view
scope, scope,
refreshToken.Audience, refreshToken.Audience,
AMRToAuthMethodTypes(refreshToken.AuthMethodsReferences), AMRToAuthMethodTypes(refreshToken.AuthMethodsReferences),

View File

@ -112,10 +112,10 @@
</div> </div>
</form> </form>
<script src="{{ resourceUrl " scripts/input_suffix_offset.js" }}"></script> <script src="{{ resourceUrl "scripts/input_suffix_offset.js" }}"></script>
<script src="{{ resourceUrl " scripts/form_submit.js" }}"></script> <script src="{{ resourceUrl "scripts/form_submit.js" }}"></script>
<script src="{{ resourceUrl " scripts/password_policy_check.js" }}"></script> <script src="{{ resourceUrl "scripts/password_policy_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/register_check.js" }}"></script> <script src="{{ resourceUrl "scripts/register_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/loginname_suffix.js" }}"></script> <script src="{{ resourceUrl "scripts/loginname_suffix.js" }}"></script>
{{template "main-bottom" .}} {{template "main-bottom" .}}

View File

@ -6,6 +6,7 @@ import (
"github.com/zitadel/zitadel/internal/api/authz" "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view" "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults" "github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/domain" "github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore" "github.com/zitadel/zitadel/internal/eventstore"
@ -27,26 +28,40 @@ func (repo *UserRepo) Health(ctx context.Context) error {
return repo.Eventstore.Health(ctx) return repo.Eventstore.Health(ctx)
} }
func (repo *UserRepo) UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) { func (repo *UserRepo) UserSessionsByAgentID(ctx context.Context, agentID string) ([]command.HumanSignOutSession, error) {
userSessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID()) sessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID())
if err != nil { if err != nil {
return nil, err return nil, err
} }
userIDs := make([]string, 0, len(userSessions)) signoutSessions := make([]command.HumanSignOutSession, 0, len(sessions))
for _, session := range userSessions { for _, session := range sessions {
if session.State.V == domain.UserSessionStateActive { if session.State.V == domain.UserSessionStateActive && session.ID.Valid {
userIDs = append(userIDs, session.UserID) signoutSessions = append(signoutSessions, command.HumanSignOutSession{
ID: session.ID.String,
UserID: session.UserID,
})
} }
} }
return userIDs, nil return signoutSessions, nil
} }
func (repo *UserRepo) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) { func (repo *UserRepo) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) {
return repo.View.UserAgentIDBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID()) return repo.View.UserAgentIDBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
} }
func (repo *UserRepo) ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) { func (repo *UserRepo) ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, signoutSessions []command.HumanSignOutSession, err error) {
return repo.View.ActiveUserIDsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID()) userAgentID, sessions, err := repo.View.ActiveUserSessionsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
if err != nil {
return "", nil, err
}
signoutSessions = make([]command.HumanSignOutSession, 0, len(sessions))
for sessionID, userID := range sessions {
signoutSessions = append(signoutSessions, command.HumanSignOutSession{
ID: sessionID,
UserID: userID,
})
}
return userAgentID, signoutSessions, nil
} }
func (repo *UserRepo) UserEventsByID(ctx context.Context, id string, changeDate time.Time, eventTypes []eventstore.EventType) ([]eventstore.Event, error) { func (repo *UserRepo) UserEventsByID(ctx context.Context, id string, changeDate time.Time, eventTypes []eventstore.EventType) ([]eventstore.Event, error) {

View File

@ -24,8 +24,8 @@ func (v *View) UserAgentIDBySessionID(ctx context.Context, sessionID, instanceID
return view.UserAgentIDBySessionID(ctx, v.client, sessionID, instanceID) return view.UserAgentIDBySessionID(ctx, v.client, sessionID, instanceID)
} }
func (v *View) ActiveUserIDsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, userIDs []string, err error) { func (v *View) ActiveUserSessionsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, sessions map[string]string, err error) {
return view.ActiveUserIDsBySessionID(ctx, v.client, sessionID, instanceID) return view.ActiveUserSessionsBySessionID(ctx, v.client, sessionID, instanceID)
} }
func (v *View) GetLatestUserSessionSequence(ctx context.Context, instanceID string) (_ *query.CurrentState, err error) { func (v *View) GetLatestUserSessionSequence(ctx context.Context, instanceID string) (_ *query.CurrentState, err error) {

View File

@ -2,10 +2,12 @@ package repository
import ( import (
"context" "context"
"github.com/zitadel/zitadel/internal/command"
) )
type UserRepository interface { type UserRepository interface {
UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) UserSessionsByAgentID(ctx context.Context, agentID string) (sessions []command.HumanSignOutSession, err error)
UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error)
ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, sessions []command.HumanSignOutSession, err error)
} }

View File

@ -173,7 +173,7 @@ func (repo *TokenVerifierRepo) verifySessionToken(ctx context.Context, sessionID
} }
// checkAuthentication ensures the session or token was authenticated (at least a single [domain.UserAuthMethodType]). // checkAuthentication ensures the session or token was authenticated (at least a single [domain.UserAuthMethodType]).
// It will also check if there was a multi factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor // It will also check if there was a multi-factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor
func (repo *TokenVerifierRepo) checkAuthentication(ctx context.Context, authMethods []domain.UserAuthMethodType, userID string) error { func (repo *TokenVerifierRepo) checkAuthentication(ctx context.Context, authMethods []domain.UserAuthMethodType, userID string) error {
if len(authMethods) == 0 { if len(authMethods) == 0 {
return zerrors.ThrowPermissionDenied(nil, "AUTHZ-Kl3p0", "authentication required") return zerrors.ThrowPermissionDenied(nil, "AUTHZ-Kl3p0", "authentication required")

View File

@ -6,7 +6,16 @@ import (
"time" "time"
"github.com/zitadel/logging" "github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database/postgres" )
// Purpose describes which object types are stored by a cache.
type Purpose int
//go:generate enumer -type Purpose -transform snake -trimprefix Purpose
const (
PurposeUnspecified Purpose = iota
PurposeAuthzInstance
PurposeMilestones
) )
// Cache stores objects with a value of type `V`. // Cache stores objects with a value of type `V`.
@ -71,17 +80,19 @@ type Entry[I, K comparable] interface {
Keys(index I) (key []K) Keys(index I) (key []K)
} }
type CachesConfig struct { type Connector int
Connectors struct {
Memory MemoryConnectorConfig
Postgres PostgresConnectorConfig
// Redis redis.Config?
}
Instance *CacheConfig
}
type CacheConfig struct { //go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text
Connector string const (
// Empty line comment ensures empty string for unspecified value
ConnectorUnspecified Connector = iota //
ConnectorMemory
ConnectorPostgres
ConnectorRedis
)
type Config struct {
Connector Connector
// Age since an object was added to the cache, // Age since an object was added to the cache,
// after which the object is considered invalid. // after which the object is considered invalid.
@ -97,14 +108,3 @@ type CacheConfig struct {
// By default only errors are logged to stdout. // By default only errors are logged to stdout.
Log *logging.Config Log *logging.Config
} }
type MemoryConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
}
type PostgresConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
Connection postgres.Config
}

69
internal/cache/connector/connector.go vendored Normal file
View File

@ -0,0 +1,69 @@
// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages.
package connector
import (
"context"
"fmt"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector/gomap"
"github.com/zitadel/zitadel/internal/cache/connector/noop"
"github.com/zitadel/zitadel/internal/cache/connector/pg"
"github.com/zitadel/zitadel/internal/cache/connector/redis"
"github.com/zitadel/zitadel/internal/database"
)
type CachesConfig struct {
Connectors struct {
Memory gomap.Config
Postgres pg.Config
Redis redis.Config
}
Instance *cache.Config
Milestones *cache.Config
}
type Connectors struct {
Config CachesConfig
Memory *gomap.Connector
Postgres *pg.Connector
Redis *redis.Connector
}
func StartConnectors(conf *CachesConfig, client *database.DB) (Connectors, error) {
if conf == nil {
return Connectors{}, nil
}
return Connectors{
Config: *conf,
Memory: gomap.NewConnector(conf.Connectors.Memory),
Postgres: pg.NewConnector(conf.Connectors.Postgres, client),
Redis: redis.NewConnector(conf.Connectors.Redis),
}, nil
}
func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) {
if conf == nil || conf.Connector == cache.ConnectorUnspecified {
return noop.NewCache[I, K, V](), nil
}
if conf.Connector == cache.ConnectorMemory && connectors.Memory != nil {
c := gomap.NewCache[I, K, V](background, indices, *conf)
connectors.Memory.Config.StartAutoPrune(background, c, purpose)
return c, nil
}
if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil {
c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres)
if err != nil {
return nil, fmt.Errorf("start cache: %w", err)
}
connectors.Postgres.Config.AutoPrune.StartAutoPrune(background, c, purpose)
return c, nil
}
if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil {
db := connectors.Redis.Config.DBOffset + int(purpose)
c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices)
return c, nil
}
return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
}

View File

@ -0,0 +1,23 @@
package gomap
import (
"github.com/zitadel/zitadel/internal/cache"
)
type Config struct {
Enabled bool
AutoPrune cache.AutoPruneConfig
}
type Connector struct {
Config cache.AutoPruneConfig
}
func NewConnector(config Config) *Connector {
if !config.Enabled {
return nil
}
return &Connector{
Config: config.AutoPrune,
}
}

View File

@ -14,14 +14,14 @@ import (
) )
type mapCache[I, K comparable, V cache.Entry[I, K]] struct { type mapCache[I, K comparable, V cache.Entry[I, K]] struct {
config *cache.CacheConfig config *cache.Config
indexMap map[I]*index[K, V] indexMap map[I]*index[K, V]
logger *slog.Logger logger *slog.Logger
} }
// NewCache returns an in-memory Cache implementation based on the builtin go map type. // NewCache returns an in-memory Cache implementation based on the builtin go map type.
// Object values are stored as-is and there is no encoding or decoding involved. // Object values are stored as-is and there is no encoding or decoding involved.
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.CacheConfig) cache.PrunerCache[I, K, V] { func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] {
m := &mapCache[I, K, V]{ m := &mapCache[I, K, V]{
config: &config, config: &config,
indexMap: make(map[I]*index[K, V], len(indices)), indexMap: make(map[I]*index[K, V], len(indices)),
@ -116,7 +116,7 @@ func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {
type index[K comparable, V any] struct { type index[K comparable, V any] struct {
mutex sync.RWMutex mutex sync.RWMutex
config *cache.CacheConfig config *cache.Config
entries map[K]*entry[V] entries map[K]*entry[V]
} }
@ -177,7 +177,7 @@ type entry[V any] struct {
lastUse atomic.Int64 // UnixMicro time lastUse atomic.Int64 // UnixMicro time
} }
func (e *entry[V]) isValid(c *cache.CacheConfig) bool { func (e *entry[V]) isValid(c *cache.Config) bool {
if e.invalid.Load() { if e.invalid.Load() {
return false return false
} }

View File

@ -41,7 +41,7 @@ func (o *testObject) Keys(index testIndex) []string {
} }
func Test_mapCache_Get(t *testing.T) { func Test_mapCache_Get(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second, MaxAge: time.Second,
LastUseAge: time.Second / 4, LastUseAge: time.Second / 4,
Log: &logging.Config{ Log: &logging.Config{
@ -103,7 +103,7 @@ func Test_mapCache_Get(t *testing.T) {
} }
func Test_mapCache_Invalidate(t *testing.T) { func Test_mapCache_Invalidate(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second, MaxAge: time.Second,
LastUseAge: time.Second / 4, LastUseAge: time.Second / 4,
Log: &logging.Config{ Log: &logging.Config{
@ -124,7 +124,7 @@ func Test_mapCache_Invalidate(t *testing.T) {
} }
func Test_mapCache_Delete(t *testing.T) { func Test_mapCache_Delete(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second, MaxAge: time.Second,
LastUseAge: time.Second / 4, LastUseAge: time.Second / 4,
Log: &logging.Config{ Log: &logging.Config{
@ -157,7 +157,7 @@ func Test_mapCache_Delete(t *testing.T) {
} }
func Test_mapCache_Prune(t *testing.T) { func Test_mapCache_Prune(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second, MaxAge: time.Second,
LastUseAge: time.Second / 4, LastUseAge: time.Second / 4,
Log: &logging.Config{ Log: &logging.Config{
@ -193,7 +193,7 @@ func Test_mapCache_Prune(t *testing.T) {
} }
func Test_mapCache_Truncate(t *testing.T) { func Test_mapCache_Truncate(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{ c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second, MaxAge: time.Second,
LastUseAge: time.Second / 4, LastUseAge: time.Second / 4,
Log: &logging.Config{ Log: &logging.Config{
@ -235,7 +235,7 @@ func Test_entry_isValid(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
fields fields fields fields
config *cache.CacheConfig config *cache.Config
want bool want bool
}{ }{
{ {
@ -245,7 +245,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: true, invalid: true,
lastUse: time.Now(), lastUse: time.Now(),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -258,7 +258,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false, invalid: false,
lastUse: time.Now(), lastUse: time.Now(),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -271,7 +271,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false, invalid: false,
lastUse: time.Now(), lastUse: time.Now(),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
want: true, want: true,
@ -283,7 +283,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false, invalid: false,
lastUse: time.Now().Add(-(time.Second * 2)), lastUse: time.Now().Add(-(time.Second * 2)),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -296,7 +296,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false, invalid: false,
lastUse: time.Now().Add(-(time.Second * 2)), lastUse: time.Now().Add(-(time.Second * 2)),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
}, },
want: true, want: true,
@ -308,7 +308,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false, invalid: false,
lastUse: time.Now(), lastUse: time.Now(),
}, },
config: &cache.CacheConfig{ config: &cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },

View File

@ -0,0 +1,28 @@
package pg
import (
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/database"
)
type Config struct {
Enabled bool
AutoPrune cache.AutoPruneConfig
}
type Connector struct {
PGXPool
Dialect string
Config Config
}
func NewConnector(config Config, client *database.DB) *Connector {
if !config.Enabled {
return nil
}
return &Connector{
PGXPool: client.Pool,
Dialect: client.Type(),
Config: config,
}
}

View File

@ -40,25 +40,25 @@ type PGXPool interface {
} }
type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct { type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
name string purpose cache.Purpose
config *cache.CacheConfig config *cache.Config
indices []I indices []I
pool PGXPool connector *Connector
logger *slog.Logger logger *slog.Logger
} }
// NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables. // NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name string, config cache.CacheConfig, indices []I, pool PGXPool, dialect string) (cache.PrunerCache[I, K, V], error) { func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
c := &pgCache[I, K, V]{ c := &pgCache[I, K, V]{
name: name, purpose: purpose,
config: &config, config: &config,
indices: indices, indices: indices,
pool: pool, connector: connector,
logger: config.Log.Slog().With("cache_name", name), logger: config.Log.Slog().With("cache_purpose", purpose),
} }
c.logger.InfoContext(ctx, "pg cache logging enabled") c.logger.InfoContext(ctx, "pg cache logging enabled")
if dialect == "postgres" { if connector.Dialect == "postgres" {
if err := c.createPartition(ctx); err != nil { if err := c.createPartition(ctx); err != nil {
return nil, err return nil, err
} }
@ -68,10 +68,10 @@ func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name
func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error { func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error {
var query strings.Builder var query strings.Builder
if err := createPartitionTmpl.Execute(&query, c.name); err != nil { if err := createPartitionTmpl.Execute(&query, c.purpose.String()); err != nil {
return err return err
} }
_, err := c.pool.Exec(ctx, query.String()) _, err := c.connector.Exec(ctx, query.String())
return err return err
} }
@ -87,7 +87,7 @@ func (c *pgCache[I, K, V]) set(ctx context.Context, entry V) (err error) {
keys := c.indexKeysFromEntry(entry) keys := c.indexKeysFromEntry(entry)
c.logger.DebugContext(ctx, "pg cache set", "index_key", keys) c.logger.DebugContext(ctx, "pg cache set", "index_key", keys)
_, err = c.pool.Exec(ctx, setQuery, c.name, keys, entry) _, err = c.connector.Exec(ctx, setQuery, c.purpose.String(), keys, entry)
if err != nil { if err != nil {
c.logger.ErrorContext(ctx, "pg cache set", "err", err) c.logger.ErrorContext(ctx, "pg cache set", "err", err)
return err return err
@ -117,7 +117,7 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er
if !slices.Contains(c.indices, index) { if !slices.Contains(c.indices, index) {
return value, cache.NewIndexUnknownErr(index) return value, cache.NewIndexUnknownErr(index)
} }
err = c.pool.QueryRow(ctx, getQuery, c.name, index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value) err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
return value, err return value, err
} }
@ -125,7 +125,7 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, invalidateQuery, c.name, index, keys) _, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys)
c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys) c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
return err return err
} }
@ -134,7 +134,7 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, deleteQuery, c.name, index, keys) _, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys)
c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys) c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
return err return err
} }
@ -143,7 +143,7 @@ func (c *pgCache[I, K, V]) Prune(ctx context.Context) (err error) {
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, pruneQuery, c.name, c.config.MaxAge, c.config.LastUseAge) _, err = c.connector.Exec(ctx, pruneQuery, c.purpose.String(), c.config.MaxAge, c.config.LastUseAge)
c.logger.DebugContext(ctx, "pg cache prune") c.logger.DebugContext(ctx, "pg cache prune")
return err return err
} }
@ -152,7 +152,7 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
ctx, span := tracing.NewSpan(ctx) ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }() defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, truncateQuery, c.name) _, err = c.connector.Exec(ctx, truncateQuery, c.purpose.String())
c.logger.DebugContext(ctx, "pg cache truncate") c.logger.DebugContext(ctx, "pg cache truncate")
return err return err
} }

View File

@ -67,7 +67,7 @@ func TestNewCache(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
conf := cache.CacheConfig{ conf := cache.Config{
Log: &logging.Config{ Log: &logging.Config{
Level: "debug", Level: "debug",
AddSource: true, AddSource: true,
@ -76,8 +76,12 @@ func TestNewCache(t *testing.T) {
pool, err := pgxmock.NewPool() pool, err := pgxmock.NewPool()
require.NoError(t, err) require.NoError(t, err)
tt.expect(pool) tt.expect(pool)
connector := &Connector{
PGXPool: pool,
Dialect: "postgres",
}
c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres") c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
require.ErrorIs(t, err, tt.wantErr) require.ErrorIs(t, err, tt.wantErr)
if tt.wantErr == nil { if tt.wantErr == nil {
assert.NotNil(t, c) assert.NotNil(t, c)
@ -111,7 +115,7 @@ func Test_pgCache_Set(t *testing.T) {
}, },
expect: func(ppi pgxmock.PgxCommonIface) { expect: func(ppi pgxmock.PgxCommonIface) {
ppi.ExpectExec(queryExpect). ppi.ExpectExec(queryExpect).
WithArgs("test", WithArgs(cachePurpose.String(),
[]indexKey[testIndex, string]{ []indexKey[testIndex, string]{
{IndexID: testIndexID, IndexKey: "id1"}, {IndexID: testIndexID, IndexKey: "id1"},
{IndexID: testIndexName, IndexKey: "foo"}, {IndexID: testIndexName, IndexKey: "foo"},
@ -135,7 +139,7 @@ func Test_pgCache_Set(t *testing.T) {
}, },
expect: func(ppi pgxmock.PgxCommonIface) { expect: func(ppi pgxmock.PgxCommonIface) {
ppi.ExpectExec(queryExpect). ppi.ExpectExec(queryExpect).
WithArgs("test", WithArgs(cachePurpose.String(),
[]indexKey[testIndex, string]{ []indexKey[testIndex, string]{
{IndexID: testIndexID, IndexKey: "id1"}, {IndexID: testIndexID, IndexKey: "id1"},
{IndexID: testIndexName, IndexKey: "foo"}, {IndexID: testIndexName, IndexKey: "foo"},
@ -151,7 +155,7 @@ func Test_pgCache_Set(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
c, pool := prepareCache(t, cache.CacheConfig{}) c, pool := prepareCache(t, cache.Config{})
defer pool.Close() defer pool.Close()
tt.expect(pool) tt.expect(pool)
@ -173,7 +177,7 @@ func Test_pgCache_Get(t *testing.T) {
} }
tests := []struct { tests := []struct {
name string name string
config cache.CacheConfig config cache.Config
args args args args
expect func(pgxmock.PgxCommonIface) expect func(pgxmock.PgxCommonIface)
want *testObject want *testObject
@ -181,7 +185,7 @@ func Test_pgCache_Get(t *testing.T) {
}{ }{
{ {
name: "invalid index", name: "invalid index",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -194,7 +198,7 @@ func Test_pgCache_Get(t *testing.T) {
}, },
{ {
name: "no rows", name: "no rows",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
@ -204,14 +208,14 @@ func Test_pgCache_Get(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect). pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)). WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
WillReturnRows(pgxmock.NewRows([]string{"payload"})) WillReturnRows(pgxmock.NewRows([]string{"payload"}))
}, },
wantOk: false, wantOk: false,
}, },
{ {
name: "error", name: "error",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
@ -221,14 +225,14 @@ func Test_pgCache_Get(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect). pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)). WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
WillReturnError(pgx.ErrTxClosed) WillReturnError(pgx.ErrTxClosed)
}, },
wantOk: false, wantOk: false,
}, },
{ {
name: "ok", name: "ok",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -238,7 +242,7 @@ func Test_pgCache_Get(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect). pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Minute, time.Second). WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second).
WillReturnRows( WillReturnRows(
pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{ pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
ID: "id1", ID: "id1",
@ -276,14 +280,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
} }
tests := []struct { tests := []struct {
name string name string
config cache.CacheConfig config cache.Config
args args args args
expect func(pgxmock.PgxCommonIface) expect func(pgxmock.PgxCommonIface)
wantErr error wantErr error
}{ }{
{ {
name: "error", name: "error",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
@ -293,14 +297,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}). WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnError(pgx.ErrTxClosed) WillReturnError(pgx.ErrTxClosed)
}, },
wantErr: pgx.ErrTxClosed, wantErr: pgx.ErrTxClosed,
}, },
{ {
name: "ok", name: "ok",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -310,7 +314,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}). WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnResult(pgxmock.NewResult("DELETE", 1)) WillReturnResult(pgxmock.NewResult("DELETE", 1))
}, },
}, },
@ -338,14 +342,14 @@ func Test_pgCache_Delete(t *testing.T) {
} }
tests := []struct { tests := []struct {
name string name string
config cache.CacheConfig config cache.Config
args args args args
expect func(pgxmock.PgxCommonIface) expect func(pgxmock.PgxCommonIface)
wantErr error wantErr error
}{ }{
{ {
name: "error", name: "error",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
@ -355,14 +359,14 @@ func Test_pgCache_Delete(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}). WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnError(pgx.ErrTxClosed) WillReturnError(pgx.ErrTxClosed)
}, },
wantErr: pgx.ErrTxClosed, wantErr: pgx.ErrTxClosed,
}, },
{ {
name: "ok", name: "ok",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
@ -372,7 +376,7 @@ func Test_pgCache_Delete(t *testing.T) {
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}). WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnResult(pgxmock.NewResult("DELETE", 1)) WillReturnResult(pgxmock.NewResult("DELETE", 1))
}, },
}, },
@ -396,32 +400,32 @@ func Test_pgCache_Prune(t *testing.T) {
queryExpect := regexp.QuoteMeta(pruneQuery) queryExpect := regexp.QuoteMeta(pruneQuery)
tests := []struct { tests := []struct {
name string name string
config cache.CacheConfig config cache.Config
expect func(pgxmock.PgxCommonIface) expect func(pgxmock.PgxCommonIface)
wantErr error wantErr error
}{ }{
{ {
name: "error", name: "error",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", time.Duration(0), time.Duration(0)). WithArgs(cachePurpose.String(), time.Duration(0), time.Duration(0)).
WillReturnError(pgx.ErrTxClosed) WillReturnError(pgx.ErrTxClosed)
}, },
wantErr: pgx.ErrTxClosed, wantErr: pgx.ErrTxClosed,
}, },
{ {
name: "ok", name: "ok",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test", time.Minute, time.Second). WithArgs(cachePurpose.String(), time.Minute, time.Second).
WillReturnResult(pgxmock.NewResult("DELETE", 1)) WillReturnResult(pgxmock.NewResult("DELETE", 1))
}, },
}, },
@ -445,32 +449,32 @@ func Test_pgCache_Truncate(t *testing.T) {
queryExpect := regexp.QuoteMeta(truncateQuery) queryExpect := regexp.QuoteMeta(truncateQuery)
tests := []struct { tests := []struct {
name string name string
config cache.CacheConfig config cache.Config
expect func(pgxmock.PgxCommonIface) expect func(pgxmock.PgxCommonIface)
wantErr error wantErr error
}{ }{
{ {
name: "error", name: "error",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: 0, MaxAge: 0,
LastUseAge: 0, LastUseAge: 0,
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test"). WithArgs(cachePurpose.String()).
WillReturnError(pgx.ErrTxClosed) WillReturnError(pgx.ErrTxClosed)
}, },
wantErr: pgx.ErrTxClosed, wantErr: pgx.ErrTxClosed,
}, },
{ {
name: "ok", name: "ok",
config: cache.CacheConfig{ config: cache.Config{
MaxAge: time.Minute, MaxAge: time.Minute,
LastUseAge: time.Second, LastUseAge: time.Second,
}, },
expect: func(pci pgxmock.PgxCommonIface) { expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect). pci.ExpectExec(queryExpect).
WithArgs("test"). WithArgs(cachePurpose.String()).
WillReturnResult(pgxmock.NewResult("DELETE", 1)) WillReturnResult(pgxmock.NewResult("DELETE", 1))
}, },
}, },
@ -491,18 +495,18 @@ func Test_pgCache_Truncate(t *testing.T) {
} }
const ( const (
cacheName = "test" cachePurpose = cache.PurposeAuthzInstance
expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_test expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_authz_instance
partition of cache.objects partition of cache.objects
for values in ('test'); for values in ('authz_instance');
create unlogged table if not exists cache.string_keys_test create unlogged table if not exists cache.string_keys_authz_instance
partition of cache.string_keys partition of cache.string_keys
for values in ('test'); for values in ('authz_instance');
` `
) )
func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) { func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
conf.Log = &logging.Config{ conf.Log = &logging.Config{
Level: "debug", Level: "debug",
AddSource: true, AddSource: true,
@ -512,8 +516,11 @@ func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testI
pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)). pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0)) WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
connector := &Connector{
c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres") PGXPool: pool,
Dialect: "postgres",
}
c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
require.NoError(t, err) require.NoError(t, err)
return c, pool return c, pool
} }

View File

@ -0,0 +1,10 @@
-- remove deletes an object, every cache key pointing at it, and the set
-- that tracks those keys. keySetKey is provided by a companion helper
-- script loaded together with this one.
local function remove(object_id)
	local set_key = keySetKey(object_id)
	for _, key in ipairs(redis.call("SMEMBERS", set_key)) do
		redis.call("DEL", key)
	end
	redis.call("DEL", set_key)
	redis.call("DEL", object_id)
end

View File

@ -0,0 +1,3 @@
-- Prefix script: switches the connection to the logical Redis DB given as
-- the first ARGV entry, so each cache gets its own DB namespace.
-- When prepended to another script, it consumes ARGV[1]; the following
-- script's own arguments therefore start at ARGV[2].
redis.call("SELECT", ARGV[1])

View File

@ -0,0 +1,17 @@
-- keySetKey returns the redis key of the set containing all keys to the object.
local function keySetKey(object_id)
	return string.format("%s-keys", object_id)
end
-- getTime returns the current unix time in seconds, taken from the Redis
-- TIME command (which replies {seconds, microseconds}); only the seconds
-- part is used.
local function getTime()
    return tonumber(redis.call('TIME')[1])
end
-- getCall wraps redis.call so a nil is returned instead of false.
-- Redis maps a missing key / nil bulk reply to the Lua boolean false;
-- normalizing to nil lets callers use plain `== nil` checks.
local function getCall (...)
    local result = redis.call(...)
    if result == false then
        return nil
    end
    return result
end

View File

@ -0,0 +1,154 @@
package redis
import (
"crypto/tls"
"time"
"github.com/redis/go-redis/v9"
)
// Config holds all settings for connecting to a Redis server used as a cache
// backend. Most fields are passed straight through to the go-redis v9 client
// options; see optionsFromConfig for the exact mapping.
type Config struct {
	// Enabled toggles the Redis connector; when false, NewConnector returns nil.
	Enabled bool
	// The network type, either tcp or unix.
	// Default is tcp.
	Network string
	// host:port address.
	Addr string
	// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
	ClientName string
	// Use the specified Username to authenticate the current connection
	// with one of the connections defined in the ACL list when connecting
	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string
	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
	// or the User Password when connecting to a Redis 6.0 instance, or greater,
	// that is using the Redis ACL system.
	Password string
	// Each ZITADEL cache uses an incremental DB namespace.
	// This option offsets the first DB so it doesn't conflict with other databases on the same server.
	// Note that ZITADEL uses FLUSHDB command to truncate a cache.
	// This can have destructive consequences when overlapping DB namespaces are used.
	DBOffset int
	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration
	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Supported values:
	// - `0` - default timeout (3 seconds).
	// - `-1` - no timeout (block indefinitely).
	// - `-2` - disables SetReadDeadline calls completely.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking. Supported values:
	// - `0` - default timeout (3 seconds).
	// - `-1` - no timeout (block indefinitely).
	// - `-2` - disables SetWriteDeadline calls completely.
	WriteTimeout time.Duration
	// Type of connection pool.
	// true for FIFO pool, false for LIFO pool.
	// Note that FIFO has slightly higher overhead compared to LIFO,
	// but it helps closing idle connections faster reducing the pool size.
	PoolFIFO bool
	// Base number of socket connections.
	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
	// If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize,
	// you can limit it through MaxActiveConns
	PoolSize int
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration
	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	// Default is 0. the idle connections are not closed by default.
	MinIdleConns int
	// Maximum number of idle connections.
	// Default is 0. the idle connections are not closed by default.
	MaxIdleConns int
	// Maximum number of connections allocated by the pool at a given time.
	// When zero, there is no limit on the number of connections in the pool.
	MaxActiveConns int
	// ConnMaxIdleTime is the maximum amount of time a connection may be idle.
	// Should be less than server's timeout.
	//
	// Expired connections may be closed lazily before reuse.
	// If d <= 0, connections are not closed due to a connection's idle time.
	//
	// Default is 30 minutes. -1 disables idle timeout check.
	ConnMaxIdleTime time.Duration
	// ConnMaxLifetime is the maximum amount of time a connection may be reused.
	//
	// Expired connections may be closed lazily before reuse.
	// If <= 0, connections are not closed due to a connection's age.
	//
	// Default is to not close idle connections.
	ConnMaxLifetime time.Duration
	// EnableTLS wraps the connection in TLS using an empty tls.Config,
	// i.e. system root CAs with full certificate verification.
	EnableTLS bool
	// Disable set-lib on connect. Default is false.
	// NOTE(review): the spelling mirrors go-redis v9's `DisableIndentity`
	// option field; do not "fix" it here or the mapping breaks.
	DisableIndentity bool
	// Add suffix to client name. Default is empty.
	IdentitySuffix string
}
// Connector embeds a ready-to-use go-redis client together with the Config
// it was created from, so callers can read back settings (e.g. DBOffset)
// after construction.
type Connector struct {
	*redis.Client
	Config Config
}
// NewConnector builds a Connector from the given Config.
// It returns nil when config.Enabled is false, letting callers treat an
// absent Redis cache as "not configured".
func NewConnector(config Config) *Connector {
	if !config.Enabled {
		return nil
	}
	client := redis.NewClient(optionsFromConfig(config))
	return &Connector{Client: client, Config: config}
}
// optionsFromConfig translates a Config into go-redis client options.
// RESP3 (Protocol: 3) and context-aware timeouts are always enabled; a TLS
// config (empty tls.Config) is only attached when c.EnableTLS is set.
func optionsFromConfig(c Config) *redis.Options {
	var tlsConfig *tls.Config
	if c.EnableTLS {
		tlsConfig = new(tls.Config)
	}
	return &redis.Options{
		Network:               c.Network,
		Addr:                  c.Addr,
		ClientName:            c.ClientName,
		Protocol:              3,
		Username:              c.Username,
		Password:              c.Password,
		MaxRetries:            c.MaxRetries,
		MinRetryBackoff:       c.MinRetryBackoff,
		MaxRetryBackoff:       c.MaxRetryBackoff,
		DialTimeout:           c.DialTimeout,
		ReadTimeout:           c.ReadTimeout,
		WriteTimeout:          c.WriteTimeout,
		ContextTimeoutEnabled: true,
		PoolFIFO:              c.PoolFIFO,
		PoolTimeout:           c.PoolTimeout,
		MinIdleConns:          c.MinIdleConns,
		MaxIdleConns:          c.MaxIdleConns,
		MaxActiveConns:        c.MaxActiveConns,
		ConnMaxIdleTime:       c.ConnMaxIdleTime,
		ConnMaxLifetime:       c.ConnMaxLifetime,
		DisableIndentity:      c.DisableIndentity,
		IdentitySuffix:        c.IdentitySuffix,
		TLSConfig:             tlsConfig,
	}
}

29
internal/cache/connector/redis/get.lua vendored Normal file
View File

@ -0,0 +1,29 @@
-- Looks up an object via one of its cache keys (KEYS[1]) and returns its
-- serialized payload, refreshing the usage-based TTL on the way out.
-- Returns nil on a miss or when the object has expired, removing stale
-- key entries as a side effect.
-- Depends on helpers (getCall, getTime, remove) that must be loaded
-- together with this script.
local result = redis.call("GET", KEYS[1])
if result == false then
	return nil
end
local object_id = tostring(result)

local object = getCall("HGET", object_id, "object")
if object == nil then
	-- object expired, but there are keys that need to be cleaned up
	remove(object_id)
	return nil
end

-- max-age must be checked manually.
-- BUGFIX: HGET returns a bulk string and Lua comparison operators do not
-- coerce, so comparing the raw reply against a number would raise
-- "attempt to compare string with number". Convert with tonumber first,
-- as is already done for usage_lifetime below.
local expiry = tonumber(getCall("HGET", object_id, "expiry"))
if expiry ~= nil and expiry > 0 then
	if getTime() > expiry then
		remove(object_id)
		return nil
	end
end

local usage_lifetime = getCall("HGET", object_id, "usage_lifetime")
-- reset usage based TTL
if not (usage_lifetime == nil) and tonumber(usage_lifetime) > 0 then
	redis.call('EXPIRE', object_id, usage_lifetime)
end

return object

Some files were not shown because too many files have changed in this diff Show More