Merge branch 'main' into next

# Conflicts:
#	internal/api/grpc/admin/integration_test/server_test.go
#	internal/api/grpc/resources/action/v3alpha/integration_test/execution_target_test.go
#	internal/api/grpc/resources/action/v3alpha/integration_test/query_test.go
#	internal/api/grpc/resources/webkey/v3/integration_test/webkey_integration_test.go
#	internal/api/grpc/user/v2/integration_test/query_test.go
Livio Spring 2024-11-12 13:56:05 +01:00
commit 9a05e671fb
No known key found for this signature in database
GPG Key ID: 26BB1C2FA5952CF0
232 changed files with 5764 additions and 1900 deletions

View File

@ -8,25 +8,24 @@ services:
network_mode: service:db
command: sleep infinity
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
db:
image: postgres:latest
restart: unless-stopped
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
PGUSER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: postgres
volumes:
postgres-data:

View File

@ -1,6 +1,7 @@
name: 📄 Documentation
description: Create an issue for missing or wrong documentation.
labels: ["docs"]
type: task
body:
- type: markdown
attributes:

View File

@ -1,6 +1,7 @@
name: 💡 Proposal / Feature request
description: "Create an issue for a feature request/proposal."
labels: ["enhancement"]
type: enhancement
body:
- type: markdown
attributes:

View File

@ -1,54 +0,0 @@
name: 🛠️ Improvement
description: "Create an new issue for an improvment in ZITADEL"
labels: ["improvement"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this improvement request
- type: checkboxes
id: preflight
attributes:
label: Preflight Checklist
options:
- label: I could not find a solution in the existing issues, docs, nor discussions
required: true
- label: I have joined the [ZITADEL chat](https://zitadel.com/chat)
- type: textarea
id: problem
attributes:
label: Describe your problem
description: Please describe the problem this improvement is supposed to solve.
placeholder: Describe the problem you have
validations:
required: true
- type: textarea
id: solution
attributes:
label: Describe your ideal solution
description: Which solution do you propose?
placeholder: As a [type of user], I want [some goal] so that [some reason].
validations:
required: true
- type: input
id: version
attributes:
label: Version
description: Which version of ZITADEL are you using?
- type: dropdown
id: environment
attributes:
label: Environment
description: How do you use ZITADEL?
options:
- ZITADEL Cloud
- Self-hosted
validations:
required: true
- type: textarea
id: additional
attributes:
label: Additional Context
description: Please add any other information that could be useful.

View File

@ -36,6 +36,10 @@ jobs:
--health-timeout 5s
--health-retries 5
--health-start-period 10s
cache:
image: redis:latest
ports:
- 6379:6379
steps:
-
uses: actions/checkout@v4

1
.gitignore vendored
View File

@ -87,4 +87,5 @@ go.work.sum
load-test/node_modules
load-test/yarn-error.log
load-test/dist
load-test/output/*
.vercel

View File

@ -2,7 +2,7 @@ go_bin := "$$(go env GOPATH)/bin"
gen_authopt_path := "$(go_bin)/protoc-gen-authoption"
gen_zitadel_path := "$(go_bin)/protoc-gen-zitadel"
now := $(shell date --rfc-3339=seconds | sed 's/ /T/')
now := $(shell date '+%Y-%m-%dT%T%z' | sed -E 's/.([0-9]{2})([0-9]{2})$$/-\1:\2/')
VERSION ?= development-$(now)
COMMIT_SHA ?= $(shell git rev-parse HEAD)
ZITADEL_IMAGE ?= zitadel:local
@ -63,12 +63,12 @@ endif
.PHONY: core_grpc_dependencies
core_grpc_dependencies:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions
go install github.com/envoyproxy/protoc-gen-validate@v1.0.4 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions
go install github.com/bufbuild/buf/cmd/buf@v1.34.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.35.1 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions
go install github.com/envoyproxy/protoc-gen-validate@v1.1.0 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions
go install github.com/bufbuild/buf/cmd/buf@v1.45.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions
.PHONY: core_api
core_api: core_api_generator core_grpc_dependencies
@ -113,7 +113,7 @@ core_unit_test:
.PHONY: core_integration_db_up
core_integration_db_up:
docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR}
docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} cache
.PHONY: core_integration_db_down
core_integration_db_down:

File diff suppressed because one or more lines are too long

View File

@ -25,7 +25,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
@ -72,7 +72,7 @@ type ProjectionsConfig struct {
EncryptionKeys *encryption.EncryptionKeyConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser
Eventstore *eventstore.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig
Admin admin_es.Config
Auth auth_es.Config
@ -128,13 +128,16 @@ func projections(
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, client)
logging.OnError(err).Fatal("unable to start caches")
queries, err := query.StartQueries(
ctx,
es,
esV4.Querier,
client,
client,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@ -161,8 +164,9 @@ func projections(
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
es,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
staticStorage,
@ -199,6 +203,7 @@ func projections(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@ -212,6 +217,8 @@ func projections(
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)
config.Auth.Spooler.Client = client

View File

@ -9,6 +9,7 @@ import (
"golang.org/x/text/language"
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/crypto"
@ -64,7 +65,9 @@ func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error
return err
}
cmd, err := command.StartCommands(mig.es,
cmd, err := command.StartCommands(ctx,
mig.es,
connector.Connectors{},
mig.defaults,
mig.zitadelRoles,
nil,

118
cmd/setup/36.go Normal file
View File

@ -0,0 +1,118 @@
package setup
import (
"context"
_ "embed"
"errors"
"fmt"
"slices"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/repository/milestone"
)
var (
//go:embed 36.sql
getProjectedMilestones string
)
type FillV3Milestones struct {
dbClient *database.DB
eventstore *eventstore.Eventstore
}
type instanceMilestone struct {
Type milestone.Type
Reached time.Time
Pushed *time.Time
}
func (mig *FillV3Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
im, err := mig.getProjectedMilestones(ctx)
if err != nil {
return err
}
return mig.pushEventsByInstance(ctx, im)
}
func (mig *FillV3Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
type row struct {
InstanceID string
Type milestone.Type
Reached time.Time
Pushed *time.Time
}
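// Query's error is deliberately discarded: pgx v5 surfaces it through CollectRows below.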
rows, _ := mig.dbClient.Pool.Query(ctx, getProjectedMilestones)
scanned, err := pgx.CollectRows(rows, pgx.RowToStructByPos[row])
var pgError *pgconn.PgError
// catch ERROR: relation "projections.milestones" does not exist
if errors.As(err, &pgError) && pgError.SQLState() == "42P01" {
return nil, nil
}
if err != nil {
return nil, fmt.Errorf("milestones get: %w", err)
}
milestoneMap := make(map[string][]instanceMilestone)
for _, s := range scanned {
milestoneMap[s.InstanceID] = append(milestoneMap[s.InstanceID], instanceMilestone{
Type: s.Type,
Reached: s.Reached,
Pushed: s.Pushed,
})
}
return milestoneMap, nil
}
// pushEventsByInstance creates the v2 milestone events by instance.
// This prevents trying to push 6*N(instance) events in a single push.
func (mig *FillV3Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
// keep a deterministic order by instance ID.
order := make([]string, 0, len(milestoneMap))
for k := range milestoneMap {
order = append(order, k)
}
slices.Sort(order)
for i, instanceID := range order {
logging.WithFields("instance_id", instanceID, "migration", mig.String(), "progress", fmt.Sprintf("%d/%d", i+1, len(order))).Info("filter existing milestone events")
// because each Push runs in a separate TX, we need to make sure that events
// from a partially executed migration are not pushed again.
model := command.NewMilestonesReachedWriteModel(instanceID)
if err := mig.eventstore.FilterToQueryReducer(ctx, model); err != nil {
return fmt.Errorf("milestones filter: %w", err)
}
if model.InstanceCreated {
logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("milestone events already migrated")
continue // This instance was migrated, skip
}
logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("push milestone events")
aggregate := milestone.NewInstanceAggregate(instanceID)
cmds := make([]eventstore.Command, 0, len(milestoneMap[instanceID])*2)
for _, m := range milestoneMap[instanceID] {
cmds = append(cmds, milestone.NewReachedEventWithDate(ctx, aggregate, m.Type, &m.Reached))
if m.Pushed != nil {
cmds = append(cmds, milestone.NewPushedEventWithDate(ctx, aggregate, m.Type, nil, "", m.Pushed))
}
}
if _, err := mig.eventstore.Push(ctx, cmds...); err != nil {
return fmt.Errorf("milestones push: %w", err)
}
}
return nil
}
func (mig *FillV3Milestones) String() string {
return "36_fill_v3_milestones"
}

4
cmd/setup/36.sql Normal file
View File

@ -0,0 +1,4 @@
SELECT instance_id, type, reached_date, last_pushed_date
FROM projections.milestones
WHERE reached_date IS NOT NULL
ORDER BY instance_id, reached_date;

27
cmd/setup/37.go Normal file
View File

@ -0,0 +1,27 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 37.sql
addBackChannelLogoutURI string
)
type Apps7OIDConfigsBackChannelLogoutURI struct {
dbClient *database.DB
}
func (mig *Apps7OIDConfigsBackChannelLogoutURI) Execute(ctx context.Context, _ eventstore.Event) error {
_, err := mig.dbClient.ExecContext(ctx, addBackChannelLogoutURI)
return err
}
func (mig *Apps7OIDConfigsBackChannelLogoutURI) String() string {
return "37_apps7_oidc_configs_add_back_channel_logout_uri"
}

1
cmd/setup/37.sql Normal file
View File

@ -0,0 +1 @@
ALTER TABLE IF EXISTS projections.apps7_oidc_configs ADD COLUMN IF NOT EXISTS back_channel_logout_uri TEXT;

28
cmd/setup/38.go Normal file
View File

@ -0,0 +1,28 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 38.sql
backChannelLogoutCurrentState string
)
type BackChannelLogoutNotificationStart struct {
dbClient *database.DB
esClient *eventstore.Eventstore
}
func (mig *BackChannelLogoutNotificationStart) Execute(ctx context.Context, e eventstore.Event) error {
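// Seed the new projection's current state from the triggering event's sequence,
// creation date, and position ($1-$3 in 38.sql), so the back-channel logout
// handler can start from this position instead of replaying the full event history.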
_, err := mig.dbClient.ExecContext(ctx, backChannelLogoutCurrentState, e.Sequence(), e.CreatedAt(), e.Position())
return err
}
func (mig *BackChannelLogoutNotificationStart) String() string {
return "38_back_channel_logout_notification_start_"
}

20
cmd/setup/38.sql Normal file
View File

@ -0,0 +1,20 @@
INSERT INTO projections.current_states (
instance_id
, projection_name
, last_updated
, sequence
, event_date
, position
, filter_offset
)
SELECT instance_id
, 'projections.notifications_back_channel_logout'
, now()
, $1
, $2
, $3
, 0
FROM eventstore.events2
WHERE aggregate_type = 'instance'
AND event_type = 'instance.added'
ON CONFLICT DO NOTHING;

View File

@ -15,7 +15,7 @@ import (
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/ui/login"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
@ -31,7 +31,7 @@ import (
type Config struct {
ForMirror bool
Database database.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig
SystemDefaults systemdefaults.SystemDefaults
InternalAuthZ internal_authz.Config
ExternalDomain string
@ -122,6 +122,9 @@ type Steps struct {
s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid
s34AddCacheSchema *AddCacheSchema
s35AddPositionToIndexEsWm *AddPositionToIndexEsWm
s36FillV2Milestones *FillV3Milestones
s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI
s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart
}
func MustNewSteps(v *viper.Viper) *Steps {

View File

@ -3,6 +3,7 @@ package setup
import (
"context"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/eventstore"
@ -31,8 +32,9 @@ func (mig *externalConfigChange) Check(lastRun map[string]interface{}) bool {
}
func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error {
cmd, err := command.StartCommands(
cmd, err := command.StartCommands(ctx,
mig.es,
connector.Connectors{},
mig.defaults,
nil,
nil,

View File

@ -22,6 +22,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database"
@ -165,6 +166,9 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient}
steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient}
steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient}
steps.s36FillV2Milestones = &FillV3Milestones{dbClient: queryDBClient, eventstore: eventstoreClient}
steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient}
steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient}
err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
@ -209,6 +213,8 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s30FillFieldsForOrgDomainVerified,
steps.s34AddCacheSchema,
steps.s35AddPositionToIndexEsWm,
steps.s36FillV2Milestones,
steps.s38BackChannelLogoutNotificationStart,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@ -225,6 +231,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s27IDPTemplate6SAMLNameIDFormat,
steps.s32AddAuthSessionID,
steps.s33SMSConfigs3TwilioAddVerifyServiceSid,
steps.s37Apps7OIDConfigsBackChannelLogoutURI,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@ -340,13 +347,17 @@ func initProjections(
}
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
logging.OnError(err).Fatal("unable to start caches")
queries, err := query.StartQueries(
ctx,
eventstoreClient,
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@ -388,8 +399,9 @@ func initProjections(
permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
eventstoreClient,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
staticStorage,
@ -421,6 +433,7 @@ func initProjections(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@ -434,6 +447,8 @@ func initProjections(
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)
for _, p := range notify_handler.Projections() {
err := migration.Migrate(ctx, eventstoreClient, p)

View File

@ -18,7 +18,7 @@ import (
"github.com/zitadel/zitadel/internal/api/ui/console"
"github.com/zitadel/zitadel/internal/api/ui/login"
auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/network"
@ -49,7 +49,7 @@ type Config struct {
HTTP1HostHeader string
WebAuthNName string
Database database.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig
Tracing tracing.Config
Metrics metrics.Config
Profiler profiler.Config

View File

@ -69,6 +69,7 @@ import (
"github.com/zitadel/zitadel/internal/authz"
authz_repo "github.com/zitadel/zitadel/internal/authz/repository"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/crypto"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
@ -177,6 +178,10 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
}))
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
if err != nil {
return fmt.Errorf("unable to start caches: %w", err)
}
queries, err := query.StartQueries(
ctx,
@ -184,7 +189,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@ -222,8 +227,9 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
eventstoreClient,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
storage,
@ -269,6 +275,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@ -282,6 +289,8 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)
notification.Start(ctx)

View File

@ -155,7 +155,7 @@
>
<cnsl-form-field class="lifetime-form-field" label="Password Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="passwordCheckLifetime" formControlName="passwordCheckLifetime" />
<input cnslInput type="number" name="passwordCheckLifetime" formControlName="passwordCheckLifetime" min="1" step="1" />
</cnsl-form-field>
</cnsl-card>
<cnsl-card
@ -164,7 +164,14 @@
>
<cnsl-form-field class="lifetime-form-field" label="external Login Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="externalLoginCheckLifetime" formControlName="externalLoginCheckLifetime" />
<input
cnslInput
type="number"
name="externalLoginCheckLifetime"
formControlName="externalLoginCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field>
</cnsl-card>
<cnsl-card
@ -173,7 +180,7 @@
>
<cnsl-form-field class="lifetime-form-field" label="MFA Init Skip Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="mfaInitSkipLifetime" formControlName="mfaInitSkipLifetime" />
<input cnslInput type="number" name="mfaInitSkipLifetime" formControlName="mfaInitSkipLifetime" min="0" step="1" />
</cnsl-form-field>
</cnsl-card>
<cnsl-card
@ -182,16 +189,30 @@
>
<cnsl-form-field class="lifetime-form-field" label="Second Factor Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="secondFactorCheckLifetime" formControlName="secondFactorCheckLifetime" />
<input
cnslInput
type="number"
name="secondFactorCheckLifetime"
formControlName="secondFactorCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field>
</cnsl-card>
<cnsl-card
[title]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.TITLE' | translate"
[description]="'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.MULTI_FACTOR_CHECK.DESCRIPTION' | translate"
>
<cnsl-form-field class="lifetime-form-field" label="Multi Factor Check Lifetime" required="true">
<cnsl-form-field class="lifetime-form-field" label="Multi-factor Check Lifetime" required="true">
<cnsl-label>{{ 'DESCRIPTIONS.SETTINGS.LOGIN.LIFETIMES.LABEL' | translate }}</cnsl-label>
<input cnslInput type="number" name="multiFactorCheckLifetime" formControlName="multiFactorCheckLifetime" />
<input
cnslInput
type="number"
name="multiFactorCheckLifetime"
formControlName="multiFactorCheckLifetime"
min="1"
step="1"
/>
</cnsl-form-field>
</cnsl-card>
</form>

View File

@ -760,7 +760,7 @@
"3": "Deleted"
},
"DIALOG": {
"MFA_DELETE_TITLE": "Remove Secondfactor",
"MFA_DELETE_TITLE": "Remove Second factor",
"MFA_DELETE_DESCRIPTION": "You are about to delete a second factor. Are you sure?",
"ADD_MFA_TITLE": "Add Second Factor",
"ADD_MFA_DESCRIPTION": "Select one of the following options."
@ -773,9 +773,9 @@
"IDPNAME": "IDP Name",
"USERDISPLAYNAME": "External Name",
"EXTERNALUSERID": "External User ID",
"EMPTY": "No external IDP found",
"EMPTY": "No external IdP found",
"DIALOG": {
"DELETE_TITLE": "Remove IDP",
"DELETE_TITLE": "Remove IdP",
"DELETE_DESCRIPTION": "You are about to delete an Identity Provider from a user. Do you really want to continue?"
}
},
@ -1691,7 +1691,7 @@
"username": "Username",
"tempUsername": "Temp username",
"otp": "One-time password",
"verifyUrl": "Verify One-time-password URL",
"verifyUrl": "Verify One-time password URL",
"expiry": "Expiry",
"applicationName": "Application name"
},
@ -2154,7 +2154,7 @@
"PREFERREDLANGUAGEATTRIBUTE": "Preferred language attribute",
"PREFERREDUSERNAMEATTRIBUTE": "Preferred username attribute",
"PROFILEATTRIBUTE": "Profile attribute",
"IDPDISPLAYNAMMAPPING": "IDP Display Name Mapping",
"IDPDISPLAYNAMMAPPING": "IdP Display Name Mapping",
"USERNAMEMAPPING": "Username Mapping",
"DATES": "Dates",
"CREATIONDATE": "Created At",
@ -2162,13 +2162,13 @@
"DEACTIVATE": "Deactivate",
"ACTIVATE": "Activate",
"DELETE": "Delete",
"DELETE_TITLE": "Delete IDP",
"DELETE_TITLE": "Delete IdP",
"DELETE_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?",
"REMOVE_WARN_TITLE": "Remove IDP",
"REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. This will remove the selection of the available IDP for your users and already registered users won't be able to login again. Are you sure to continue?",
"DELETE_SELECTION_TITLE": "Delete IDP",
"REMOVE_WARN_TITLE": "Remove IdP",
"REMOVE_WARN_DESCRIPTION": "You are about to remove an identity provider. This will remove the selection of the available IdP for your users and already registered users won't be able to login again. Are you sure to continue?",
"DELETE_SELECTION_TITLE": "Delete IdP",
"DELETE_SELECTION_DESCRIPTION": "You are about to delete an identity provider. The resulting changes are irrevocable. Do you really want to do this?",
"EMPTY": "No IDP available",
"EMPTY": "No IdP available",
"OIDC": {
"GENERAL": "General Information",
"TITLE": "OIDC Configuration",

View File

@ -80,7 +80,7 @@ Use the function that reflects your log level.
### Example
```js
logger.info("This is an info log.")
logger.log("This is an info log.")
logger.warn("This is a warn log.")

View File

@ -92,7 +92,7 @@ Some secrets cannot be hashed because they need to be used in their raw form. Th
- Federation
- Client Secrets of Identity Providers (IdPs)
- Multi Factor Authentication
- Multi-factor Authentication
- TOTP Seed Values
- Validation Secrets
- Verifying contact information like email addresses and phone numbers

View File

@ -144,4 +144,4 @@ The storage layer of ZITADEL is responsible for multiple tasks. For example:
- Backup and restore operation for disaster recovery purpose
ZITADEL currently supports PostgreSQL and CockroachDB.
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide on using one of them.
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide on using one of them.

View File

@ -11,7 +11,7 @@ Since the storage layer does the heavy lifting of keeping data in sync
Depending on your project's needs, our general recommendation is to run ZITADEL and ZITADEL's storage layer across multiple availability zones in the same region, or, if you need higher guarantees, to run the storage layer across multiple regions.
Consult the [CockroachDB documentation](https://www.cockroachlabs.com/docs/) for more details or use the [CockroachCloud Service](https://www.cockroachlabs.com/docs/cockroachcloud/create-an-account.html).
Alternatively, you can also run ZITADEL with PostgreSQL, which is [Enterprise Supported](/docs/support/software-release-cycles-support#partially-supported).
Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it.
Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-postgresql) before you decide to use it.
## Scalability

View File

@ -36,5 +36,5 @@ Possible conditions for the Execution:
## Further reading
- [Actions v2 example execution locally](/apis/actionsv2/execution-local)
- [Actions v2 reference](/apis/actionsv2/introduction#action)
- [Actions v2 reference](/apis/actions/v3/usage)
- [Actions v2 example execution locally](/apis/actions/v3/testing-locally)

View File

@ -14,7 +14,7 @@ curl --request GET \
```
Response Example:
The relevant part for the list is the second factor and multi factor list.
The relevant part for the list is the second factor and multi-factor list.
```bash
{

View File

@ -30,7 +30,7 @@ ZITADEL supports different Methods:
### Start TOTP Registration
The user has selected to setup Time-based One-Time-Password (TOTP).
The user has selected to setup Time-based One-Time Password (TOTP).
To show the user the QR code to register TOTP with their authenticator app (like Google/Microsoft Authenticator or Authy), you have to start the registration on the ZITADEL API.
Generate the QR code with the URI from the response.
For users that do not have a QR code reader, make sure to also show the secret to enable manual configuration.
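As an illustration of this flow, here is a minimal Go sketch, assuming the github.com/pquerna/otp library (already a dependency in this repository's go.mod) and a placeholder URI: it parses the registration URI, writes a QR code PNG, and prints the secret for manual setup.

```go
package main

import (
	"image/png"
	"log"
	"os"

	"github.com/pquerna/otp"
)

func main() {
	// Placeholder URI; in practice use the URI returned by the
	// ZITADEL TOTP registration response.
	uri := "otpauth://totp/ZITADEL:user@example.com?secret=JBSWY3DPEHPK3PXP&issuer=ZITADEL"

	key, err := otp.NewKeyFromURL(uri)
	if err != nil {
		log.Fatal(err)
	}

	// Render the QR code for the authenticator app to scan.
	img, err := key.Image(256, 256)
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Create("totp-qr.png")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := png.Encode(f, img); err != nil {
		log.Fatal(err)
	}

	// Show the raw secret for users without a QR code reader.
	log.Println("manual setup secret:", key.Secret())
}
```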
@ -485,7 +485,7 @@ You have successfully registered a new U2F to the user.
### Check User
To be able to check the Universal-Second-Factor (U2F) you need a user check and a webAuthN challenge.
To be able to check the Universal Second Factor (U2F) you need a user check and a webAuthN challenge.
In the create session request you can check for the user and directly initiate the webAuthN challenge.
For U2F you can choose between "USER_VERIFICATION_REQUIREMENT_PREFERRED" and "USER_VERIFICATION_REQUIREMENT_DISCOURAGED" for the challenge.
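As a rough sketch of such a create-session request in Go: the endpoint path and JSON field names below are assumptions modeled on this guide, and only the user-verification enum value is taken from the text above; verify both against the session service API reference.

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Hypothetical request shape: check for the user and initiate the
	// webAuthN challenge in the same create-session call.
	body, err := json.Marshal(map[string]any{
		"checks": map[string]any{
			"user": map[string]any{"loginName": "user@example.com"},
		},
		"challenges": map[string]any{
			"webAuthN": map[string]any{
				"domain":                      "example.com",
				"userVerificationRequirement": "USER_VERIFICATION_REQUIREMENT_PREFERRED",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// "$CUSTOM-DOMAIN" and "$TOKEN" are placeholders, as in the guide's curl examples.
	req, err := http.NewRequest(http.MethodPost, "https://$CUSTOM-DOMAIN/v2beta/sessions", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer $TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```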

View File

@ -12,7 +12,7 @@ You need to give a user the [manager role](/docs/guides/manage/console/managers)
If you would like to know more about eventsourcing/eventstore and how this works in ZITADEL, head over to our [concepts](/docs/concepts/eventstore/overview).
## Request Events
Call the [ListEvents](/apis/resources/admin) enpoint in the Administration API to get all the events you need.
Call the [ListEvents](/apis/resources/admin) endpoint in the Administration API to get all the events you need.
To further restrict your result you can add the following filters:
- sequence
- editor user id
@ -139,10 +139,10 @@ curl --request POST \
The following example shows you how you could use the events search to find out the failed login attempts of your users.
You have to include all the event types that tell you that a login attempt has failed.
In this case this are the following events:
In this case these are the following events:
- Password verification failed
- One-time-password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc)
- Universal-Second-Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc)
- One-time password (OTP) check failed (Authenticator Apps like Authy, Google Authenticator, etc)
- Universal Second Factor (U2F) check failed (FaceID, WindowsHello, FingerPrint, etc)
- Passwordless/Passkey check failed (FaceID, WindowsHello, FingerPrint, etc)
```bash

View File

@ -178,10 +178,10 @@ Multifactors:
- U2F (Universal Second Factor) with PIN, e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey
Secondfactors (2FA):
Second factors (2FA):
- Time-based One Time Password (TOTP), Authenticator Apps like Google/Microsoft Authenticator, Authy, etc.
- Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardwaretokens like Yubikey
- Universal Second Factor (U2F), e.g FaceID, WindowsHello, Fingerprint, Hardware tokens like Yubikey
- One Time Password with Email (Email OTP)
- One Time Password with SMS (SMS OTP)
@ -195,9 +195,9 @@ Configure the different lifetimes checks for the login process:
- **Password Check Lifetime** specifies after which period a user has to reenter his password during the login process
- **External Login Check Lifetime** specifies after which period a user will be redirected to the IDP during the login process
- **Multifactor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi Factor during the login process (value 0 will deactivate the prompt)
- **Multi-factor Init Lifetime** specifies after which period a user will be prompted to setup a 2-Factor / Multi-factor during the login process (value 0 will deactivate the prompt)
- **Second Factor Check Lifetime** specifies after which period a user has to revalidate the 2-Factor during the login process
- **Multifactor Login Check Lifetime** specifies after which period a user has to revalidate the Multi Factor during the login process
- **Multi-factor Login Check Lifetime** specifies after which period a user has to revalidate the Multi-factor during the login process
## Identity Providers

View File

@ -16,7 +16,7 @@ The following scripts don't include:
- Global policies
- IAM members
- Global IDPs
- Global second/multi factors
- Global second factor / multi-factors
- Machine keys
- Personal Access Tokens
- Application keys

View File

@ -173,7 +173,7 @@ In case the hashes can't be transferred directly, you always have the option to
If your legacy system receives the passwords in clear text (e.g., a login form), you could also directly create users via the ZITADEL API.
We will explain this pattern in more detail in this guide.
### One-time-passwords (OTP)
### One-time passwords (OTP)
You can pass the OTP secret when creating users:

View File

@ -75,12 +75,6 @@ Data location refers to a region, consisting of one or many countries or territo
We cannot guarantee that during transit the data will only remain within this region. We take measures, as outlined in our [privacy policy](../policies/privacy-policy), to protect your data in transit and at rest.
The following regions will be available when using our cloud service. This list is for informational purposes and will be updated in due course, please refer to our website for all available regions at this time.
- **Global**: All available cloud regions offered by our cloud provider
- **Switzerland**: Exclusively on Swiss region
- **GDPR safe countries**: Hosting location is within any of the EU member states and [Adequate Countries](https://ec.europa.eu/info/law/law-topic/data-protection/international-dimension-data-protection/adequacy-decisions_en) as recognized by the European Commission under the GDPR
## Backup
Our backup strategy executes daily full backups and differential backups at a much higher frequency.

View File

@ -51,6 +51,9 @@ By executing the commands below, you will download the following file:
# Download the docker compose example configuration.
wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose-sa.yaml -O docker-compose.yaml
# Create the machine key directory.
mkdir machinekey
# Run the database and application containers.
docker compose up --detach

View File

@ -1,27 +1,27 @@
version: '3.8'
services:
zitadel:
# The user should have the permission to write to ./machinekey
user: "${UID:-1000}"
restart: 'always'
networks:
- 'zitadel'
image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
- 'ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH=/machinekey/zitadel-admin-sa.json'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME=zitadel-admin-sa'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME=Admin'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE=1'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH: /machinekey/zitadel-admin-sa.json
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME: zitadel-admin-sa
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME: Admin
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE: 1
depends_on:
db:
condition: 'service_healthy'
@ -34,12 +34,12 @@ services:
restart: 'always'
image: postgres:16-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
PGUSER: postgres
POSTGRES_PASSWORD: postgres
networks:
- 'zitadel'
healthcheck:
test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"]
test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
interval: '10s'
timeout: '30s'
retries: 5

View File

@ -1,5 +1,3 @@
version: '3.8'
services:
zitadel:
restart: 'always'
@ -8,16 +6,16 @@ services:
image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
depends_on:
db:
condition: 'service_healthy'
@ -28,9 +26,8 @@ services:
restart: 'always'
image: postgres:16-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=zitadel
PGUSER: postgres
POSTGRES_PASSWORD: postgres
networks:
- 'zitadel'
healthcheck:

View File

@ -14,7 +14,7 @@ Choose your platform and run ZITADEL with the most minimal configuration possibl
## Prerequisites
- For test environments, ZITADEL does not need many resources, 1 CPU and 512MB memory are more than enough. (With more CPU, the password hashing might be faster)
- A PostgreSQL or CockroachDB instance is the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use PostgreSQL.
- A PostgreSQL or CockroachDB instance is the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide to use PostgreSQL.
## Releases

View File

@ -1,7 +1,6 @@
## ZITADEL with Postgres
If you want to use a PostgreSQL database you can [overwrite the default configuration](../configure/configure.mdx).
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it.
Currently versions >= 14 are supported.

View File

@ -109,17 +109,16 @@ but in the Projections.Customizations.Telemetry section
## Database
### Prefer CockroachDB
### Prefer PostgreSQL
ZITADEL supports [CockroachDB](https://www.cockroachlabs.com/) and [PostgreSQL](https://www.postgresql.org/).
We recommend using CockroachDB,
as horizontal scaling is much easier than with PostgreSQL.
Also, if you are concerned about multi-regional data locality,
[the way to go is with CockroachDB](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html).
We recommend using PostgreSQL, as it is the better choice when you want to prioritize performance and latency.
However, if [multi-regional data locality](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html) is a critical requirement, CockroachDB might be a suitable option.
The indexes for the database are optimized using load tests from [ZITADEL Cloud](https://zitadel.com),
which runs with CockroachDB.
If you identify problems with your Postgresql during load tests that indicate that the indexes are not optimized,
which runs with PostgreSQL.
If you identify problems with your CockroachDB during load tests that indicate that the indexes are not optimized,
please create an issue in our [github repository](https://github.com/zitadel/zitadel).
### Configure ZITADEL
@ -128,7 +127,7 @@ Depending on your environment, you may want to tweak some settings about
```yaml
Database:
cockroach:
postgres:
Host: localhost
Port: 26257
Database: zitadel
@ -140,6 +139,7 @@ Database:
Options: ""
```
You also might want to configure how [projections](/concepts/eventstore/implementation#projections) are computed. These are the default values:
```yaml

View File

@ -7,19 +7,19 @@ services:
service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment:
- ZITADEL_EXTERNALPORT=80
- ZITADEL_EXTERNALSECURE=false
- ZITADEL_TLS_ENABLED=false
ZITADEL_EXTERNALPORT: 80
ZITADEL_EXTERNALSECURE: false
ZITADEL_TLS_ENABLED: false
# database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db
- ZITADEL_DATABASE_POSTGRES_PORT=5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks:
- 'zitadel'
depends_on:
@ -33,19 +33,19 @@ services:
service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment:
- ZITADEL_EXTERNALPORT=443
- ZITADEL_EXTERNALSECURE=true
- ZITADEL_TLS_ENABLED=false
ZITADEL_EXTERNALPORT: 443
ZITADEL_EXTERNALSECURE: true
ZITADEL_TLS_ENABLED: false
# database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db
- ZITADEL_DATABASE_POSTGRES_PORT=5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks:
- 'zitadel'
depends_on:
@ -59,21 +59,21 @@ services:
service: zitadel-init
command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
environment:
- ZITADEL_EXTERNALPORT=443
- ZITADEL_EXTERNALSECURE=true
- ZITADEL_TLS_ENABLED=true
- ZITADEL_TLS_CERTPATH=/etc/certs/selfsigned.crt
- ZITADEL_TLS_KEYPATH=/etc/certs/selfsigned.key
ZITADEL_EXTERNALPORT: 443
ZITADEL_EXTERNALSECURE: true
ZITADEL_TLS_ENABLED: true
ZITADEL_TLS_CERTPATH: /etc/certs/selfsigned.crt
ZITADEL_TLS_KEYPATH: /etc/certs/selfsigned.key
# database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db
- ZITADEL_DATABASE_POSTGRES_PORT=5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
volumes:
- ./selfsigned.crt:/etc/certs/selfsigned.crt
- ./selfsigned.key:/etc/certs/selfsigned.key
@ -96,22 +96,22 @@ services:
# Using an external domain other than localhost proves that the proxy configuration works.
# If ZITADEL can't resolve a requests original host to this domain,
# it will return a 404 Instance not found error.
- ZITADEL_EXTERNALDOMAIN=127.0.0.1.sslip.io
ZITADEL_EXTERNALDOMAIN: 127.0.0.1.sslip.io
# In case something doesn't work as expected,
# it can be handy to be able to read the access logs.
- ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED=true
ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED: true
# For convenience, ZITADEL should not ask to change the initial admin users password.
- ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false
ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED: false
# database configuration
- ZITADEL_DATABASE_POSTGRES_HOST=db
- ZITADEL_DATABASE_POSTGRES_PORT=5432
- ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
- ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
- ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
- ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
- ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
- ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
- ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
networks:
- 'zitadel'
healthcheck:
@ -125,10 +125,10 @@ services:
restart: 'always'
image: postgres:16-alpine
environment:
- POSTGRES_USER=root
- POSTGRES_PASSWORD=postgres
PGUSER: root
POSTGRES_PASSWORD: postgres
healthcheck:
test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"]
test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
interval: 5s
timeout: 60s
retries: 10

View File

@ -6068,9 +6068,9 @@ http-parser-js@>=0.5.1:
integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==
http-proxy-middleware@^2.0.3:
version "2.0.6"
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f"
integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==
version "2.0.7"
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6"
integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==
dependencies:
"@types/http-proxy" "^1.17.8"
http-proxy "^1.18.1"

39
go.mod
View File

@ -10,6 +10,7 @@ require (
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.24.0
github.com/Masterminds/squirrel v1.5.4
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b
github.com/alicebob/miniredis/v2 v2.33.0
github.com/benbjohnson/clock v1.3.5
github.com/boombuler/barcode v1.0.2
github.com/brianvoe/gofakeit/v6 v6.28.0
@ -34,7 +35,7 @@ require (
github.com/gorilla/websocket v1.4.1
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
github.com/h2non/gock v1.2.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/improbable-eng/grpc-web v0.15.0
@ -52,7 +53,8 @@ require (
github.com/pashagolub/pgxmock/v4 v4.3.0
github.com/pquerna/otp v1.4.0
github.com/rakyll/statik v0.1.7
github.com/rs/cors v1.11.0
github.com/redis/go-redis/v9 v9.7.0
github.com/rs/cors v1.11.1
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
github.com/sony/sonyflake v1.2.0
github.com/spf13/cobra v1.8.1
@ -62,29 +64,29 @@ require (
github.com/ttacon/libphonenumber v1.2.1
github.com/twilio/twilio-go v1.22.2
github.com/zitadel/logging v0.6.1
github.com/zitadel/oidc/v3 v3.28.1
github.com/zitadel/oidc/v3 v3.32.0
github.com/zitadel/passwap v0.6.0
github.com/zitadel/saml v0.2.0
github.com/zitadel/schema v1.3.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
go.opentelemetry.io/otel v1.29.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
go.opentelemetry.io/otel/exporters/prometheus v0.50.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0
go.opentelemetry.io/otel/metric v1.28.0
go.opentelemetry.io/otel/sdk v1.28.0
go.opentelemetry.io/otel/sdk/metric v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0
go.opentelemetry.io/otel/metric v1.29.0
go.opentelemetry.io/otel/sdk v1.29.0
go.opentelemetry.io/otel/sdk/metric v1.29.0
go.opentelemetry.io/otel/trace v1.29.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.27.0
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.22.0
golang.org/x/net v0.28.0
golang.org/x/oauth2 v0.23.0
golang.org/x/sync v0.8.0
golang.org/x/text v0.18.0
golang.org/x/text v0.19.0
google.golang.org/api v0.187.0
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
sigs.k8s.io/yaml v1.4.0
@ -94,8 +96,10 @@ require (
cloud.google.com/go/auth v0.6.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
github.com/crewjam/httperr v0.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@ -121,11 +125,12 @@ require (
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
github.com/zenazn/goji v1.0.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
)
require (
@ -197,7 +202,7 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/sys v0.25.0
gopkg.in/ini.v1 v1.67.0 // indirect

82
go.sum
View File

@ -56,6 +56,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA=
github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0=
github.com/amdonov/xmlsig v0.1.0 h1:i0iQ3neKLmUhcfIRgiiR3eRPKgXZj+n5lAfqnfKoeXI=
github.com/amdonov/xmlsig v0.1.0/go.mod h1:jTR/jO0E8fSl/cLvMesP+RjxyV4Ux4WL1Ip64ZnQpA0=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@ -80,13 +84,17 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMcJ4=
github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
@ -127,6 +135,8 @@ github.com/descope/virtualwebauthn v1.0.2/go.mod h1:iJvinjD1iZYqQ09J5lF0+795OdDb
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg=
@ -354,8 +364,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
@ -620,6 +630,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -628,8 +640,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po=
github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys=
@ -719,12 +731,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8=
github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y=
github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow=
github.com/zitadel/oidc/v3 v3.28.1 h1:PsbFm5CzEMQq9HBXUNJ8yvnWmtVYxpwV5Cinj7TTsHo=
github.com/zitadel/oidc/v3 v3.28.1/go.mod h1:WmDFu3dZ9YNKrIoZkmxjGG8QyUR4PbbhsVVSY+rpojM=
github.com/zitadel/oidc/v3 v3.32.0 h1:Mw0EPZRC6h+OXAuT0Uk2BZIjJQNHLqUpaJCm6c3IByc=
github.com/zitadel/oidc/v3 v3.32.0/go.mod h1:DyE/XClysRK/ozFaZSqlYamKVnTh4l6Ln25ihSNI03w=
github.com/zitadel/passwap v0.6.0 h1:m9F3epFC0VkBXu25rihSLGyHvWiNlCzU5kk8RoI+SXQ=
github.com/zitadel/passwap v0.6.0/go.mod h1:kqAiJ4I4eZvm3Y6oAk6hlEqlZZOkjMHraGXF90GG7LI=
github.com/zitadel/saml v0.2.0 h1:vv7r+Xz43eAPCb+fImMaospD+TWRZQDkb78AbSJRcL4=
@ -742,24 +756,24 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U=
go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -857,13 +871,13 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -934,8 +948,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@ -983,10 +997,10 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo=
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View File

@ -114,7 +114,15 @@ func WithConsole(ctx context.Context, projectID, appID string) context.Context {
i.projectID = projectID
i.appID = appID
//i.clientID = clientID
return context.WithValue(ctx, instanceKey, i)
}
func WithConsoleClientID(ctx context.Context, clientID string) context.Context {
i, ok := ctx.Value(instanceKey).(*instance)
if !ok {
i = new(instance)
}
i.clientID = clientID
return context.WithValue(ctx, instanceKey, i)
}

View File

@ -10,6 +10,7 @@ import (
"net/http/cookiejar"
"net/url"
"testing"
"time"
"github.com/muhlemmer/gu"
"github.com/stretchr/testify/assert"
@ -70,28 +71,34 @@ func awaitPubOrgRegDisallowed(t *testing.T, ctx context.Context, cc *integration
// awaitGetSSRGetResponse cuts the CSRF token from the response body if it exists
func awaitGetSSRGetResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int) string {
var csrfToken []byte
await(t, ctx, func(tt *assert.CollectT) {
resp, err := client.Get(parsedURL.String())
require.NoError(tt, err)
body, err := io.ReadAll(resp.Body)
require.NoError(tt, err)
searchField := `<input type="hidden" name="gorilla.csrf.Token" value="`
_, after, hasCsrfToken := bytes.Cut(body, []byte(searchField))
if hasCsrfToken {
csrfToken, _, _ = bytes.Cut(after, []byte(`">`))
}
assert.Equal(tt, resp.StatusCode, expectCode)
})
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t,
func(tt *assert.CollectT) {
resp, err := client.Get(parsedURL.String())
require.NoError(tt, err)
body, err := io.ReadAll(resp.Body)
require.NoError(tt, err)
searchField := `<input type="hidden" name="gorilla.csrf.Token" value="`
_, after, hasCsrfToken := bytes.Cut(body, []byte(searchField))
if hasCsrfToken {
csrfToken, _, _ = bytes.Cut(after, []byte(`">`))
}
assert.Equal(tt, resp.StatusCode, expectCode)
}, retryDuration, tick, "awaiting successful get SSR get response failed",
)
return string(csrfToken)
}
// awaitPostFormResponse needs a valid CSRF token to make it to the actual endpoint implementation and get the expected status code
func awaitPostFormResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int, csrfToken string) {
await(t, ctx, func(tt *assert.CollectT) {
resp, err := client.PostForm(parsedURL.String(), url.Values{
"gorilla.csrf.Token": {csrfToken},
})
require.NoError(tt, err)
assert.Equal(tt, resp.StatusCode, expectCode)
})
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t,
func(tt *assert.CollectT) {
resp, err := client.PostForm(parsedURL.String(), url.Values{
"gorilla.csrf.Token": {csrfToken},
})
require.NoError(tt, err)
assert.Equal(tt, resp.StatusCode, expectCode)
}, retryDuration, tick, "awaiting successful Post Form failed",
)
}
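The helpers above converge on one retry idiom: derive a bounded retry window and tick from the test context via `integration.WaitForAndTickWithMaxDuration`, then let testify's `require.EventuallyWithT` poll until the assertions pass. A minimal sketch of what that helper plausibly does, assuming it only caps the wait at the context deadline; the tick interval shown is illustrative, not the actual implementation:

```go
package integration

import (
	"context"
	"time"
)

// WaitForAndTickWithMaxDuration returns how long EventuallyWithT should keep
// retrying and how often it should poll. Sketch under assumptions: the wait
// is capped at max, or at the context deadline if that expires sooner.
func WaitForAndTickWithMaxDuration(ctx context.Context, max time.Duration) (waitFor, tick time.Duration) {
	waitFor = max
	if deadline, ok := ctx.Deadline(); ok {
		if until := time.Until(deadline); until < waitFor {
			waitFor = until
		}
	}
	return waitFor, 100 * time.Millisecond // assumed tick; the real interval may differ
}
```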

View File

@ -51,7 +51,7 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
require.Equal(ttt, language.Make(defaultLang.Language), language.English)
})
tt.Run("the discovery endpoint returns all supported languages", func(ttt *testing.T) {
awaitDiscoveryEndpoint(ttt, instance.Domain, supportedLanguagesStr, nil)
awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, supportedLanguagesStr, nil)
})
})
t.Run("restricting the default language fails", func(tt *testing.T) {
@ -92,10 +92,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
require.Condition(tt, contains(supported.GetLanguages(), supportedLanguagesStr))
})
t.Run("the disallowed language is not listed in the discovery endpoint", func(tt *testing.T) {
awaitDiscoveryEndpoint(tt, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()})
awaitDiscoveryEndpoint(tt, ctx, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()})
})
t.Run("the login ui is rendered in the default language", func(tt *testing.T) {
awaitLoginUILanguage(tt, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort")
awaitLoginUILanguage(tt, ctx, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort")
})
t.Run("preferred languages are not restricted by the supported languages", func(tt *testing.T) {
tt.Run("change user profile", func(ttt *testing.T) {
@ -153,10 +153,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
t.Run("allowing the language makes it usable again", func(tt *testing.T) {
tt.Run("the previously disallowed language is listed in the discovery endpoint again", func(ttt *testing.T) {
awaitDiscoveryEndpoint(ttt, instance.Domain, []string{disallowedLanguage.String()}, nil)
awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, []string{disallowedLanguage.String()}, nil)
})
tt.Run("the login ui is rendered in the previously disallowed language", func(ttt *testing.T) {
awaitLoginUILanguage(ttt, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña")
awaitLoginUILanguage(ttt, ctx, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña")
})
})
}
@ -164,36 +164,36 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
func setAndAwaitAllowedLanguages(ctx context.Context, cc *integration.Client, t *testing.T, selectLanguages []string) {
_, err := cc.Admin.SetRestrictions(ctx, &admin.SetRestrictionsRequest{AllowedLanguages: &admin.SelectLanguages{List: selectLanguages}})
require.NoError(t, err)
awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second)
defer awaitCancel()
await(t, awaitCtx, func(tt *assert.CollectT) {
restrictions, getErr := cc.Admin.GetRestrictions(awaitCtx, &admin.GetRestrictionsRequest{})
expectLanguages := selectLanguages
if len(selectLanguages) == 0 {
expectLanguages = nil
}
assert.NoError(tt, getErr)
assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages())
})
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t,
func(tt *assert.CollectT) {
restrictions, getErr := cc.Admin.GetRestrictions(ctx, &admin.GetRestrictionsRequest{})
expectLanguages := selectLanguages
if len(selectLanguages) == 0 {
expectLanguages = nil
}
assert.NoError(tt, getErr)
assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages())
}, retryDuration, tick, "awaiting successful GetAllowedLanguages failed",
)
}
func setAndAwaitDefaultLanguage(ctx context.Context, cc *integration.Client, t *testing.T, lang language.Tag) {
_, err := cc.Admin.SetDefaultLanguage(ctx, &admin.SetDefaultLanguageRequest{Language: lang.String()})
require.NoError(t, err)
awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second)
defer awaitCancel()
await(t, awaitCtx, func(tt *assert.CollectT) {
defaultLang, getErr := cc.Admin.GetDefaultLanguage(awaitCtx, &admin.GetDefaultLanguageRequest{})
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(tt *assert.CollectT) {
defaultLang, getErr := cc.Admin.GetDefaultLanguage(ctx, &admin.GetDefaultLanguageRequest{})
assert.NoError(tt, getErr)
assert.Equal(tt, lang.String(), defaultLang.GetLanguage())
})
}, retryDuration, tick, "awaiting successful GetDefaultLanguage failed",
)
}
func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notContainsUILocales []string) {
awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second)
defer awaitCancel()
await(t, awaitCtx, func(tt *assert.CollectT) {
req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
func awaitDiscoveryEndpoint(t *testing.T, ctx context.Context, domain string, containsUILocales, notContainsUILocales []string) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(tt *assert.CollectT) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
require.NoError(tt, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(tt, err)
@ -213,14 +213,14 @@ func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notC
if notContainsUILocales != nil {
assert.Condition(tt, not(contains(doc.UILocalesSupported, notContainsUILocales)))
}
})
}, retryDuration, tick, "awaiting successful call to Discovery endpoint failed",
)
}
func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) {
awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second)
defer awaitCancel()
await(t, awaitCtx, func(tt *assert.CollectT) {
req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
func awaitLoginUILanguage(t *testing.T, ctx context.Context, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(tt *assert.CollectT) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
req.Header.Set("Accept-Language", acceptLanguage.String())
require.NoError(tt, err)
resp, err := http.DefaultClient.Do(req)
@ -232,7 +232,8 @@ func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.T
}()
require.NoError(tt, err)
assert.Containsf(tt, string(body), containsText, "login ui language is in "+expectLang.String())
})
}, retryDuration, tick, "awaiting successful LoginUI in specific language failed",
)
}
// We would love to use assert.Contains here, but it doesn't work with slices of strings
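The `contains`/`not` condition helpers this comment refers to can be sketched as plain `assert.Comparison` closures over string slices. Names and semantics are inferred from the call sites above, not copied from the test file, which may normalize types such as language tags first:

```go
// contains returns a func() bool (an assert.Comparison) reporting whether
// every element of want occurs somewhere in got.
func contains(got, want []string) func() bool {
	return func() bool {
		have := make(map[string]struct{}, len(got))
		for _, g := range got {
			have[g] = struct{}{}
		}
		for _, w := range want {
			if _, ok := have[w]; !ok {
				return false
			}
		}
		return true
	}
}

// not inverts a comparison, e.g. assert.Condition(tt, not(contains(got, want))).
func not(c func() bool) func() bool {
	return func() bool { return !c() }
}
```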

View File

@ -9,7 +9,6 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/zitadel/internal/integration"
admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin"
@ -34,23 +33,6 @@ func TestMain(m *testing.M) {
}())
}
func await(t *testing.T, ctx context.Context, cb func(*assert.CollectT)) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(
t,
func(tt *assert.CollectT) {
defer func() {
// Panics are not recovered and don't mark the test as failed, so we need to do that ourselves
assert.Nil(tt, recover(), "panic in await callback")
}()
cb(tt)
},
retryDuration,
tick,
"awaiting successful callback failed",
)
}
var _ assert.TestingT = (*noopAssertionT)(nil)
type noopAssertionT struct{}

View File

@ -19,6 +19,7 @@ func systemFeaturesToCommand(req *feature_pb.SetSystemFeaturesRequest) *command.
ImprovedPerformance: improvedPerformanceListToDomain(req.ImprovedPerformance),
OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
DisableUserTokenEvent: req.DisableUserTokenEvent,
EnableBackChannelLogout: req.EnableBackChannelLogout,
}
}
@ -34,6 +35,7 @@ func systemFeaturesToPb(f *query.SystemFeatures) *feature_pb.GetSystemFeaturesRe
ImprovedPerformance: featureSourceToImprovedPerformanceFlagPb(&f.ImprovedPerformance),
OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
}
}
@ -50,6 +52,7 @@ func instanceFeaturesToCommand(req *feature_pb.SetInstanceFeaturesRequest) *comm
DebugOIDCParentError: req.DebugOidcParentError,
OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
DisableUserTokenEvent: req.DisableUserTokenEvent,
EnableBackChannelLogout: req.EnableBackChannelLogout,
}
}
@ -67,6 +70,7 @@ func instanceFeaturesToPb(f *query.InstanceFeatures) *feature_pb.GetInstanceFeat
DebugOidcParentError: featureSourceToFlagPb(&f.DebugOIDCParentError),
OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
}
}

View File

@ -80,6 +80,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
Level: feature.LevelSystem,
Value: true,
},
EnableBackChannelLogout: query.FeatureSource[bool]{
Level: feature.LevelSystem,
Value: true,
},
}
want := &feature_pb.GetSystemFeaturesResponse{
Details: &object.Details{
@ -123,6 +127,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
Enabled: false,
Source: feature_pb.Source_SOURCE_UNSPECIFIED,
},
EnableBackChannelLogout: &feature_pb.FeatureFlag{
Enabled: true,
Source: feature_pb.Source_SOURCE_SYSTEM,
},
}
got := systemFeaturesToPb(arg)
assert.Equal(t, want, got)
@ -140,6 +148,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
WebKey: gu.Ptr(true),
DebugOidcParentError: gu.Ptr(true),
OidcSingleV1SessionTermination: gu.Ptr(true),
EnableBackChannelLogout: gu.Ptr(true),
}
want := &command.InstanceFeatures{
LoginDefaultOrg: gu.Ptr(true),
@ -152,6 +161,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
WebKey: gu.Ptr(true),
DebugOIDCParentError: gu.Ptr(true),
OIDCSingleV1SessionTermination: gu.Ptr(true),
EnableBackChannelLogout: gu.Ptr(true),
}
got := instanceFeaturesToCommand(arg)
assert.Equal(t, want, got)
@ -200,6 +210,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
Level: feature.LevelInstance,
Value: true,
},
EnableBackChannelLogout: query.FeatureSource[bool]{
Level: feature.LevelInstance,
Value: true,
},
}
want := &feature_pb.GetInstanceFeaturesResponse{
Details: &object.Details{
@ -251,6 +265,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
Enabled: false,
Source: feature_pb.Source_SOURCE_UNSPECIFIED,
},
EnableBackChannelLogout: &feature_pb.FeatureFlag{
Enabled: true,
Source: feature_pb.Source_SOURCE_INSTANCE,
},
}
got := instanceFeaturesToPb(arg)
assert.Equal(t, want, got)

View File

@ -57,6 +57,7 @@ func AddOIDCAppRequestToDomain(req *mgmt_pb.AddOIDCAppRequest) *domain.OIDCApp {
ClockSkew: req.ClockSkew.AsDuration(),
AdditionalOrigins: req.AdditionalOrigins,
SkipNativeAppSuccessPage: req.SkipNativeAppSuccessPage,
BackChannelLogoutURI: req.GetBackChannelLogoutUri(),
}
}
@ -108,6 +109,7 @@ func UpdateOIDCAppConfigRequestToDomain(app *mgmt_pb.UpdateOIDCAppConfigRequest)
ClockSkew: app.ClockSkew.AsDuration(),
AdditionalOrigins: app.AdditionalOrigins,
SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
BackChannelLogoutURI: app.BackChannelLogoutUri,
}
}

View File

@ -19,14 +19,12 @@ import (
"github.com/zitadel/zitadel/pkg/grpc/object/v2"
oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2"
"github.com/zitadel/zitadel/pkg/grpc/session/v2"
"github.com/zitadel/zitadel/pkg/grpc/user/v2"
)
var (
CTX context.Context
Instance *integration.Instance
Client oidc_pb.OIDCServiceClient
User *user.AddHumanUserResponse
)
const (
@ -44,7 +42,6 @@ func TestMain(m *testing.M) {
Client = Instance.Client.OIDCv2
CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner)
User = Instance.CreateHumanUser(CTX)
return m.Run()
}())
}

View File

@ -19,14 +19,12 @@ import (
object "github.com/zitadel/zitadel/pkg/grpc/object/v2beta"
oidc_pb "github.com/zitadel/zitadel/pkg/grpc/oidc/v2beta"
session "github.com/zitadel/zitadel/pkg/grpc/session/v2beta"
"github.com/zitadel/zitadel/pkg/grpc/user/v2"
)
var (
CTX context.Context
Instance *integration.Instance
Client oidc_pb.OIDCServiceClient
User *user.AddHumanUserResponse
)
const (
@ -44,7 +42,6 @@ func TestMain(m *testing.M) {
Client = Instance.Client.OIDCv2beta
CTX = Instance.WithAuthorization(ctx, integration.UserTypeOrgOwner)
User = Instance.CreateHumanUser(CTX)
return m.Run()
}())
}

View File

@ -22,7 +22,7 @@ func OrgQueriesToModel(queries []*org_pb.OrgQuery) (_ []query.SearchQuery, err e
func OrgQueryToModel(apiQuery *org_pb.OrgQuery) (query.SearchQuery, error) {
switch q := apiQuery.Query.(type) {
case *org_pb.OrgQuery_DomainQuery:
return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
case *org_pb.OrgQuery_NameQuery:
return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name)
case *org_pb.OrgQuery_StateQuery:

View File

@ -15,6 +15,7 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/management"
"github.com/zitadel/zitadel/pkg/grpc/object/v2"
"github.com/zitadel/zitadel/pkg/grpc/org/v2"
)
@ -214,6 +215,46 @@ func TestServer_ListOrganizations(t *testing.T) {
},
},
},
{
name: "list org by domain (non primary), ok",
args: args{
CTX,
&org.ListOrganizationsRequest{},
func(ctx context.Context, request *org.ListOrganizationsRequest) ([]orgAttr, error) {
orgs := make([]orgAttr, 1)
name := fmt.Sprintf("ListOrgs-%s", gofakeit.AppName())
orgResp := Instance.CreateOrganization(ctx, name, gofakeit.Email())
orgs[0] = orgAttr{
ID: orgResp.GetOrganizationId(),
Name: name,
Details: orgResp.GetDetails(),
}
domain := gofakeit.DomainName()
_, err := Instance.Client.Mgmt.AddOrgDomain(integration.SetOrgID(ctx, orgResp.GetOrganizationId()), &management.AddOrgDomainRequest{
Domain: domain,
})
if err != nil {
return nil, err
}
request.Queries = []*org.SearchQuery{
OrganizationDomainQuery(domain),
}
return orgs, nil
},
},
want: &org.ListOrganizationsResponse{
Details: &object.ListDetails{
TotalResult: 1,
Timestamp: timestamppb.Now(),
},
SortingColumn: 0,
Result: []*org.Organization{
{
State: org.OrganizationState_ORGANIZATION_STATE_ACTIVE,
},
},
},
},
{
name: "list org by inactive state, ok",
args: args{

View File

@ -57,7 +57,7 @@ func orgQueriesToQuery(ctx context.Context, queries []*org.SearchQuery) (_ []que
func orgQueryToQuery(ctx context.Context, orgQuery *org.SearchQuery) (query.SearchQuery, error) {
switch q := orgQuery.Query.(type) {
case *org.SearchQuery_DomainQuery:
return query.NewOrgDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
return query.NewOrgVerifiedDomainSearchQuery(object.TextMethodToQuery(q.DomainQuery.Method), q.DomainQuery.Domain)
case *org.SearchQuery_NameQuery:
return query.NewOrgNameSearchQuery(object.TextMethodToQuery(q.NameQuery.Method), q.NameQuery.Name)
case *org.SearchQuery_StateQuery:

View File

@ -61,6 +61,7 @@ func AppOIDCConfigToPb(app *query.OIDCApp) *app_pb.App_OidcConfig {
AdditionalOrigins: app.AdditionalOrigins,
AllowedOrigins: app.AllowedOrigins,
SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
BackChannelLogoutUri: app.BackChannelLogoutURI,
},
}
}

View File

@ -62,10 +62,10 @@ func TestServer_ExecutionTarget(t *testing.T) {
changedRequest := &action.GetTargetRequest{Id: targetCreated.GetDetails().GetId()}
// replace original request with different targetID
urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusOK, changedRequest)
targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, false)
instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod))
targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, false)
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
// expected response from the GetTarget
expectedResponse := &action.GetTargetResponse{
@ -119,10 +119,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
}
// after request with different targetID, return changed response
targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusOK, changedResponse)
targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, false)
instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod))
targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, false)
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
return func() {
closeRequest()
closeResponse()
@ -161,12 +160,10 @@ func TestServer_ExecutionTarget(t *testing.T) {
wantRequest := &middleware.ContextInfoRequest{FullMethod: fullMethod, InstanceID: instance.ID(), OrgID: orgID, ProjectID: projectID, UserID: userID, Request: request}
urlRequest, closeRequest := testServerCall(wantRequest, 0, http.StatusInternalServerError, &action.GetTargetRequest{Id: "notchanged"})
targetRequest := instance.CreateTarget(ctx, t, "", urlRequest, domain.TargetTypeCall, true)
instance.SetExecution(ctx, t, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
targetRequest := waitForTarget(ctx, t, instance, urlRequest, domain.TargetTypeCall, true)
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod), executionTargetsSingleTarget(targetRequest.GetDetails().GetId()))
// GetTarget with used target
request.Id = targetRequest.GetDetails().GetId()
waitForExecutionOnCondition(ctx, t, instance, conditionRequestFullMethod(fullMethod))
return func() {
closeRequest()
}, nil
@ -233,10 +230,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
}
// after request with different targetID, return changed response
targetResponseURL, closeResponse := testServerCall(wantResponse, 0, http.StatusInternalServerError, changedResponse)
targetResponse := instance.CreateTarget(ctx, t, "", targetResponseURL, domain.TargetTypeCall, true)
instance.SetExecution(ctx, t, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod))
targetResponse := waitForTarget(ctx, t, instance, targetResponseURL, domain.TargetTypeCall, true)
waitForExecutionOnCondition(ctx, t, instance, conditionResponseFullMethod(fullMethod), executionTargetsSingleTarget(targetResponse.GetDetails().GetId()))
return func() {
closeResponse()
}, nil
@ -277,7 +273,9 @@ func TestServer_ExecutionTarget(t *testing.T) {
}
}
func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition) {
func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *integration.Instance, condition *action.Condition, targets []*action.ExecutionTargetType) {
instance.SetExecution(ctx, t, condition, targets)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.SearchExecutions(ctx, &action.SearchExecutionsRequest{
@ -290,11 +288,58 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in
if !assert.NoError(ttt, err) {
return
}
assert.Len(ttt, got.GetResult(), 1)
if !assert.Len(ttt, got.GetResult(), 1) {
return
}
gotTargets := got.GetResult()[0].GetExecution().GetTargets()
// check the length first; if it differs, the assertion has already failed
if assert.Len(ttt, gotTargets, len(targets)) {
for i := range targets {
assert.EqualExportedValues(ttt, targets[i].GetType(), gotTargets[i].GetType())
}
}
}, retryDuration, tick, "timeout waiting for expected execution result")
}
func waitForTarget(ctx context.Context, t *testing.T, instance *integration.Instance, endpoint string, ty domain.TargetType, interrupt bool) *action.CreateTargetResponse {
resp := instance.CreateTarget(ctx, t, "", endpoint, ty, interrupt)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.SearchTargets(ctx, &action.SearchTargetsRequest{
Filters: []*action.TargetSearchFilter{
{Filter: &action.TargetSearchFilter_InTargetIdsFilter{
InTargetIdsFilter: &action.InTargetIDsFilter{TargetIds: []string{resp.GetDetails().GetId()}},
}},
},
})
if !assert.NoError(ttt, err) {
return
}
if !assert.Len(ttt, got.GetResult(), 1) {
return
}
config := got.GetResult()[0].GetConfig()
assert.Equal(ttt, config.GetEndpoint(), endpoint)
switch ty {
case domain.TargetTypeWebhook:
if !assert.NotNil(ttt, config.GetRestWebhook()) {
return
}
assert.Equal(ttt, interrupt, config.GetRestWebhook().GetInterruptOnError())
case domain.TargetTypeAsync:
assert.NotNil(ttt, config.GetRestAsync())
case domain.TargetTypeCall:
if !assert.NotNil(ttt, config.GetRestCall()) {
return
}
assert.Equal(ttt, interrupt, config.GetRestCall().GetInterruptOnError())
}
}, retryDuration, tick, "timeout waiting for expected execution result")
return resp
}
func conditionRequestFullMethod(fullMethod string) *action.Condition {
return &action.Condition{
ConditionType: &action.Condition_Request{

View File

@ -216,14 +216,16 @@ func TestServer_GetTarget(t *testing.T) {
err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want)
require.NoError(t, err)
}
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 2*time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req)
if tt.wantErr {
assert.Error(ttt, err, "Error: "+err.Error())
return
}
assert.NoError(ttt, err)
if !assert.NoError(ttt, err) {
return
}
wantTarget := tt.want.GetTarget()
gotTarget := got.GetTarget()

View File

@ -36,7 +36,7 @@ func TestMain(m *testing.M) {
}
func TestServer_Feature_Disabled(t *testing.T) {
instance, iamCtx := createInstance(t, false)
instance, iamCtx, _ := createInstance(t, false)
client := instance.Client.WebKeyV3Alpha
t.Run("CreateWebKey", func(t *testing.T) {
@ -62,18 +62,18 @@ func TestServer_Feature_Disabled(t *testing.T) {
}
func TestServer_ListWebKeys(t *testing.T) {
instance, iamCtx := createInstance(t, true)
instance, iamCtx, creationDate := createInstance(t, true)
// After the feature is first enabled, we can expect 2 generated keys with the default config.
checkWebKeyListState(iamCtx, t, instance, 2, "", &webkey.WebKey_Rsa{
Rsa: &webkey.WebKeyRSAConfig{
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
},
})
}, creationDate)
}
func TestServer_CreateWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true)
instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha
_, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{
@ -93,11 +93,11 @@ func TestServer_CreateWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
},
})
}, creationDate)
}
func TestServer_ActivateWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true)
instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha
resp, err := client.CreateWebKey(iamCtx, &webkey.CreateWebKeyRequest{
@ -122,11 +122,11 @@ func TestServer_ActivateWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
},
})
}, creationDate)
}
func TestServer_DeleteWebKey(t *testing.T) {
instance, iamCtx := createInstance(t, true)
instance, iamCtx, creationDate := createInstance(t, true)
client := instance.Client.WebKeyV3Alpha
keyIDs := make([]string, 2)
@ -178,11 +178,12 @@ func TestServer_DeleteWebKey(t *testing.T) {
Bits: webkey.WebKeyRSAConfig_RSA_BITS_2048,
Hasher: webkey.WebKeyRSAConfig_RSA_HASHER_SHA256,
},
})
}, creationDate)
}
func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context) {
func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, context.Context, *timestamppb.Timestamp) {
instance := integration.NewInstance(CTX)
creationDate := timestamppb.Now()
iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
if enableFeature {
@ -203,7 +204,7 @@ func createInstance(t *testing.T, enableFeature bool) (*integration.Instance, co
}
}, retryDuration, tick)
return instance, iamCTX
return instance, iamCTX, creationDate
}
func assertFeatureDisabledError(t *testing.T, err error) {
@ -214,7 +215,7 @@ func assertFeatureDisabledError(t *testing.T, err error) {
assert.Contains(t, s.Message(), "WEBKEY-Ohx6E")
}
func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any) {
func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integration.Instance, nKeys int, expectActiveKeyID string, config any, creationDate *timestamppb.Timestamp) {
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
assert.EventuallyWithT(t, func(collect *assert.CollectT) {
@ -227,8 +228,8 @@ func checkWebKeyListState(ctx context.Context, t *testing.T, instance *integrati
var gotActiveKeyID string
for _, key := range list {
integration.AssertResourceDetails(t, &resource_object.Details{
Created: timestamppb.Now(),
Changed: timestamppb.Now(),
Created: creationDate,
Changed: creationDate,
Owner: &object.Owner{
Type: object.OwnerType_OWNER_TYPE_INSTANCE,
Id: instance.ID(),

View File

@ -190,7 +190,6 @@ func TestServer_GetUserByID(t *testing.T) {
func TestServer_GetUserByID_Permission(t *testing.T) {
t.Parallel()
timeNow := time.Now().UTC()
newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
newUserID := newOrg.CreatedAdmins[0].GetUserId()
@ -237,7 +236,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
},
},
Details: &object.Details{
ChangeDate: timestamppb.New(timeNow),
ChangeDate: timestamppb.Now(),
ResourceOwner: newOrg.GetOrganizationId(),
},
},
@ -275,7 +274,7 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
},
},
Details: &object.Details{
ChangeDate: timestamppb.New(timeNow),
ChangeDate: timestamppb.Now(),
ResourceOwner: newOrg.GetOrganizationId(),
},
},
@ -303,24 +302,29 @@ func TestServer_GetUserByID_Permission(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Client.GetUserByID(tt.args.ctx, tt.args.req)
if tt.wantErr {
require.Error(t, err)
return
}
require.NoError(t, err)
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := Client.GetUserByID(tt.args.ctx, tt.args.req)
if tt.wantErr {
assert.Error(ttt, err)
return
}
if !assert.NoError(ttt, err) {
return
}
tt.want.User.UserId = tt.args.req.GetUserId()
tt.want.User.Username = newOrgOwnerEmail
tt.want.User.PreferredLoginName = newOrgOwnerEmail
tt.want.User.LoginNames = []string{newOrgOwnerEmail}
if human := tt.want.User.GetHuman(); human != nil {
human.Email.Email = newOrgOwnerEmail
}
// details tested in GetUserByID
tt.want.User.Details = got.User.GetDetails()
tt.want.User.UserId = tt.args.req.GetUserId()
tt.want.User.Username = newOrgOwnerEmail
tt.want.User.PreferredLoginName = newOrgOwnerEmail
tt.want.User.LoginNames = []string{newOrgOwnerEmail}
if human := tt.want.User.GetHuman(); human != nil {
human.Email.Email = newOrgOwnerEmail
}
// details tested in GetUserByID
tt.want.User.Details = got.User.GetDetails()
assert.Equal(t, tt.want.User, got.User)
assert.Equal(ttt, tt.want.User, got.User)
}, retryDuration, tick, "timeout waiting for expected user result")
})
}
}

View File

@ -2447,7 +2447,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG,
})
require.NoError(t, err)
idpLink, err := Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
_, err = Instance.Client.UserV2.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
IdpId: provider.GetId(),
UserId: "external-id",
UserName: "displayName",
@ -2639,25 +2639,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var got *user.ListAuthenticationMethodTypesResponse
var err error
for {
got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) {
break
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
require.NoError(ttt, err)
if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) {
return
}
select {
case <-CTX.Done():
t.Fatal(CTX.Err(), err)
case <-time.After(time.Second):
t.Log("retrying ListAuthenticationMethodTypes")
continue
}
}
require.NoError(t, err)
assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult())
require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
integration.AssertListDetails(ttt, tt.want, got)
}, retryDuration, tick, "timeout waiting for expected auth methods result")
})
}
}

View File

@ -2454,7 +2454,7 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
OwnerType: idp.IDPOwnerType_IDP_OWNER_TYPE_ORG,
})
require.NoError(t, err)
idpLink, err := Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
_, err = Client.AddIDPLink(CTX, &user.AddIDPLinkRequest{UserId: userMultipleAuth, IdpLink: &user.IDPLink{
IdpId: provider.GetId(),
UserId: "external-id",
UserName: "displayName",
@ -2527,25 +2527,16 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var got *user.ListAuthenticationMethodTypesResponse
var err error
for {
got, err = Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
if err == nil && !got.GetDetails().GetTimestamp().AsTime().Before(idpLink.GetDetails().GetChangeDate().AsTime()) {
break
retryDuration, tick := integration.WaitForAndTickWithMaxDuration(tt.args.ctx, time.Minute)
require.EventuallyWithT(t, func(ttt *assert.CollectT) {
got, err := Client.ListAuthenticationMethodTypes(tt.args.ctx, tt.args.req)
require.NoError(ttt, err)
if !assert.Equal(ttt, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult()) {
return
}
select {
case <-CTX.Done():
t.Fatal(CTX.Err(), err)
case <-time.After(time.Second):
t.Log("retrying ListAuthenticationMethodTypes")
continue
}
}
require.NoError(t, err)
assert.Equal(t, tt.want.GetDetails().GetTotalResult(), got.GetDetails().GetTotalResult())
require.Equal(t, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
assert.Equal(ttt, tt.want.GetAuthMethodTypes(), got.GetAuthMethodTypes())
integration.AssertListDetails(ttt, tt.want, got)
}, retryDuration, tick, "timeout waiting for expected auth methods result")
})
}
}

View File

@ -215,18 +215,18 @@ func (o *OPStorage) TerminateSession(ctx context.Context, userID, clientID strin
logging.Error("no user agent id")
return zerrors.ThrowPreconditionFailed(nil, "OIDC-fso7F", "no user agent id")
}
userIDs, err := o.repo.UserSessionUserIDsByAgentID(ctx, userAgentID)
sessions, err := o.repo.UserSessionsByAgentID(ctx, userAgentID)
if err != nil {
logging.WithError(err).Error("error retrieving user sessions")
return err
}
if len(userIDs) == 0 {
if len(sessions) == 0 {
return nil
}
data := authz.CtxData{
UserID: userID,
}
err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, userIDs)
err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, sessions)
logging.OnError(err).Error("error signing out")
return err
}
@ -278,18 +278,18 @@ func (o *OPStorage) terminateV1Session(ctx context.Context, userID, sessionID st
if err != nil {
return err
}
return o.command.HumansSignOut(ctx, userAgentID, []string{userID})
return o.command.HumansSignOut(ctx, userAgentID, []command.HumanSignOutSession{{ID: sessionID, UserID: userID}})
}
// otherwise we search for all active sessions within the same user agent of the current session id
userAgentID, userIDs, err := o.repo.ActiveUserIDsBySessionID(ctx, sessionID)
userAgentID, sessions, err := o.repo.ActiveUserSessionsBySessionID(ctx, sessionID)
if err != nil {
logging.WithError(err).Error("error retrieving user sessions")
return err
}
if len(userIDs) == 0 {
if len(sessions) == 0 {
return nil
}
return o.command.HumansSignOut(ctx, userAgentID, userIDs)
return o.command.HumansSignOut(ctx, userAgentID, sessions)
}
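The sign-out path now threads full session descriptors instead of bare user IDs, so each v1 session can be terminated, and later back-channel-logged-out, individually. The payload shape implied by the call sites, assumed here rather than copied from `internal/command`:

```go
// HumanSignOutSession pairs a v1 session with the user it belongs to.
// Shape inferred from the call sites above; the real definition may differ.
type HumanSignOutSession struct {
	ID     string // session ID to terminate
	UserID string // owner of that session
}
```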
func (o *OPStorage) RevokeToken(ctx context.Context, token, userID, clientID string) (err *oidc.Error) {
@ -588,6 +588,7 @@ func (s *Server) authResponseToken(authReq *AuthRequest, authorizer op.Authorize
authReq.UserID,
authReq.UserOrgID,
client.client.ClientID,
client.client.BackChannelLogoutURI,
scope,
authReq.Audience,
authReq.AuthMethods(),

View File

@ -348,7 +348,7 @@ func (o *OPStorage) getSigningKey(ctx context.Context) (op.SigningKey, error) {
return nil, err
}
if len(keys.Keys) > 0 {
return o.privateKeyToSigningKey(selectSigningKey(keys.Keys))
return PrivateKeyToSigningKey(SelectSigningKey(keys.Keys), o.encAlg)
}
var position float64
if keys.State != nil {
@ -377,8 +377,8 @@ func (o *OPStorage) ensureIsLatestKey(ctx context.Context, position float64) (bo
return position >= maxSequence, nil
}
func (o *OPStorage) privateKeyToSigningKey(key query.PrivateKey) (_ op.SigningKey, err error) {
keyData, err := crypto.Decrypt(key.Key(), o.encAlg)
func PrivateKeyToSigningKey(key query.PrivateKey, algorithm crypto.EncryptionAlgorithm) (_ op.SigningKey, err error) {
keyData, err := crypto.Decrypt(key.Key(), algorithm)
if err != nil {
return nil, err
}
@ -430,7 +430,7 @@ func (o *OPStorage) getMaxKeySequence(ctx context.Context) (float64, error) {
)
}
func selectSigningKey(keys []query.PrivateKey) query.PrivateKey {
func SelectSigningKey(keys []query.PrivateKey) query.PrivateKey {
return keys[len(keys)-1]
}

View File

@ -42,6 +42,7 @@ type Config struct {
DefaultLoginURLV2 string
DefaultLogoutURLV2 string
PublicKeyCacheMaxAge time.Duration
DefaultBackChannelLogoutLifetime time.Duration
}
type EndpointConfig struct {

View File

@ -46,7 +46,7 @@ type Server struct {
}
func endpoints(endpointConfig *EndpointConfig) op.Endpoints {
// some defaults. The new Server will disable enpoints that are nil.
// some defaults. The new Server will disable endpoints that are nil.
endpoints := op.Endpoints{
Authorization: op.NewEndpoint("/oauth/v2/authorize"),
Token: op.NewEndpoint("/oauth/v2/token"),
@ -167,6 +167,7 @@ func (s *Server) EndSession(ctx context.Context, r *op.Request[oidc.EndSessionRe
func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales oidc.Locales) *oidc.DiscoveryConfiguration {
issuer := op.IssuerFromContext(ctx)
backChannelLogoutSupported := authz.GetInstance(ctx).Features().EnableBackChannelLogout
return &oidc.DiscoveryConfiguration{
Issuer: issuer,
@ -199,6 +200,8 @@ func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales o
CodeChallengeMethodsSupported: op.CodeChallengeMethods(s.Provider()),
UILocalesSupported: supportedUILocales,
RequestParameterSupported: s.Provider().RequestObjectSupported(),
BackChannelLogoutSupported: backChannelLogoutSupported,
BackChannelLogoutSessionSupported: backChannelLogoutSupported,
}
}

View File

@ -60,12 +60,19 @@ func (s *Server) accessTokenResponseFromSession(ctx context.Context, client op.C
return resp, err
}
// signerFunc is a getter function that allows ad-hoc retrieval of the instance's signer.
type signerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error)
// SignerFunc is a getter function that allows ad-hoc retrieval of the instance's signer.
type SignerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error)
// getSignerOnce returns a function which retrieves the instance's signer from the database once.
func (s *Server) getSignerOnce() SignerFunc {
return GetSignerOnce(s.query.GetActiveSigningWebKey, s.Provider().Storage().SigningKey)
}
// GetSignerOnce returns a function which retrieves the instance's signer from the database once.
// Repeated calls of the returned function return the same results.
func (s *Server) getSignerOnce() signerFunc {
func GetSignerOnce(
getActiveSigningWebKey func(ctx context.Context) (*jose.JSONWebKey, error),
getSigningKey func(ctx context.Context) (op.SigningKey, error),
) SignerFunc {
var (
once sync.Once
signer jose.Signer
@ -79,7 +86,7 @@ func (s *Server) getSignerOnce() signerFunc {
if authz.GetFeatures(ctx).WebKey {
var webKey *jose.JSONWebKey
webKey, err = s.query.GetActiveSigningWebKey(ctx)
webKey, err = getActiveSigningWebKey(ctx)
if err != nil {
return
}
@ -88,7 +95,7 @@ func (s *Server) getSignerOnce() signerFunc {
}
var signingKey op.SigningKey
signingKey, err = s.Provider().Storage().SigningKey(ctx)
signingKey, err = getSigningKey(ctx)
if err != nil {
return
}
@ -126,7 +133,7 @@ func (s *Server) getUserInfo(userID, projectID string, projectRoleAssertion, use
}
}
func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey signerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) {
func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey SignerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
@ -170,7 +177,7 @@ func timeToOIDCExpiresIn(exp time.Time) uint64 {
return uint64(time.Until(exp) / time.Second)
}
func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner signerFunc) (_ string, err error) {
func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner SignerFunc) (_ string, err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
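The refactor extracts the sync.Once memoization behind GetSignerOnce so the getters can be injected and tested. A minimal, self-contained sketch of the same pattern (a hypothetical generic helper, not the actual implementation, which additionally consults the WebKey feature flag):

package main

import (
	"context"
	"fmt"
	"sync"
)

// getOnce memoizes the first call of get: every later invocation returns
// the same value and error without hitting the backend again.
func getOnce[T any](get func(ctx context.Context) (T, error)) func(ctx context.Context) (T, error) {
	var (
		once  sync.Once
		value T
		err   error
	)
	return func(ctx context.Context) (T, error) {
		once.Do(func() {
			value, err = get(ctx)
		})
		return value, err
	}
}

func main() {
	cached := getOnce(func(context.Context) (int, error) { return 42, nil })
	v, _ := cached(context.Background())
	fmt.Println(v) // 42; repeated calls return the same result
}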

View File

@ -35,6 +35,7 @@ func (s *Server) ClientCredentialsExchange(ctx context.Context, r *op.ClientRequ
client.userID,
client.resourceOwner,
client.clientID,
"", // backChannelLogoutURI not needed for service user session
scope,
domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
[]domain.UserAuthMethodType{domain.UserAuthMethodTypePassword},

View File

@ -75,6 +75,7 @@ func (s *Server) codeExchangeV1(ctx context.Context, client *Client, req *oidc.A
authReq.UserID,
authReq.UserOrgID,
client.client.ClientID,
client.client.BackChannelLogoutURI,
scope,
authReq.Audience,
authReq.AuthMethods(),

View File

@ -288,6 +288,7 @@ func (s *Server) createExchangeAccessToken(
userID,
resourceOwner,
client.client.ClientID,
client.client.BackChannelLogoutURI,
scope,
audience,
authMethods,
@ -315,7 +316,7 @@ func (s *Server) createExchangeJWT(
client *Client,
getUserInfo userInfoFunc,
roleAssertion bool,
getSigner signerFunc,
getSigner SignerFunc,
userID,
resourceOwner string,
audience,
@ -333,6 +334,7 @@ func (s *Server) createExchangeJWT(
userID,
resourceOwner,
client.client.ClientID,
client.client.BackChannelLogoutURI,
scope,
audience,
authMethods,

View File

@ -45,6 +45,7 @@ func (s *Server) JWTProfile(ctx context.Context, r *op.Request[oidc.JWTProfileGr
client.userID,
client.resourceOwner,
client.clientID,
"", // backChannelLogoutURI not needed for service user session
scope,
domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
[]domain.UserAuthMethodType{domain.UserAuthMethodTypePrivateKey},

View File

@ -54,6 +54,7 @@ func (s *Server) refreshTokenV1(ctx context.Context, client *Client, r *op.Clien
refreshToken.UserID,
refreshToken.ResourceOwner,
refreshToken.ClientID,
"", // backChannelLogoutURI is not in refresh token view
scope,
refreshToken.Audience,
AMRToAuthMethodTypes(refreshToken.AuthMethodsReferences),

View File

@ -112,10 +112,10 @@
</div>
</form>
<script src="{{ resourceUrl " scripts/input_suffix_offset.js" }}"></script>
<script src="{{ resourceUrl " scripts/form_submit.js" }}"></script>
<script src="{{ resourceUrl " scripts/password_policy_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/register_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/loginname_suffix.js" }}"></script>
<script src="{{ resourceUrl "scripts/input_suffix_offset.js" }}"></script>
<script src="{{ resourceUrl "scripts/form_submit.js" }}"></script>
<script src="{{ resourceUrl "scripts/password_policy_check.js" }}"></script>
<script src="{{ resourceUrl "scripts/register_check.js" }}"></script>
<script src="{{ resourceUrl "scripts/loginname_suffix.js" }}"></script>
{{template "main-bottom" .}}

View File

@ -6,6 +6,7 @@ import (
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore"
@ -27,26 +28,40 @@ func (repo *UserRepo) Health(ctx context.Context) error {
return repo.Eventstore.Health(ctx)
}
func (repo *UserRepo) UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) {
userSessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID())
func (repo *UserRepo) UserSessionsByAgentID(ctx context.Context, agentID string) ([]command.HumanSignOutSession, error) {
sessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID())
if err != nil {
return nil, err
}
userIDs := make([]string, 0, len(userSessions))
for _, session := range userSessions {
if session.State.V == domain.UserSessionStateActive {
userIDs = append(userIDs, session.UserID)
signoutSessions := make([]command.HumanSignOutSession, 0, len(sessions))
for _, session := range sessions {
if session.State.V == domain.UserSessionStateActive && session.ID.Valid {
signoutSessions = append(signoutSessions, command.HumanSignOutSession{
ID: session.ID.String,
UserID: session.UserID,
})
}
}
return userIDs, nil
return signoutSessions, nil
}
func (repo *UserRepo) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) {
return repo.View.UserAgentIDBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
}
func (repo *UserRepo) ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) {
return repo.View.ActiveUserIDsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
func (repo *UserRepo) ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, signoutSessions []command.HumanSignOutSession, err error) {
userAgentID, sessions, err := repo.View.ActiveUserSessionsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
if err != nil {
return "", nil, err
}
signoutSessions = make([]command.HumanSignOutSession, 0, len(sessions))
for sessionID, userID := range sessions {
signoutSessions = append(signoutSessions, command.HumanSignOutSession{
ID: sessionID,
UserID: userID,
})
}
return userAgentID, signoutSessions, nil
}
func (repo *UserRepo) UserEventsByID(ctx context.Context, id string, changeDate time.Time, eventTypes []eventstore.EventType) ([]eventstore.Event, error) {

View File

@ -24,8 +24,8 @@ func (v *View) UserAgentIDBySessionID(ctx context.Context, sessionID, instanceID
return view.UserAgentIDBySessionID(ctx, v.client, sessionID, instanceID)
}
func (v *View) ActiveUserIDsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, userIDs []string, err error) {
return view.ActiveUserIDsBySessionID(ctx, v.client, sessionID, instanceID)
func (v *View) ActiveUserSessionsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, sessions map[string]string, err error) {
return view.ActiveUserSessionsBySessionID(ctx, v.client, sessionID, instanceID)
}
func (v *View) GetLatestUserSessionSequence(ctx context.Context, instanceID string) (_ *query.CurrentState, err error) {

View File

@ -2,10 +2,12 @@ package repository
import (
"context"
"github.com/zitadel/zitadel/internal/command"
)
type UserRepository interface {
UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error)
UserSessionsByAgentID(ctx context.Context, agentID string) (sessions []command.HumanSignOutSession, err error)
UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error)
ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error)
ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, sessions []command.HumanSignOutSession, err error)
}

View File

@ -173,7 +173,7 @@ func (repo *TokenVerifierRepo) verifySessionToken(ctx context.Context, sessionID
}
// checkAuthentication ensures the session or token was authenticated (at least a single [domain.UserAuthMethodType]).
// It will also check if there was a multi factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor
// It will also check if there was a multi-factor authentication, if either MFA is forced by the login policy or if the user has set up any second factor
func (repo *TokenVerifierRepo) checkAuthentication(ctx context.Context, authMethods []domain.UserAuthMethodType, userID string) error {
if len(authMethods) == 0 {
return zerrors.ThrowPermissionDenied(nil, "AUTHZ-Kl3p0", "authentication required")

View File

@ -6,7 +6,16 @@ import (
"time"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database/postgres"
)
// Purpose describes which object types are stored by a cache.
type Purpose int
//go:generate enumer -type Purpose -transform snake -trimprefix Purpose
const (
PurposeUnspecified Purpose = iota
PurposeAuthzInstance
PurposeMilestones
)
// Cache stores objects with a value of type `V`.
@ -71,17 +80,19 @@ type Entry[I, K comparable] interface {
Keys(index I) (key []K)
}
type CachesConfig struct {
Connectors struct {
Memory MemoryConnectorConfig
Postgres PostgresConnectorConfig
// Redis redis.Config?
}
Instance *CacheConfig
}
type Connector int
type CacheConfig struct {
Connector string
//go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text
const (
// Empty line comment ensures empty string for unspecified value
ConnectorUnspecified Connector = iota //
ConnectorMemory
ConnectorPostgres
ConnectorRedis
)
type Config struct {
Connector Connector
// Age since an object was added to the cache,
// after which the object is considered invalid.
@ -97,14 +108,3 @@ type CacheConfig struct {
// By default only errors are logged to stdout.
Log *logging.Config
}
type MemoryConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
}
type PostgresConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
Connection postgres.Config
}
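With CacheConfig renamed to Config and the connector selected via the new Connector enum, a concrete configuration might look like the following (values are illustrative; String() on Connector is generated by enumer per the go:generate directive above):

package main

import (
	"fmt"
	"time"

	"github.com/zitadel/zitadel/internal/cache"
)

func main() {
	// Illustrative only: pick the Postgres connector with a one-minute
	// max age and a five-second last-use age.
	conf := cache.Config{
		Connector:  cache.ConnectorPostgres,
		MaxAge:     time.Minute,
		LastUseAge: 5 * time.Second,
	}
	fmt.Println(conf.Connector) // "postgres" via the enumer-generated String()
}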

internal/cache/connector/connector.go (new file, 69 lines)
View File

@ -0,0 +1,69 @@
// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages.
package connector
import (
"context"
"fmt"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector/gomap"
"github.com/zitadel/zitadel/internal/cache/connector/noop"
"github.com/zitadel/zitadel/internal/cache/connector/pg"
"github.com/zitadel/zitadel/internal/cache/connector/redis"
"github.com/zitadel/zitadel/internal/database"
)
type CachesConfig struct {
Connectors struct {
Memory gomap.Config
Postgres pg.Config
Redis redis.Config
}
Instance *cache.Config
Milestones *cache.Config
}
type Connectors struct {
Config CachesConfig
Memory *gomap.Connector
Postgres *pg.Connector
Redis *redis.Connector
}
func StartConnectors(conf *CachesConfig, client *database.DB) (Connectors, error) {
if conf == nil {
return Connectors{}, nil
}
return Connectors{
Config: *conf,
Memory: gomap.NewConnector(conf.Connectors.Memory),
Postgres: pg.NewConnector(conf.Connectors.Postgres, client),
Redis: redis.NewConnector(conf.Connectors.Redis),
}, nil
}
func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) {
if conf == nil || conf.Connector == cache.ConnectorUnspecified {
return noop.NewCache[I, K, V](), nil
}
if conf.Connector == cache.ConnectorMemory && connectors.Memory != nil {
c := gomap.NewCache[I, K, V](background, indices, *conf)
connectors.Memory.Config.StartAutoPrune(background, c, purpose)
return c, nil
}
if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil {
c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres)
if err != nil {
return nil, fmt.Errorf("start cache: %w", err)
}
connectors.Postgres.Config.AutoPrune.StartAutoPrune(background, c, purpose)
return c, nil
}
if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil {
db := connectors.Redis.Config.DBOffset + int(purpose)
c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices)
return c, nil
}
return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
}
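Wiring this together might look as follows; instanceIndex and authzInstance are hypothetical stand-ins for the real index and entry types, kept only to illustrate the generic parameters:

package example

import (
	"context"

	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/cache/connector"
	"github.com/zitadel/zitadel/internal/database"
)

type instanceIndex int

type authzInstance struct{ ID string }

// Keys satisfies cache.Entry for the single hypothetical index.
func (a *authzInstance) Keys(instanceIndex) []string { return []string{a.ID} }

func startInstanceCache(ctx context.Context, conf *connector.CachesConfig, db *database.DB) (cache.Cache[instanceIndex, string, *authzInstance], error) {
	// Connectors are started once and shared by all caches.
	connectors, err := connector.StartConnectors(conf, db)
	if err != nil {
		return nil, err
	}
	// Each cache picks its connector via its own *cache.Config entry;
	// a nil config or unspecified connector yields the noop cache.
	return connector.StartCache[instanceIndex, string, *authzInstance](
		ctx, []instanceIndex{0}, cache.PurposeAuthzInstance, conf.Instance, connectors)
}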

View File

@ -0,0 +1,23 @@
package gomap
import (
"github.com/zitadel/zitadel/internal/cache"
)
type Config struct {
Enabled bool
AutoPrune cache.AutoPruneConfig
}
type Connector struct {
Config cache.AutoPruneConfig
}
func NewConnector(config Config) *Connector {
if !config.Enabled {
return nil
}
return &Connector{
Config: config.AutoPrune,
}
}

View File

@ -14,14 +14,14 @@ import (
)
type mapCache[I, K comparable, V cache.Entry[I, K]] struct {
config *cache.CacheConfig
config *cache.Config
indexMap map[I]*index[K, V]
logger *slog.Logger
}
// NewCache returns an in-memory Cache implementation based on the builtin go map type.
// Object values are stored as-is and there is no encoding or decoding involved.
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.CacheConfig) cache.PrunerCache[I, K, V] {
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] {
m := &mapCache[I, K, V]{
config: &config,
indexMap: make(map[I]*index[K, V], len(indices)),
@ -116,7 +116,7 @@ func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {
type index[K comparable, V any] struct {
mutex sync.RWMutex
config *cache.CacheConfig
config *cache.Config
entries map[K]*entry[V]
}
@ -177,7 +177,7 @@ type entry[V any] struct {
lastUse atomic.Int64 // UnixMicro time
}
func (e *entry[V]) isValid(c *cache.CacheConfig) bool {
func (e *entry[V]) isValid(c *cache.Config) bool {
if e.invalid.Load() {
return false
}

View File

@ -41,7 +41,7 @@ func (o *testObject) Keys(index testIndex) []string {
}
func Test_mapCache_Get(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second,
LastUseAge: time.Second / 4,
Log: &logging.Config{
@ -103,7 +103,7 @@ func Test_mapCache_Get(t *testing.T) {
}
func Test_mapCache_Invalidate(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second,
LastUseAge: time.Second / 4,
Log: &logging.Config{
@ -124,7 +124,7 @@ func Test_mapCache_Invalidate(t *testing.T) {
}
func Test_mapCache_Delete(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second,
LastUseAge: time.Second / 4,
Log: &logging.Config{
@ -157,7 +157,7 @@ func Test_mapCache_Delete(t *testing.T) {
}
func Test_mapCache_Prune(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second,
LastUseAge: time.Second / 4,
Log: &logging.Config{
@ -193,7 +193,7 @@ func Test_mapCache_Prune(t *testing.T) {
}
func Test_mapCache_Truncate(t *testing.T) {
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
MaxAge: time.Second,
LastUseAge: time.Second / 4,
Log: &logging.Config{
@ -235,7 +235,7 @@ func Test_entry_isValid(t *testing.T) {
tests := []struct {
name string
fields fields
config *cache.CacheConfig
config *cache.Config
want bool
}{
{
@ -245,7 +245,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: true,
lastUse: time.Now(),
},
config: &cache.CacheConfig{
config: &cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -258,7 +258,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false,
lastUse: time.Now(),
},
config: &cache.CacheConfig{
config: &cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -271,7 +271,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false,
lastUse: time.Now(),
},
config: &cache.CacheConfig{
config: &cache.Config{
LastUseAge: time.Second,
},
want: true,
@ -283,7 +283,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false,
lastUse: time.Now().Add(-(time.Second * 2)),
},
config: &cache.CacheConfig{
config: &cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -296,7 +296,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false,
lastUse: time.Now().Add(-(time.Second * 2)),
},
config: &cache.CacheConfig{
config: &cache.Config{
MaxAge: time.Minute,
},
want: true,
@ -308,7 +308,7 @@ func Test_entry_isValid(t *testing.T) {
invalid: false,
lastUse: time.Now(),
},
config: &cache.CacheConfig{
config: &cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},

View File

@ -0,0 +1,28 @@
package pg
import (
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/database"
)
type Config struct {
Enabled bool
AutoPrune cache.AutoPruneConfig
}
type Connector struct {
PGXPool
Dialect string
Config Config
}
func NewConnector(config Config, client *database.DB) *Connector {
if !config.Enabled {
return nil
}
return &Connector{
PGXPool: client.Pool,
Dialect: client.Type(),
Config: config,
}
}

View File

@ -40,25 +40,25 @@ type PGXPool interface {
}
type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
name string
config *cache.CacheConfig
indices []I
pool PGXPool
logger *slog.Logger
purpose cache.Purpose
config *cache.Config
indices []I
connector *Connector
logger *slog.Logger
}
// NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name string, config cache.CacheConfig, indices []I, pool PGXPool, dialect string) (cache.PrunerCache[I, K, V], error) {
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
c := &pgCache[I, K, V]{
name: name,
config: &config,
indices: indices,
pool: pool,
logger: config.Log.Slog().With("cache_name", name),
purpose: purpose,
config: &config,
indices: indices,
connector: connector,
logger: config.Log.Slog().With("cache_purpose", purpose),
}
c.logger.InfoContext(ctx, "pg cache logging enabled")
if dialect == "postgres" {
if connector.Dialect == "postgres" {
if err := c.createPartition(ctx); err != nil {
return nil, err
}
@ -68,10 +68,10 @@ func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name
func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error {
var query strings.Builder
if err := createPartitionTmpl.Execute(&query, c.name); err != nil {
if err := createPartitionTmpl.Execute(&query, c.purpose.String()); err != nil {
return err
}
_, err := c.pool.Exec(ctx, query.String())
_, err := c.connector.Exec(ctx, query.String())
return err
}
@ -87,7 +87,7 @@ func (c *pgCache[I, K, V]) set(ctx context.Context, entry V) (err error) {
keys := c.indexKeysFromEntry(entry)
c.logger.DebugContext(ctx, "pg cache set", "index_key", keys)
_, err = c.pool.Exec(ctx, setQuery, c.name, keys, entry)
_, err = c.connector.Exec(ctx, setQuery, c.purpose.String(), keys, entry)
if err != nil {
c.logger.ErrorContext(ctx, "pg cache set", "err", err)
return err
@ -117,7 +117,7 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er
if !slices.Contains(c.indices, index) {
return value, cache.NewIndexUnknownErr(index)
}
err = c.pool.QueryRow(ctx, getQuery, c.name, index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
return value, err
}
@ -125,7 +125,7 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, invalidateQuery, c.name, index, keys)
_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys)
c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
return err
}
@ -134,7 +134,7 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, deleteQuery, c.name, index, keys)
_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys)
c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
return err
}
@ -143,7 +143,7 @@ func (c *pgCache[I, K, V]) Prune(ctx context.Context) (err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, pruneQuery, c.name, c.config.MaxAge, c.config.LastUseAge)
_, err = c.connector.Exec(ctx, pruneQuery, c.purpose.String(), c.config.MaxAge, c.config.LastUseAge)
c.logger.DebugContext(ctx, "pg cache prune")
return err
}
@ -152,7 +152,7 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
ctx, span := tracing.NewSpan(ctx)
defer func() { span.EndWithError(err) }()
_, err = c.pool.Exec(ctx, truncateQuery, c.name)
_, err = c.connector.Exec(ctx, truncateQuery, c.purpose.String())
c.logger.DebugContext(ctx, "pg cache truncate")
return err
}

View File

@ -67,7 +67,7 @@ func TestNewCache(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
conf := cache.CacheConfig{
conf := cache.Config{
Log: &logging.Config{
Level: "debug",
AddSource: true,
@ -76,8 +76,12 @@ func TestNewCache(t *testing.T) {
pool, err := pgxmock.NewPool()
require.NoError(t, err)
tt.expect(pool)
connector := &Connector{
PGXPool: pool,
Dialect: "postgres",
}
c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres")
c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
require.ErrorIs(t, err, tt.wantErr)
if tt.wantErr == nil {
assert.NotNil(t, c)
@ -111,7 +115,7 @@ func Test_pgCache_Set(t *testing.T) {
},
expect: func(ppi pgxmock.PgxCommonIface) {
ppi.ExpectExec(queryExpect).
WithArgs("test",
WithArgs(cachePurpose.String(),
[]indexKey[testIndex, string]{
{IndexID: testIndexID, IndexKey: "id1"},
{IndexID: testIndexName, IndexKey: "foo"},
@ -135,7 +139,7 @@ func Test_pgCache_Set(t *testing.T) {
},
expect: func(ppi pgxmock.PgxCommonIface) {
ppi.ExpectExec(queryExpect).
WithArgs("test",
WithArgs(cachePurpose.String(),
[]indexKey[testIndex, string]{
{IndexID: testIndexID, IndexKey: "id1"},
{IndexID: testIndexName, IndexKey: "foo"},
@ -151,7 +155,7 @@ func Test_pgCache_Set(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c, pool := prepareCache(t, cache.CacheConfig{})
c, pool := prepareCache(t, cache.Config{})
defer pool.Close()
tt.expect(pool)
@ -173,7 +177,7 @@ func Test_pgCache_Get(t *testing.T) {
}
tests := []struct {
name string
config cache.CacheConfig
config cache.Config
args args
expect func(pgxmock.PgxCommonIface)
want *testObject
@ -181,7 +185,7 @@ func Test_pgCache_Get(t *testing.T) {
}{
{
name: "invalid index",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -194,7 +198,7 @@ func Test_pgCache_Get(t *testing.T) {
},
{
name: "no rows",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
@ -204,14 +208,14 @@ func Test_pgCache_Get(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
WillReturnRows(pgxmock.NewRows([]string{"payload"}))
},
wantOk: false,
},
{
name: "error",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
@ -221,14 +225,14 @@ func Test_pgCache_Get(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
WillReturnError(pgx.ErrTxClosed)
},
wantOk: false,
},
{
name: "ok",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -238,7 +242,7 @@ func Test_pgCache_Get(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectQuery(queryExpect).
WithArgs("test", testIndexID, "id1", time.Minute, time.Second).
WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second).
WillReturnRows(
pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
ID: "id1",
@ -276,14 +280,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
}
tests := []struct {
name string
config cache.CacheConfig
config cache.Config
args args
expect func(pgxmock.PgxCommonIface)
wantErr error
}{
{
name: "error",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
@ -293,14 +297,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}).
WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnError(pgx.ErrTxClosed)
},
wantErr: pgx.ErrTxClosed,
},
{
name: "ok",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -310,7 +314,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}).
WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnResult(pgxmock.NewResult("DELETE", 1))
},
},
@ -338,14 +342,14 @@ func Test_pgCache_Delete(t *testing.T) {
}
tests := []struct {
name string
config cache.CacheConfig
config cache.Config
args args
expect func(pgxmock.PgxCommonIface)
wantErr error
}{
{
name: "error",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
@ -355,14 +359,14 @@ func Test_pgCache_Delete(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}).
WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnError(pgx.ErrTxClosed)
},
wantErr: pgx.ErrTxClosed,
},
{
name: "ok",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
@ -372,7 +376,7 @@ func Test_pgCache_Delete(t *testing.T) {
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", testIndexID, []string{"id1", "id2"}).
WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
WillReturnResult(pgxmock.NewResult("DELETE", 1))
},
},
@ -396,32 +400,32 @@ func Test_pgCache_Prune(t *testing.T) {
queryExpect := regexp.QuoteMeta(pruneQuery)
tests := []struct {
name string
config cache.CacheConfig
config cache.Config
expect func(pgxmock.PgxCommonIface)
wantErr error
}{
{
name: "error",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", time.Duration(0), time.Duration(0)).
WithArgs(cachePurpose.String(), time.Duration(0), time.Duration(0)).
WillReturnError(pgx.ErrTxClosed)
},
wantErr: pgx.ErrTxClosed,
},
{
name: "ok",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test", time.Minute, time.Second).
WithArgs(cachePurpose.String(), time.Minute, time.Second).
WillReturnResult(pgxmock.NewResult("DELETE", 1))
},
},
@ -445,32 +449,32 @@ func Test_pgCache_Truncate(t *testing.T) {
queryExpect := regexp.QuoteMeta(truncateQuery)
tests := []struct {
name string
config cache.CacheConfig
config cache.Config
expect func(pgxmock.PgxCommonIface)
wantErr error
}{
{
name: "error",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: 0,
LastUseAge: 0,
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test").
WithArgs(cachePurpose.String()).
WillReturnError(pgx.ErrTxClosed)
},
wantErr: pgx.ErrTxClosed,
},
{
name: "ok",
config: cache.CacheConfig{
config: cache.Config{
MaxAge: time.Minute,
LastUseAge: time.Second,
},
expect: func(pci pgxmock.PgxCommonIface) {
pci.ExpectExec(queryExpect).
WithArgs("test").
WithArgs(cachePurpose.String()).
WillReturnResult(pgxmock.NewResult("DELETE", 1))
},
},
@ -491,18 +495,18 @@ func Test_pgCache_Truncate(t *testing.T) {
}
const (
cacheName = "test"
expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_test
cachePurpose = cache.PurposeAuthzInstance
expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_authz_instance
partition of cache.objects
for values in ('test');
for values in ('authz_instance');
create unlogged table if not exists cache.string_keys_test
create unlogged table if not exists cache.string_keys_authz_instance
partition of cache.string_keys
for values in ('test');
for values in ('authz_instance');
`
)
func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
conf.Log = &logging.Config{
Level: "debug",
AddSource: true,
@ -512,8 +516,11 @@ func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testI
pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres")
connector := &Connector{
PGXPool: pool,
Dialect: "postgres",
}
c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
require.NoError(t, err)
return c, pool
}

View File

@ -0,0 +1,10 @@
local function remove(object_id)
local setKey = keySetKey(object_id)
local keys = redis.call("SMEMBERS", setKey)
local n = #keys
for i = 1, n do
redis.call("DEL", keys[i])
end
redis.call("DEL", setKey)
redis.call("DEL", object_id)
end

View File

@ -0,0 +1,3 @@
-- SELECT switches the connection to the DB namespace used by each script.
-- When included, it consumes the first ARGV entry.
redis.call("SELECT", ARGV[1])

View File

@ -0,0 +1,17 @@
-- keySetKey returns the redis key of the set containing all keys that point to the object.
local function keySetKey (object_id)
return object_id .. "-keys"
end
local function getTime()
return tonumber(redis.call('TIME')[1])
end
-- getCall wraps redis.call so that nil is returned instead of false.
local function getCall (...)
local result = redis.call(...)
if result == false then
return nil
end
return result
end

View File

@ -0,0 +1,154 @@
package redis
import (
"crypto/tls"
"time"
"github.com/redis/go-redis/v9"
)
type Config struct {
Enabled bool
// The network type, either tcp or unix.
// Default is tcp.
Network string
// host:port address.
Addr string
// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
ClientName string
// Use the specified Username to authenticate the current connection
// with one of the connections defined in the ACL list when connecting
// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
Username string
// Optional password. Must match the password specified in the
// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
// or the User Password when connecting to a Redis 6.0 instance, or greater,
// that is using the Redis ACL system.
Password string
// Each ZITADEL cache uses an incremental DB namespace.
// This option offsets the first DB so it doesn't conflict with other databases on the same server.
// Note that ZITADEL uses the FLUSHDB command to truncate a cache.
// This can have destructive consequences when overlapping DB namespaces are used.
DBOffset int
// Maximum number of retries before giving up.
// Default is 3 retries; -1 (not 0) disables retries.
MaxRetries int
// Minimum backoff between each retry.
// Default is 8 milliseconds; -1 disables backoff.
MinRetryBackoff time.Duration
// Maximum backoff between each retry.
// Default is 512 milliseconds; -1 disables backoff.
MaxRetryBackoff time.Duration
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
// with a timeout instead of blocking. Supported values:
// - `0` - default timeout (3 seconds).
// - `-1` - no timeout (block indefinitely).
// - `-2` - disables SetReadDeadline calls completely.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
// with a timeout instead of blocking. Supported values:
// - `0` - default timeout (3 seconds).
// - `-1` - no timeout (block indefinitely).
// - `-2` - disables SetWriteDeadline calls completely.
WriteTimeout time.Duration
// Type of connection pool.
// true for FIFO pool, false for LIFO pool.
// Note that FIFO has slightly higher overhead compared to LIFO,
// but it helps close idle connections faster, reducing the pool size.
PoolFIFO bool
// Base number of socket connections.
// Default is 10 connections per available CPU as reported by runtime.GOMAXPROCS.
// If there are not enough connections in the pool, new connections will be allocated in excess of PoolSize;
// you can limit this through MaxActiveConns.
PoolSize int
// Amount of time the client waits for a connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
// Minimum number of idle connections, which is useful when establishing
// a new connection is slow.
// Default is 0; idle connections are not closed by default.
MinIdleConns int
// Maximum number of idle connections.
// Default is 0; idle connections are not closed by default.
MaxIdleConns int
// Maximum number of connections allocated by the pool at a given time.
// When zero, there is no limit on the number of connections in the pool.
MaxActiveConns int
// ConnMaxIdleTime is the maximum amount of time a connection may be idle.
// Should be less than server's timeout.
//
// Expired connections may be closed lazily before reuse.
// If d <= 0, connections are not closed due to a connection's idle time.
//
// Default is 30 minutes. -1 disables idle timeout check.
ConnMaxIdleTime time.Duration
// ConnMaxLifetime is the maximum amount of time a connection may be reused.
//
// Expired connections may be closed lazily before reuse.
// If <= 0, connections are not closed due to a connection's age.
//
// Default is to not close connections due to their age.
ConnMaxLifetime time.Duration
EnableTLS bool
// Disable set-lib on connect. Default is false.
DisableIndentity bool
// Add suffix to client name. Default is empty.
IdentitySuffix string
}
type Connector struct {
*redis.Client
Config Config
}
func NewConnector(config Config) *Connector {
if !config.Enabled {
return nil
}
return &Connector{
Client: redis.NewClient(optionsFromConfig(config)),
Config: config,
}
}
func optionsFromConfig(c Config) *redis.Options {
opts := &redis.Options{
Network: c.Network,
Addr: c.Addr,
ClientName: c.ClientName,
Protocol: 3,
Username: c.Username,
Password: c.Password,
MaxRetries: c.MaxRetries,
MinRetryBackoff: c.MinRetryBackoff,
MaxRetryBackoff: c.MaxRetryBackoff,
DialTimeout: c.DialTimeout,
ReadTimeout: c.ReadTimeout,
WriteTimeout: c.WriteTimeout,
ContextTimeoutEnabled: true,
PoolFIFO: c.PoolFIFO,
PoolTimeout: c.PoolTimeout,
MinIdleConns: c.MinIdleConns,
MaxIdleConns: c.MaxIdleConns,
MaxActiveConns: c.MaxActiveConns,
ConnMaxIdleTime: c.ConnMaxIdleTime,
ConnMaxLifetime: c.ConnMaxLifetime,
DisableIndentity: c.DisableIndentity,
IdentitySuffix: c.IdentitySuffix,
}
if c.EnableTLS {
opts.TLSConfig = new(tls.Config)
}
return opts
}
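A minimal usage sketch, assuming a local Redis without TLS; the DB-per-purpose arithmetic mirrors StartCache above (DBOffset + int(purpose)):

package main

import (
	"fmt"

	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/cache/connector/redis"
)

func main() {
	conn := redis.NewConnector(redis.Config{
		Enabled:  true,
		Network:  "tcp",
		Addr:     "localhost:6379",
		DBOffset: 10,
	})
	// Each purpose gets its own logical DB: offset plus enum value,
	// so truncating one cache's DB cannot hit another purpose.
	fmt.Println(conn.Config.DBOffset + int(cache.PurposeMilestones)) // 12
}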

internal/cache/connector/redis/get.lua (new file, 29 lines)
View File

@ -0,0 +1,29 @@
local result = redis.call("GET", KEYS[1])
if result == false then
return nil
end
local object_id = tostring(result)
local object = getCall("HGET", object_id, "object")
if object == nil then
-- object expired, but there are keys that need to be cleaned up
remove(object_id)
return nil
end
-- max-age must be checked manually
-- expiry is stored as a string; convert it so the comparisons below are numeric
local expiry = tonumber(getCall("HGET", object_id, "expiry"))
if not (expiry == nil) and expiry > 0 then
if getTime() > expiry then
remove(object_id)
return nil
end
end
local usage_lifetime = getCall("HGET", object_id, "usage_lifetime")
-- reset usage based TTL
if not (usage_lifetime == nil) and tonumber(usage_lifetime) > 0 then
redis.call('EXPIRE', object_id, usage_lifetime)
end
return object
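For orientation: the helper snippets above (the SELECT prelude, keySetKey/getTime/getCall, and remove) are presumably concatenated with get.lua and loaded as one script. A hedged go-redis sketch of that idea; the embedded file names are assumptions, not the cache's actual layout:

package example

import (
	"context"
	_ "embed"

	"github.com/redis/go-redis/v9"
)

//go:embed _select.lua
var selectSrc string

//go:embed _util.lua
var utilSrc string

//go:embed remove.lua
var removeSrc string

//go:embed get.lua
var getSrc string

// getScript bundles the shared helpers with get.lua; go-redis loads it
// once and reuses the cached SHA via EVALSHA on later runs.
var getScript = redis.NewScript(selectSrc + "\n" + utilSrc + "\n" + removeSrc + "\n" + getSrc)

func getObject(ctx context.Context, rdb *redis.Client, db int, indexKey string) (string, error) {
	// ARGV[1] is consumed by the SELECT prelude to pick the DB namespace.
	return getScript.Run(ctx, rdb, []string{indexKey}, db).Text()
}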

Some files were not shown because too many files have changed in this diff.