mirror of https://github.com/zitadel/zitadel.git
synced 2024-12-04 23:45:07 +00:00

Merge branch 'main' into template-branch
This commit is contained in commit f8303df2cc.

@@ -8,25 +8,24 @@ services:
network_mode: service:db
command: sleep infinity
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
db:
image: postgres:latest
restart: unless-stopped
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
PGUSER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: postgres

volumes:
postgres-data:
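Both the quoted list form (`- 'KEY=value'`) and the mapping form (`KEY: value`) are valid Compose syntax for `environment:`; this change only switches styles. A minimal sketch for checking that an edited file still parses, assuming it is saved as `docker-compose.yaml` (a placeholder path):

```bash
# Print the normalized configuration; any YAML or schema error aborts with a message.
docker compose -f docker-compose.yaml config
```
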
.github/workflows/core-integration-test.yml (4 changes, vendored)
@@ -36,6 +36,10 @@ jobs:
--health-timeout 5s
--health-retries 5
--health-start-period 10s
cache:
image: redis:latest
ports:
- 6379:6379
steps:
- uses: actions/checkout@v4

@@ -10,6 +10,7 @@ module.exports = {
"@semantic-release/github",
{
draftRelease: true,
successComment: false,
assets: [
{
path: ".artifacts/zitadel-linux-amd64/zitadel-linux-amd64.tar.gz",

ADOPTERS.md (new file, 9 lines)
@@ -0,0 +1,9 @@
## Adopters

We are grateful to the organizations and individuals who are using ZITADEL. If you are using ZITADEL, please consider adding your name to this list by submitting a pull request.

| Organization/Individual | Contact Information | Description of Usage |
| ----------------------- | -------------------------------------------------------- | ----------------------------------------------- |
| ZITADEL | [@fforootd](https://github.com/fforootd) (and many more) | ZITADEL Cloud makes heavy use of ZITADEL ;-) |
| Organization Name | contact@example.com | Description of how they use ZITADEL |
| Individual Name | contact@example.com | Description of how they use ZITADEL |

Makefile (14 changes)
@@ -63,12 +63,12 @@ endif

.PHONY: core_grpc_dependencies
core_grpc_dependencies:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.20.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions
go install github.com/envoyproxy/protoc-gen-validate@v1.0.4 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions
go install github.com/bufbuild/buf/cmd/buf@v1.34.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.35.1 # https://pkg.go.dev/google.golang.org/protobuf/cmd/protoc-gen-go?tab=versions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 # https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway?tab=versions
go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.22.0 # https://pkg.go.dev/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2?tab=versions
go install github.com/envoyproxy/protoc-gen-validate@v1.1.0 # https://pkg.go.dev/github.com/envoyproxy/protoc-gen-validate?tab=versions
go install github.com/bufbuild/buf/cmd/buf@v1.45.0 # https://pkg.go.dev/github.com/bufbuild/buf/cmd/buf?tab=versions

.PHONY: core_api
core_api: core_api_generator core_grpc_dependencies
@@ -113,7 +113,7 @@ core_unit_test:

.PHONY: core_integration_db_up
core_integration_db_up:
docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR}
docker compose -f internal/integration/config/docker-compose.yaml up --pull always --wait $${INTEGRATION_DB_FLAVOR} cache

.PHONY: core_integration_db_down
core_integration_db_down:

@@ -89,6 +89,10 @@ Available data regions are:
ZITADEL Cloud comes with a free tier, providing you with all the same features as the open-source version.
Learn more about the [pay-as-you-go pricing](https://zitadel.com/pricing).

## Adopters

We are grateful to the organizations and individuals who are using ZITADEL. If you are using ZITADEL, please consider adding your name to our [Adopters list](./ADOPTERS.md) by submitting a pull request.

### Example applications

Clone one of our [example applications](https://zitadel.com/docs/sdk-examples/introduction) or deploy them directly to Vercel.

File diff suppressed because one or more lines are too long
@@ -25,7 +25,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
crypto_db "github.com/zitadel/zitadel/internal/crypto/database"
@@ -72,7 +72,7 @@ type ProjectionsConfig struct {
EncryptionKeys *encryption.EncryptionKeyConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser
Eventstore *eventstore.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig

Admin admin_es.Config
Auth auth_es.Config
@@ -128,13 +128,16 @@ func projections(

sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

cacheConnectors, err := connector.StartConnectors(config.Caches, client)
logging.OnError(err).Fatal("unable to start caches")

queries, err := query.StartQueries(
ctx,
es,
esV4.Querier,
client,
client,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@@ -161,9 +164,9 @@ func projections(
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
es,
config.Caches,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
staticStorage,
@@ -200,6 +203,7 @@ func projections(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@@ -213,6 +217,8 @@ func projections(
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)

config.Auth.Spooler.Client = client

@@ -5,13 +5,16 @@ import (
"database/sql"
_ "embed"
"fmt"
"slices"

"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"

cryptoDatabase "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
"github.com/zitadel/zitadel/internal/query/projection"
)

func verifyCmd() *cobra.Command {
@@ -98,12 +101,22 @@ func getViews(ctx context.Context, dest *database.DB, schema string) (tables []s
}

func countEntries(ctx context.Context, client *database.DB, table string) (count int) {
instanceClause := instanceClause()
noInstanceIDColumn := []string{
projection.InstanceProjectionTable,
projection.SystemFeatureTable,
cryptoDatabase.EncryptionKeysTable,
}
if slices.Contains(noInstanceIDColumn, table) {
instanceClause = ""
}

err := client.QueryRowContext(
ctx,
func(r *sql.Row) error {
return r.Scan(&count)
},
fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause()),
fmt.Sprintf("SELECT COUNT(*) FROM %s %s", table, instanceClause),
)
logging.WithFields("table", table, "db", client.DatabaseName()).OnError(err).Error("unable to count")

@@ -9,6 +9,7 @@ import (
"golang.org/x/text/language"

"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/crypto"
@@ -64,8 +65,9 @@ func (mig *FirstInstance) Execute(ctx context.Context, _ eventstore.Event) error
return err
}

cmd, err := command.StartCommands(mig.es,
nil,
cmd, err := command.StartCommands(ctx,
mig.es,
connector.Connectors{},
mig.defaults,
mig.zitadelRoles,
nil,

@@ -23,7 +23,7 @@ var (
getProjectedMilestones string
)

type FillV2Milestones struct {
type FillV3Milestones struct {
dbClient *database.DB
eventstore *eventstore.Eventstore
}
@@ -34,7 +34,7 @@ type instanceMilestone struct {
Pushed *time.Time
}

func (mig *FillV2Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
func (mig *FillV3Milestones) Execute(ctx context.Context, _ eventstore.Event) error {
im, err := mig.getProjectedMilestones(ctx)
if err != nil {
return err
@@ -42,7 +42,7 @@ func (mig *FillV2Milestones) Execute(ctx context.Context, _ eventstore.Event) er
return mig.pushEventsByInstance(ctx, im)
}

func (mig *FillV2Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
func (mig *FillV3Milestones) getProjectedMilestones(ctx context.Context) (map[string][]instanceMilestone, error) {
type row struct {
InstanceID string
Type milestone.Type
@@ -73,7 +73,7 @@ func (mig *FillV2Milestones) getProjectedMilestones(ctx context.Context) (map[st

// pushEventsByInstance creates the v2 milestone events by instance.
// This prevents pushing 6*N(instance) events in one push.
func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
func (mig *FillV3Milestones) pushEventsByInstance(ctx context.Context, milestoneMap map[string][]instanceMilestone) error {
// keep a deterministic order by instance ID.
order := make([]string, 0, len(milestoneMap))
for k := range milestoneMap {
@@ -81,8 +81,8 @@ func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestone
}
slices.Sort(order)

for _, instanceID := range order {
logging.WithFields("instance_id", instanceID, "migration", mig.String()).Info("filter existing milestone events")
for i, instanceID := range order {
logging.WithFields("instance_id", instanceID, "migration", mig.String(), "progress", fmt.Sprintf("%d/%d", i+1, len(order))).Info("filter existing milestone events")

// because each Push runs in a separate TX, we need to make sure that events
// from a partially executed migration are pushed again.
@@ -113,6 +113,6 @@ func (mig *FillV2Milestones) pushEventsByInstance(ctx context.Context, milestone
return nil
}

func (mig *FillV2Milestones) String() string {
return "36_fill_v2_milestones"
func (mig *FillV3Milestones) String() string {
return "36_fill_v3_milestones"
}

cmd/setup/37.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package setup

import (
"context"
_ "embed"

"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)

var (
//go:embed 37.sql
addBackChannelLogoutURI string
)

type Apps7OIDConfigsBackChannelLogoutURI struct {
dbClient *database.DB
}

func (mig *Apps7OIDConfigsBackChannelLogoutURI) Execute(ctx context.Context, _ eventstore.Event) error {
_, err := mig.dbClient.ExecContext(ctx, addBackChannelLogoutURI)
return err
}

func (mig *Apps7OIDConfigsBackChannelLogoutURI) String() string {
return "37_apps7_oidc_configs_add_back_channel_logout_uri"
}
cmd/setup/37.sql (new file, 1 line)
@@ -0,0 +1 @@
ALTER TABLE IF EXISTS projections.apps7_oidc_configs ADD COLUMN IF NOT EXISTS back_channel_logout_uri TEXT;
cmd/setup/38.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package setup

import (
"context"
_ "embed"

"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)

var (
//go:embed 38.sql
backChannelLogoutCurrentState string
)

type BackChannelLogoutNotificationStart struct {
dbClient *database.DB
esClient *eventstore.Eventstore
}

func (mig *BackChannelLogoutNotificationStart) Execute(ctx context.Context, e eventstore.Event) error {
_, err := mig.dbClient.ExecContext(ctx, backChannelLogoutCurrentState, e.Sequence(), e.CreatedAt(), e.Position())
return err
}

func (mig *BackChannelLogoutNotificationStart) String() string {
return "38_back_channel_logout_notification_start_"
}
cmd/setup/38.sql (new file, 20 lines)
@@ -0,0 +1,20 @@
INSERT INTO projections.current_states (
instance_id
, projection_name
, last_updated
, sequence
, event_date
, position
, filter_offset
)
SELECT instance_id
, 'projections.notifications_back_channel_logout'
, now()
, $1
, $2
, $3
, 0
FROM eventstore.events2
WHERE aggregate_type = 'instance'
AND event_type = 'instance.added'
ON CONFLICT DO NOTHING;
@@ -15,7 +15,7 @@ import (
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/ui/login"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
@@ -31,7 +31,7 @@ import (
type Config struct {
ForMirror bool
Database database.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig
SystemDefaults systemdefaults.SystemDefaults
InternalAuthZ internal_authz.Config
ExternalDomain string
@@ -122,7 +122,9 @@ type Steps struct {
s33SMSConfigs3TwilioAddVerifyServiceSid *SMSConfigs3TwilioAddVerifyServiceSid
s34AddCacheSchema *AddCacheSchema
s35AddPositionToIndexEsWm *AddPositionToIndexEsWm
s36FillV2Milestones *FillV2Milestones
s36FillV2Milestones *FillV3Milestones
s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI
s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart
}

func MustNewSteps(v *viper.Viper) *Steps {

@@ -3,6 +3,7 @@ package setup
import (
"context"

"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/systemdefaults"
"github.com/zitadel/zitadel/internal/eventstore"
@@ -31,9 +32,9 @@ func (mig *externalConfigChange) Check(lastRun map[string]interface{}) bool {
}

func (mig *externalConfigChange) Execute(ctx context.Context, _ eventstore.Event) error {
cmd, err := command.StartCommands(
cmd, err := command.StartCommands(ctx,
mig.es,
nil,
connector.Connectors{},
mig.defaults,
nil,
nil,

@@ -22,6 +22,7 @@ import (
auth_view "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/zitadel/zitadel/internal/authz"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database"
@@ -165,7 +166,9 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s33SMSConfigs3TwilioAddVerifyServiceSid = &SMSConfigs3TwilioAddVerifyServiceSid{dbClient: esPusherDBClient}
steps.s34AddCacheSchema = &AddCacheSchema{dbClient: queryDBClient}
steps.s35AddPositionToIndexEsWm = &AddPositionToIndexEsWm{dbClient: esPusherDBClient}
steps.s36FillV2Milestones = &FillV2Milestones{dbClient: queryDBClient, eventstore: eventstoreClient}
steps.s36FillV2Milestones = &FillV3Milestones{dbClient: queryDBClient, eventstore: eventstoreClient}
steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient}
steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient}

err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
@@ -211,6 +214,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s34AddCacheSchema,
steps.s35AddPositionToIndexEsWm,
steps.s36FillV2Milestones,
steps.s38BackChannelLogoutNotificationStart,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@@ -227,6 +231,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s27IDPTemplate6SAMLNameIDFormat,
steps.s32AddAuthSessionID,
steps.s33SMSConfigs3TwilioAddVerifyServiceSid,
steps.s37Apps7OIDConfigsBackChannelLogoutURI,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@@ -342,13 +347,17 @@ func initProjections(
}

sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)

cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
logging.OnError(err).Fatal("unable to start caches")

queries, err := query.StartQueries(
ctx,
eventstoreClient,
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@@ -390,9 +399,9 @@ func initProjections(
permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
eventstoreClient,
config.Caches,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
staticStorage,
@@ -424,6 +433,7 @@ func initProjections(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@@ -437,6 +447,8 @@ func initProjections(
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)
for _, p := range notify_handler.Projections() {
err := migration.Migrate(ctx, eventstoreClient, p)

@@ -18,7 +18,7 @@ import (
"github.com/zitadel/zitadel/internal/api/ui/console"
"github.com/zitadel/zitadel/internal/api/ui/login"
auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/config/hook"
"github.com/zitadel/zitadel/internal/config/network"
@@ -49,7 +49,7 @@ type Config struct {
HTTP1HostHeader string
WebAuthNName string
Database database.Config
Caches *cache.CachesConfig
Caches *connector.CachesConfig
Tracing tracing.Config
Metrics metrics.Config
Profiler profiler.Config

@@ -69,6 +69,7 @@ import (
"github.com/zitadel/zitadel/internal/authz"
authz_repo "github.com/zitadel/zitadel/internal/authz/repository"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/crypto"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
@@ -177,6 +178,10 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
}))

sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
if err != nil {
return fmt.Errorf("unable to start caches: %w", err)
}

queries, err := query.StartQueries(
ctx,
@@ -184,7 +189,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
config.Caches,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
@@ -222,9 +227,9 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
commands, err := command.StartCommands(
commands, err := command.StartCommands(ctx,
eventstoreClient,
config.Caches,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
storage,
@@ -270,6 +275,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
*config.Telemetry,
config.ExternalDomain,
@@ -283,6 +289,8 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
)
notification.Start(ctx)

docs/docs/apis/benchmarks/_template.mdx (new file, 77 lines)
@@ -0,0 +1,77 @@
<!--
query data from output.csv:

Note: you might need to adjust the WHERE clause to only filter the required trends and the current placeholders.
Warning: it is currently only possible to show data of one endpoint.

```
copy (SELECT
    metric_name
    , to_timestamp(timestamp::DOUBLE) as timestamp
    , approx_quantile(metric_value, 0.50) AS p50
    , approx_quantile(metric_value, 0.95) AS p95
    , approx_quantile(metric_value, 0.99) AS p99
FROM
    read_csv('/path/to/k6-output.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, comment='', header=true, columns={'metric_name': 'VARCHAR', 'timestamp': 'BIGINT', 'metric_value': 'DOUBLE', 'check': 'VARCHAR', 'error': 'VARCHAR', 'error_code': 'VARCHAR', 'expected_response': 'BOOLEAN', 'group': 'VARCHAR', 'method': 'VARCHAR', 'name': 'VARCHAR', 'proto': 'VARCHAR', 'scenario': 'VARCHAR', 'service': 'VARCHAR', 'status': 'BIGINT', 'subproto': 'VARCHAR', 'tls_version': 'VARCHAR', 'url': 'VARCHAR', 'extra_tags': 'VARCHAR', 'metadata': 'VARCHAR'})
WHERE
    metric_name LIKE '%_duration'
GROUP BY
    metric_name
    , timestamp
ORDER BY
    metric_name
    , timestamp
) to 'output.json' (ARRAY);
```
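
A minimal sketch of rendering `output.json` with the DuckDB CLI, assuming the `copy (...)` statement above is saved as `query.sql` (both file names are placeholders):

```bash
# Feed the saved query to DuckDB; it reads the k6 CSV and writes output.json.
duckdb < query.sql
```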
-->

## Summary

TODO: describe the outcome of the test?

## Performance test results

| Metric | Value |
| :------------------------------------- | :----- |
| Baseline | none |
| Purpose | |
| Test start | UTC |
| Test duration | 30min |
| Executed test | |
| k6 version | |
| VUs | |
| Client location | |
| Client machine specification | vCPU: <br/> memory: Gb |
| ZITADEL location | |
| ZITADEL container specification | vCPU: <br/> Memory: Gb <br/>Container count: |
| ZITADEL Version | |
| ZITADEL Configuration | |
| ZITADEL feature flags | |
| Database | type: crdb / psql<br />version: |
| Database location | |
| Database specification | vCPU: <br/> memory: Gb |
| ZITADEL metrics during test | |
| Observed errors | |
| Top 3 most expensive database queries | |
| Database metrics during test | |
| k6 Iterations per second | |
| k6 overview | |
| k6 output | |
| flowchart outcome | |

## Endpoint latencies

import OutputSource from "!!raw-loader!./output.json";

import { BenchmarkChart } from '/src/components/benchmark_chart';

<BenchmarkChart testResults={OutputSource} />

## k6 output {#k6-output}

```bash
TODO: add summary of k6
```

docs/docs/apis/benchmarks/index.mdx (new file, 111 lines)
@@ -0,0 +1,111 @@
---
title: Benchmarks
sidebar_label: Benchmarks
---

import DocCardList from '@theme/DocCardList';

Benchmarks are crucial to understand whether ZITADEL fulfills your expected workload and what resources it needs to do so.

This document explains the process and goals of load-testing ZITADEL in a cloud environment.

The results can be found on the subpages.

## Goals

The primary goal is to assess whether ZITADEL can scale to the required proportions. The goals might change over time as ZITADEL matures. At the moment, the goal is to assess how the application's performance scales. There are some concrete goals we have to meet:

1. [https://github.com/zitadel/zitadel/issues/8352](https://github.com/zitadel/zitadel/issues/8352) defines 1000 JWT profile auth/sec
2. [https://github.com/zitadel/zitadel/issues/4424](https://github.com/zitadel/zitadel/issues/4424) defines 1200 logins/sec

## Procedure

First, we determine the "target" of our load-test. The target is expressed as a make recipe in the load-test [Makefile](https://github.com/zitadel/zitadel/blob/main/load-test/Makefile); an invocation sketch follows below. See also the load-test [readme](https://github.com/zitadel/zitadel/blob/main/load-test/README.md) on how to configure and run load-tests.
A target should be tested for longer periods of time, as it might take time for certain metrics to show up. For example, Cloud SQL samples query insights. A runtime of at least **30 minutes** is advised at the moment.
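
A hypothetical invocation sketch; the actual recipe names and required variables are defined in the load-test Makefile and readme linked above, and `machine_jwt_profile_grant` is taken from the results page further down:

```bash
# Run one load-test target from the repository root (recipe name is an assumption).
make -C load-test machine_jwt_profile_grant
```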

After each iteration of load-testing, we should consult the [after-test procedure](#after-test-procedure) to conclude an outcome:

1. Scale
2. Log potential issue and scale
3. Terminate testing and resolve issues

## Methodology

### Benchmark definition

Tests are implemented in the ecosystem of [k6](https://k6.io). The tests are publicly available in the [zitadel repository](https://github.com/zitadel/zitadel/tree/main/load-test). Custom extensions of k6 are implemented in the [xk6-modules repository](https://github.com/zitadel/xk6-modules).
The tests must at least measure the request duration for each API call. This gives an indication of how ZITADEL behaves over the duration of the load test.

### Metrics

The following metrics must be collected for each test iteration. The metrics are used to follow the decision path of the [after-test procedure](https://drive.google.com/open?id=1WVr7aA8dGgV1zd2jUg1y1h_o37mkZF2O6M5Mhafn_NM):

| Metric | Type | Description | Unit |
| :---- | :---- | :---- | :---- |
| Baseline | Comparison | Defines the baseline the test is compared against. If not specified, the baseline defined in this document is used. | Link to test result |
| Purpose | Description | Description of what this test run should prove | text |
| Test start | Setup | Timestamp when the test started. This is useful for gathering additional data like metrics or logs later | Date |
| Test duration | Setup | Duration of the test | Duration |
| Executed test | Setup | Name of the make recipe executed. Further information about specific test cases can be found [here](?tab=t.0#heading=h.xav4f3s5r2f3). | Name of the make recipe |
| k6 version | Setup | Version of the test client (k6) used | semantic version |
| VUs | Setup | Virtual users which execute the test scenario in parallel | Number |
| Client location | Setup | Region or location of the machine which executed the test client. If not further specified, the hoster is Google Cloud | Location / Region |
| Client machine specification | Setup | Definition of the client machine the test client ran on. The resources of the machine could be maxed out during tests, therefore we collect this metric as well. The description must at least clarify the following metrics: vCPU, memory, egress bandwidth | **vCPU**: amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) <br/>**memory**: GB <br/>**egress bandwidth**: Gbps |
| ZITADEL location | Setup | Region or location of the deployment of ZITADEL. If not further specified, the hoster is Google Cloud | Location / Region |
| ZITADEL container specification | Setup | As ZITADEL is mainly run in cloud environments, it should also be run as a container during the load tests. The description must at least clarify the following metrics: vCPU, memory, egress bandwidth, scale | **vCPU**: amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) <br/>**memory**: GB <br/>**egress bandwidth**: Gbps <br/>**scale**: the amount of containers running during the test. The amount must not vary during the tests |
| ZITADEL Version | Setup | The version of ZITADEL deployed | Semantic version or commit |
| ZITADEL Configuration | Setup | Configuration of ZITADEL which deviates from the defaults and is not secret | yaml |
| ZITADEL feature flags | Setup | Changed feature flags | yaml |
| Database | Setup | Database type and version | **type**: crdb / psql <br/>**version**: semantic version |
| Database location | Setup | Region or location of the deployment of the database. If not further specified, the hoster is Google Cloud SQL | Location / Region |
| Database specification | Setup | The description must at least clarify the following metrics: vCPU, memory, egress bandwidth (scale) | **vCPU**: amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) <br/>**memory**: GB <br/>**egress bandwidth**: Gbps <br/>**scale**: amount of crdb nodes if crdb is used |
| ZITADEL metrics during test | Result | This metric helps in understanding the bottlenecks of the executed test. At least the following metrics must be provided: CPU usage, memory usage | **CPU usage** in percent <br/>**memory usage** in percent |
| Observed errors | Result | Errors worth mentioning, mostly unexpected errors | description |
| Top 3 most expensive database queries | Result | The execution plan of the top 3 most expensive database queries during the test execution | database execution plan |
| Database metrics during test | Result | This metric helps in understanding the bottlenecks of the executed test. At least the following metrics must be provided: CPU usage, memory usage | **CPU usage** in percent <br/>**memory usage** in percent |
| k6 Iterations per second | Result | How many test iterations were done per second | Number |
| k6 overview | Result | Shows some basic metrics aggregated over the test run. At least the following metrics must be included: duration per request (min, max, avg, p50, p95, p99), VUs. For simplicity, just add the whole test result printed to the terminal | terminal output |
| k6 output | Result | Trends and metrics generated during the test; this contains detailed information for each step executed during each iteration. A capture sketch follows below. | csv |
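
A sketch of how such a CSV can be captured with k6's built-in CSV output; the VU count and duration mirror the results page below, while the script and output file names are placeholders:

```bash
# Write all trends and metrics generated during the run to output.csv.
k6 run --vus 50 --duration 30m --out csv=output.csv machine_jwt_profile_grant.js
```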

### Test setup

#### Make recipes

Details about the tests implemented can be found in [this readme](https://github.com/zitadel/zitadel/blob/main/load-test/README.md#test).

### Test conclusion

After each iteration of load-testing, we should consult the [flowchart](#after-test-procedure) to conclude an outcome:

1. [Scale](#scale)
2. [Log potential issue and scale](#potential-issues)
3. [Terminate testing](#termination) and resolve issues

#### Scale {#scale}

An outcome of "scale" means that the service hit some kind of resource limit, like CPU or RAM, which can be increased. In such cases we increase the suggested parameter and rerun the load-test for the same target. On the next test we should analyze whether the increase in scale resulted in a performance improvement proportional to the scale parameter. For example, if we scale from 1 to 2 containers, it might be reasonable to expect a doubling of iterations/sec. If such an increase is not observed, there might be another bottleneck or underlying issue, such as locking.

#### Potential issues {#potential-issues}

A potential issue has an impact on performance but does not prevent us from scaling. Such issues must be logged as GitHub issues, and load-testing can continue. The issue can be resolved at a later time and the load-tests repeated when it is. This is primarily for issues which require big changes to ZITADEL.

#### Termination {#termination}

Scaling no longer improves iterations/second, or some kind of critical error or bug is experienced. The root cause of the issue must be resolved before we can continue with increasing scale.

### After test procedure

This flowchart shows the procedure after running a test.

![Flowchart](/img/benchmark/Flowchart.svg)

## Baseline

Will be established as soon as the goal described above is reached.

## Test results

This chapter provides a table linking to the detailed test results.

<DocCardList />

@@ -0,0 +1,75 @@
---
title: machine jwt profile grant benchmark of zitadel v2.65.0
sidebar_label: machine jwt profile grant
---

## Summary

Tests are halted after this test run because of too many [client read events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/wait-event.clientread.html) on the database.

## Performance test results

| Metric | Value |
| :---- | :---- |
| Baseline | none |
| Test start | 22-10-2024 16:20 UTC |
| Test duration | 30min |
| Executed test | machine\_jwt\_profile\_grant |
| k6 version | v0.54.0 |
| VUs | 50 |
| Client location | US1 |
| Client machine specification | e2-high-cpu-4 |
| Zitadel location | US1 |
| Zitadel container specification | vCPUs: 2<br/>Memory: 512 MiB<br/>Container count: 2 |
| Zitadel feature flags | none |
| Database | postgres v15 |
| Database location | US1 |
| Database specification | vCPUs: 4<br/>Memory: 16 GiB |
| Zitadel metrics during test | |
| Observed errors | Many client read events during push |
| Top 3 most expensive database queries | 1: Query events `instance_id = $1 AND aggregate_type = $2 AND aggregate_id = $3 AND event_type = ANY($4)`<br/>2: latest sequence query during push events<br/>3: writing events during push (caused lock wait events) |
| k6 iterations per second | 193 |
| k6 overview | [output](#k6-output) |
| flowchart outcome | Halt tests, must resolve an issue |

## /token endpoint latencies

import OutputSource from "!!raw-loader!./output.json";

import { BenchmarkChart } from '/src/components/benchmark_chart';

<BenchmarkChart testResults={OutputSource} />

## k6 output {#k6-output}

```bash
checks...............................: 100.00% ✓ 695739 ✗ 0
data_received........................: 479 MB 265 kB/s
data_sent............................: 276 MB 153 kB/s
http_req_blocked.....................: min=178ns avg=5µs max=119.8ms p(50)=460ns p(95)=702ns p(99)=921ns
http_req_connecting..................: min=0s avg=1.24µs max=43.45ms p(50)=0s p(95)=0s p(99)=0s
http_req_duration....................: min=18ms avg=255.3ms max=1.22s p(50)=241.56ms p(95)=479.19ms p(99)=600.92ms
{ expected_response:true }.........: min=18ms avg=255.3ms max=1.22s p(50)=241.56ms p(95)=479.19ms p(99)=600.92ms
http_req_failed......................: 0.00% ✓ 0 ✗ 347998
http_req_receiving...................: min=25.92µs avg=536.96µs max=401.94ms p(50)=89.44µs p(95)=2.39ms p(99)=11.12ms
http_req_sending.....................: min=24.01µs avg=63.86µs max=4.48ms p(50)=60.97µs p(95)=88.69µs p(99)=141.74µs
http_req_tls_handshaking.............: min=0s avg=2.8µs max=51.05ms p(50)=0s p(95)=0s p(99)=0s
http_req_waiting.....................: min=17.65ms avg=254.7ms max=1.22s p(50)=240.88ms p(95)=478.6ms p(99)=600.6ms
http_reqs............................: 347998 192.80552/s
iteration_duration...................: min=33.86ms avg=258.77ms max=1.22s p(50)=245ms p(95)=482.61ms p(99)=604.32ms
iterations...........................: 347788 192.689171/s
login_ui_enter_login_name_duration...: min=218.61ms avg=218.61ms max=218.61ms p(50)=218.61ms p(95)=218.61ms p(99)=218.61ms
login_ui_enter_password_duration.....: min=18ms avg=18ms max=18ms p(50)=18ms p(95)=18ms p(99)=18ms
login_ui_init_login_duration.........: min=90.96ms avg=90.96ms max=90.96ms p(50)=90.96ms p(95)=90.96ms p(99)=90.96ms
login_ui_token_duration..............: min=140.02ms avg=140.02ms max=140.02ms p(50)=140.02ms p(95)=140.02ms p(99)=140.02ms
oidc_token_duration..................: min=29.85ms avg=255.38ms max=1.22s p(50)=241.61ms p(95)=479.23ms p(99)=600.95ms
org_create_org_duration..............: min=64.51ms avg=64.51ms max=64.51ms p(50)=64.51ms p(95)=64.51ms p(99)=64.51ms
user_add_machine_key_duration........: min=44.93ms avg=87.89ms max=159.52ms p(50)=84.43ms p(95)=144.59ms p(99)=155.54ms
user_create_machine_duration.........: min=65.75ms avg=266.53ms max=421.58ms p(50)=276.59ms p(95)=380.84ms p(99)=414.43ms
vus..................................: 0 min=0 max=50
vus_max..............................: 50 min=50 max=50

running (30m04.9s), 00/50 VUs, 347788 complete and 0 interrupted iterations
default ✓ [======================================] 50 VUs 30m0s
```

File diff suppressed because it is too large
@@ -144,4 +144,4 @@ The storage layer of ZITADEL is responsible for multiple tasks. For example:
- Backup and restore operation for disaster recovery purposes

ZITADEL currently supports PostgreSQL and CockroachDB.
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide on using one of them.
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide on using one of them.

@@ -11,7 +11,7 @@ Since the storage layer takes the heavy lifting of making sure that data in sync
Depending on your project's needs, our general recommendation is to run ZITADEL and ZITADEL's storage layer across multiple availability zones in the same region, or, if you need higher guarantees, run the storage layer across multiple regions.
Consult the [CockroachDB documentation](https://www.cockroachlabs.com/docs/) for more details or use the [CockroachCloud Service](https://www.cockroachlabs.com/docs/cockroachcloud/create-an-account.html)
Alternatively you can run ZITADEL also with Postgres, which is [Enterprise Supported](/docs/support/software-release-cycles-support#partially-supported).
Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it.
Make sure to read our [Production Guide](/self-hosting/manage/production#prefer-postgresql) before you decide to use it.

## Scalability

@@ -75,12 +75,6 @@ Data location refers to a region, consisting of one or many countries or territo

We can not guarantee that during transit the data will only remain within this region. We take measures, as outlined in our [privacy policy](../policies/privacy-policy), to protect your data in transit and at rest.

The following regions will be available when using our cloud service. This list is for informational purposes and will be updated in due course; please refer to our website for all available regions at this time.

- **Global**: All available cloud regions offered by our cloud provider
- **Switzerland**: Exclusively on Swiss regions
- **GDPR safe countries**: Hosting location is within any of the EU member states and [Adequate Countries](https://ec.europa.eu/info/law/law-topic/data-protection/international-dimension-data-protection/adequacy-decisions_en) as recognized by the European Commission under the GDPR

## Backup

Our backup strategy executes daily full backups and differential backups at a much higher frequency.

@@ -51,6 +51,9 @@ By executing the commands below, you will download the following file:
# Download the docker compose example configuration.
wget https://raw.githubusercontent.com/zitadel/zitadel/main/docs/docs/self-hosting/deploy/docker-compose-sa.yaml -O docker-compose.yaml

# Create the machine key directory.
mkdir machinekey

# Run the database and application containers.
docker compose up --detach

@@ -1,27 +1,27 @@
version: '3.8'

services:
zitadel:
# The user should have the permission to write to ./machinekey
user: "${UID:-1000}"
restart: 'always'
networks:
- 'zitadel'
image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
- 'ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH=/machinekey/zitadel-admin-sa.json'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME=zitadel-admin-sa'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME=Admin'
- 'ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE=1'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH: /machinekey/zitadel-admin-sa.json
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_USERNAME: zitadel-admin-sa
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINE_NAME: Admin
ZITADEL_FIRSTINSTANCE_ORG_MACHINE_MACHINEKEY_TYPE: 1
depends_on:
db:
condition: 'service_healthy'
@@ -34,12 +34,12 @@ services:
restart: 'always'
image: postgres:16-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
PGUSER: postgres
POSTGRES_PASSWORD: postgres
networks:
- 'zitadel'
healthcheck:
test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"]
test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
interval: '10s'
timeout: '30s'
retries: 5

@@ -1,5 +1,3 @@
version: '3.8'

services:
zitadel:
restart: 'always'
@@ -8,16 +6,16 @@ services:
image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment:
- 'ZITADEL_DATABASE_POSTGRES_HOST=db'
- 'ZITADEL_DATABASE_POSTGRES_PORT=5432'
- 'ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel'
- 'ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres'
- 'ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable'
- 'ZITADEL_EXTERNALSECURE=false'
ZITADEL_DATABASE_POSTGRES_HOST: db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
ZITADEL_EXTERNALSECURE: false
depends_on:
db:
condition: 'service_healthy'
@@ -28,9 +26,8 @@ services:
restart: 'always'
image: postgres:16-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=zitadel
PGUSER: postgres
POSTGRES_PASSWORD: postgres
networks:
- 'zitadel'
healthcheck:

@@ -14,7 +14,7 @@ Choose your platform and run ZITADEL with the most minimal configuration possibl
## Prerequisites

- For test environments, ZITADEL does not need many resources; 1 CPU and 512MB memory are more than enough. (With more CPU, the password hashing might be faster.)
- A PostgreSQL or CockroachDB as the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use PostgreSQL.
- A PostgreSQL or CockroachDB as the only needed storage. Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-postgresql) before you decide to use PostgreSQL.

## Releases

@@ -1,7 +1,6 @@
## ZITADEL with Postgres

If you want to use a PostgreSQL database you can [overwrite the default configuration](../configure/configure.mdx).
Make sure to read our [Production Guide](/docs/self-hosting/manage/production#prefer-cockroachdb) before you decide to use it.

Currently versions >= 14 are supported.

@@ -109,17 +109,16 @@ but in the Projections.Customizations.Telemetry section

## Database

### Prefer CockroachDB
### Prefer PostgreSQL

ZITADEL supports [CockroachDB](https://www.cockroachlabs.com/) and [PostgreSQL](https://www.postgresql.org/).
We recommend using CockroachDB,
as horizontal scaling is much easier than with PostgreSQL.
Also, if you are concerned about multi-regional data locality,
[the way to go is with CockroachDB](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html).
We recommend using PostgreSQL, as it is the better choice when you want to prioritize performance and latency.

However, if [multi-regional data locality](https://www.cockroachlabs.com/docs/stable/multiregion-overview.html) is a critical requirement, CockroachDB might be a suitable option.

The indexes for the database are optimized using load tests from [ZITADEL Cloud](https://zitadel.com),
which runs with CockroachDB.
If you identify problems with your PostgreSQL during load tests that indicate that the indexes are not optimized,
which runs with PostgreSQL.
If you identify problems with your CockroachDB during load tests that indicate that the indexes are not optimized,
please create an issue in our [GitHub repository](https://github.com/zitadel/zitadel).

### Configure ZITADEL

@@ -128,7 +127,7 @@ Depending on your environment, you maybe would want to tweak some settings about

```yaml
Database:
cockroach:
postgres:
Host: localhost
Port: 26257
Database: zitadel
@@ -140,6 +139,7 @@ Database:
Options: ""
```

You also might want to configure how [projections](/concepts/eventstore/implementation#projections) are computed. These are the default values:

```yaml

@ -7,19 +7,19 @@ services:
      service: zitadel-init
    command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
    environment:
      - ZITADEL_EXTERNALPORT=80
      - ZITADEL_EXTERNALSECURE=false
      - ZITADEL_TLS_ENABLED=false
      ZITADEL_EXTERNALPORT: 80
      ZITADEL_EXTERNALSECURE: false
      ZITADEL_TLS_ENABLED: false
      # database configuration
      - ZITADEL_DATABASE_POSTGRES_HOST=db
      - ZITADEL_DATABASE_POSTGRES_PORT=5432
      - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
      - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
      - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
      - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
      - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
      - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
      - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
      ZITADEL_DATABASE_POSTGRES_HOST: db
      ZITADEL_DATABASE_POSTGRES_PORT: 5432
      ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
      ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
      ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
      ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
      ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
      ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
      ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
    networks:
      - 'zitadel'
    depends_on:
@ -33,19 +33,19 @@ services:
      service: zitadel-init
    command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
    environment:
      - ZITADEL_EXTERNALPORT=443
      - ZITADEL_EXTERNALSECURE=true
      - ZITADEL_TLS_ENABLED=false
      ZITADEL_EXTERNALPORT: 443
      ZITADEL_EXTERNALSECURE: true
      ZITADEL_TLS_ENABLED: false
      # database configuration
      - ZITADEL_DATABASE_POSTGRES_HOST=db
      - ZITADEL_DATABASE_POSTGRES_PORT=5432
      - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
      - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
      - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
      - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
      - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
      - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
      - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
      ZITADEL_DATABASE_POSTGRES_HOST: db
      ZITADEL_DATABASE_POSTGRES_PORT: 5432
      ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
      ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
      ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
      ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
      ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
      ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
      ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
    networks:
      - 'zitadel'
    depends_on:
@ -59,21 +59,21 @@ services:
      service: zitadel-init
    command: 'start-from-setup --init-projections --masterkey "MasterkeyNeedsToHave32Characters" --config /zitadel.yaml --steps /zitadel.yaml'
    environment:
      - ZITADEL_EXTERNALPORT=443
      - ZITADEL_EXTERNALSECURE=true
      - ZITADEL_TLS_ENABLED=true
      - ZITADEL_TLS_CERTPATH=/etc/certs/selfsigned.crt
      - ZITADEL_TLS_KEYPATH=/etc/certs/selfsigned.key
      ZITADEL_EXTERNALPORT: 443
      ZITADEL_EXTERNALSECURE: true
      ZITADEL_TLS_ENABLED: true
      ZITADEL_TLS_CERTPATH: /etc/certs/selfsigned.crt
      ZITADEL_TLS_KEYPATH: /etc/certs/selfsigned.key
      # database configuration
      - ZITADEL_DATABASE_POSTGRES_HOST=db
      - ZITADEL_DATABASE_POSTGRES_PORT=5432
      - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
      - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
      - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
      - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
      - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
      - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
      - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
      ZITADEL_DATABASE_POSTGRES_HOST: db
      ZITADEL_DATABASE_POSTGRES_PORT: 5432
      ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
      ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
      ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
      ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
      ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
      ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
      ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
    volumes:
      - ./selfsigned.crt:/etc/certs/selfsigned.crt
      - ./selfsigned.key:/etc/certs/selfsigned.key
@ -96,22 +96,22 @@ services:
      # Using an external domain other than localhost proofs, that the proxy configuration works.
      # If ZITADEL can't resolve a requests original host to this domain,
      # it will return a 404 Instance not found error.
      - ZITADEL_EXTERNALDOMAIN=127.0.0.1.sslip.io
      ZITADEL_EXTERNALDOMAIN: 127.0.0.1.sslip.io
      # In case something doesn't work as expected,
      # it can be handy to be able to read the access logs.
      - ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED=true
      ZITADEL_LOGSTORE_ACCESS_STDOUT_ENABLED: true
      # For convenience, ZITADEL should not ask to change the initial admin users password.
      - ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED=false
      ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORDCHANGEREQUIRED: false
      # database configuration
      - ZITADEL_DATABASE_POSTGRES_HOST=db
      - ZITADEL_DATABASE_POSTGRES_PORT=5432
      - ZITADEL_DATABASE_POSTGRES_DATABASE=zitadel
      - ZITADEL_DATABASE_POSTGRES_USER_USERNAME=zitadel_user
      - ZITADEL_DATABASE_POSTGRES_USER_PASSWORD=zitadel_pw
      - ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE=disable
      - ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME=root
      - ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD=postgres
      - ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE=disable
      ZITADEL_DATABASE_POSTGRES_HOST: db
      ZITADEL_DATABASE_POSTGRES_PORT: 5432
      ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
      ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel_user
      ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_pw
      ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
      ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: root
      ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: postgres
      ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
    networks:
      - 'zitadel'
    healthcheck:
@ -125,10 +125,10 @@ services:
    restart: 'always'
    image: postgres:16-alpine
    environment:
      - POSTGRES_USER=root
      - POSTGRES_PASSWORD=postgres
      PGUSER: root
      POSTGRES_PASSWORD: postgres
    healthcheck:
      test: ["CMD-SHELL", "pg_isready", "-d", "db_prod"]
      test: ["CMD-SHELL", "pg_isready", "-d", "zitadel", "-U", "postgres"]
      interval: 5s
      timeout: 60s
      retries: 10
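The compose hunks above swap the `environment` list form (`KEY=value` strings) for the map form (`KEY: value`). Docker Compose accepts both and they decode to the same key/value pairs. A minimal Go sketch of that equivalence, assuming `gopkg.in/yaml.v3` (the snippet and its sample data are illustrative, not part of the commit):

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3" // assumed dependency for this illustration
)

const listForm = `
environment:
  - ZITADEL_EXTERNALPORT=80
`

// Value quoted here so it decodes as a string; Compose itself coerces scalars.
const mapForm = `
environment:
  ZITADEL_EXTERNALPORT: "80"
`

func main() {
	// List form decodes to a slice of "KEY=value" strings.
	var l struct {
		Environment []string `yaml:"environment"`
	}
	if err := yaml.Unmarshal([]byte(listForm), &l); err != nil {
		panic(err)
	}
	fromList := map[string]string{}
	for _, kv := range l.Environment {
		k, v, _ := strings.Cut(kv, "=")
		fromList[k] = v
	}

	// Map form decodes directly to key/value pairs.
	var m struct {
		Environment map[string]string `yaml:"environment"`
	}
	if err := yaml.Unmarshal([]byte(mapForm), &m); err != nil {
		panic(err)
	}

	fmt.Println(fromList["ZITADEL_EXTERNALPORT"] == m.Environment["ZITADEL_EXTERNALPORT"]) // true
}
```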
@ -289,7 +289,7 @@ module.exports = {
        outputDir: "docs/apis/resources/user_service_v2",
        sidebarOptions: {
          groupPathsBy: "tag",
          categoryLinkSource: "tag",
          categoryLinkSource: "auto",
        },
      },
      session_v2: {
@ -297,7 +297,7 @@ module.exports = {
        outputDir: "docs/apis/resources/session_service_v2",
        sidebarOptions: {
          groupPathsBy: "tag",
          categoryLinkSource: "tag",
          categoryLinkSource: "auto",
        },
      },
      oidc_v2: {
@ -305,7 +305,7 @@ module.exports = {
        outputDir: "docs/apis/resources/oidc_service_v2",
        sidebarOptions: {
          groupPathsBy: "tag",
          categoryLinkSource: "tag",
          categoryLinkSource: "auto",
        },
      },
      settings_v2: {
@ -313,7 +313,7 @@ module.exports = {
        outputDir: "docs/apis/resources/settings_service_v2",
        sidebarOptions: {
          groupPathsBy: "tag",
          categoryLinkSource: "tag",
          categoryLinkSource: "auto",
        },
      },
      user_schema_v3: {
@ -44,6 +44,7 @@
    "react": "^18.2.0",
    "react-copy-to-clipboard": "^5.1.0",
    "react-dom": "^18.2.0",
    "react-google-charts": "^5.2.1",
    "react-player": "^2.15.1",
    "sitemap": "7.1.1",
    "swc-loader": "^0.2.3",
@ -841,6 +841,30 @@ module.exports = {
          label: "Rate Limits (Cloud)", // The link label
          href: "/legal/policies/rate-limit-policy", // The internal path
        },
        {
          type: "category",
          label: "Benchmarks",
          collapsed: false,
          link: {
            type: "doc",
            id: "apis/benchmarks/index",
          },
          items: [
            {
              type: "category",
              label: "v2.65.0",
              link: {
                title: "v2.65.0",
                slug: "/apis/benchmarks/v2.65.0",
                description:
                  "Benchmark results of Zitadel v2.65.0\n"
              },
              items: [
                "apis/benchmarks/v2.65.0/machine_jwt_profile_grant/index",
              ],
            },
          ],
        },
      ],
  selfHosting: [
    {
45
docs/src/components/benchmark_chart.jsx
Normal file
@ -0,0 +1,45 @@
import React from "react";
import Chart from "react-google-charts";

export function BenchmarkChart(testResults=[], height='500px') {

  const options = {
    legend: { position: 'bottom' },
    focusTarget: 'category',
    hAxis: {
      title: 'timestamp',
    },
    vAxis: {
      title: 'latency (ms)',
    },
  };

  const data = [
    [
      {type:"datetime", label: "timestamp"},
      {type:"number", label: "p50"},
      {type:"number", label: "p95"},
      {type:"number", label: "p99"},
    ],
  ]

  JSON.parse(testResults.testResults).forEach((result) => {
    data.push([
      new Date(result.timestamp),
      result.p50,
      result.p95,
      result.p99,
    ])
  });

  return (
    <Chart
      chartType="LineChart"
      width="100%"
      height="500px"
      options={options}
      data={data}
      legendToggle
    />
  );
}
1
docs/static/img/benchmark/Flowchart.svg
vendored
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 439 KiB
@ -9479,6 +9479,11 @@ react-fast-compare@^3.0.1, react-fast-compare@^3.2.0, react-fast-compare@^3.2.2:
  resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.2.tgz#929a97a532304ce9fee4bcae44234f1ce2c21d49"
  integrity sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==

react-google-charts@^5.2.1:
  version "5.2.1"
  resolved "https://registry.yarnpkg.com/react-google-charts/-/react-google-charts-5.2.1.tgz#d9cbe8ed45d7c0fafefea5c7c3361bee76648454"
  integrity sha512-mCbPiObP8yWM5A9ogej7Qp3/HX4EzOwuEzUYvcfHtL98Xt4V/brD14KgfDzSNNtyD48MNXCpq5oVaYKt0ykQUQ==

react-helmet-async@*:
  version "2.0.5"
  resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-2.0.5.tgz#cfc70cd7bb32df7883a8ed55502a1513747223ec"
40
go.mod
@ -10,6 +10,7 @@ require (
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.24.0
	github.com/Masterminds/squirrel v1.5.4
	github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b
	github.com/alicebob/miniredis/v2 v2.33.0
	github.com/benbjohnson/clock v1.3.5
	github.com/boombuler/barcode v1.0.2
	github.com/brianvoe/gofakeit/v6 v6.28.0
@ -34,7 +35,7 @@ require (
	github.com/gorilla/websocket v1.4.1
	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
	github.com/grpc-ecosystem/grpc-gateway v1.16.0
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
	github.com/h2non/gock v1.2.0
	github.com/hashicorp/golang-lru/v2 v2.0.7
	github.com/improbable-eng/grpc-web v0.15.0
@ -52,8 +53,10 @@ require (
	github.com/pashagolub/pgxmock/v4 v4.3.0
	github.com/pquerna/otp v1.4.0
	github.com/rakyll/statik v0.1.7
	github.com/rs/cors v1.11.0
	github.com/redis/go-redis/v9 v9.7.0
	github.com/rs/cors v1.11.1
	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
	github.com/sony/gobreaker/v2 v2.0.0
	github.com/sony/sonyflake v1.2.0
	github.com/spf13/cobra v1.8.1
	github.com/spf13/viper v1.19.0
@ -62,29 +65,29 @@ require (
	github.com/ttacon/libphonenumber v1.2.1
	github.com/twilio/twilio-go v1.22.2
	github.com/zitadel/logging v0.6.1
	github.com/zitadel/oidc/v3 v3.28.1
	github.com/zitadel/oidc/v3 v3.32.0
	github.com/zitadel/passwap v0.6.0
	github.com/zitadel/saml v0.2.0
	github.com/zitadel/schema v1.3.0
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
	go.opentelemetry.io/otel v1.28.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
	go.opentelemetry.io/otel v1.29.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
	go.opentelemetry.io/otel/exporters/prometheus v0.50.0
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0
	go.opentelemetry.io/otel/metric v1.28.0
	go.opentelemetry.io/otel/sdk v1.28.0
	go.opentelemetry.io/otel/sdk/metric v1.28.0
	go.opentelemetry.io/otel/trace v1.28.0
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0
	go.opentelemetry.io/otel/metric v1.29.0
	go.opentelemetry.io/otel/sdk v1.29.0
	go.opentelemetry.io/otel/sdk/metric v1.29.0
	go.opentelemetry.io/otel/trace v1.29.0
	go.uber.org/mock v0.4.0
	golang.org/x/crypto v0.27.0
	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
	golang.org/x/net v0.26.0
	golang.org/x/oauth2 v0.22.0
	golang.org/x/net v0.28.0
	golang.org/x/oauth2 v0.23.0
	golang.org/x/sync v0.8.0
	golang.org/x/text v0.18.0
	golang.org/x/text v0.19.0
	google.golang.org/api v0.187.0
	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094
	google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd
	google.golang.org/grpc v1.65.0
	google.golang.org/protobuf v1.34.2
	sigs.k8s.io/yaml v1.4.0
@ -94,8 +97,10 @@ require (
	cloud.google.com/go/auth v0.6.1 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.0 // indirect
	github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
	github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
	github.com/crewjam/httperr v0.2.0 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/go-ini/ini v1.67.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
@ -121,11 +126,12 @@ require (
	github.com/sagikazarmark/locafero v0.4.0 // indirect
	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/yuin/gopher-lua v1.1.1 // indirect
	github.com/zenazn/goji v1.0.1 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	golang.org/x/time v0.5.0 // indirect
	google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
)

require (
@ -197,7 +203,7 @@ require (
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	golang.org/x/sys v0.25.0
	gopkg.in/ini.v1 v1.67.0 // indirect
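go.mod now pulls in `github.com/redis/go-redis/v9` together with `miniredis` (and its `gopher-lua`/`gopher-json` indirects): a Redis client plus an in-process Redis for tests, matching the Redis cache service added to the integration-test workflow. A hedged sketch of how this pairing is typically exercised; the test name and key are illustrative, not taken from the ZITADEL codebase:

```go
package cache_test

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

// TestRedisRoundTrip starts an in-process Redis (miniredis) and exercises a
// go-redis client against it -- no external server required.
func TestRedisRoundTrip(t *testing.T) {
	mr := miniredis.RunT(t) // stopped automatically via t.Cleanup

	client := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	defer client.Close()

	ctx := context.Background()
	if err := client.Set(ctx, "greeting", "hello", 0).Err(); err != nil {
		t.Fatal(err)
	}
	got, err := client.Get(ctx, "greeting").Result()
	if err != nil || got != "hello" {
		t.Fatalf("got %q, err %v", got, err)
	}
}
```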
84
go.sum
@ -56,6 +56,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA=
github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0=
github.com/amdonov/xmlsig v0.1.0 h1:i0iQ3neKLmUhcfIRgiiR3eRPKgXZj+n5lAfqnfKoeXI=
github.com/amdonov/xmlsig v0.1.0/go.mod h1:jTR/jO0E8fSl/cLvMesP+RjxyV4Ux4WL1Ip64ZnQpA0=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@ -80,13 +84,17 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.2 h1:79yrbttoZrLGkL/oOI8hBrUKucwOL0oOjUgEguGMcJ4=
github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
@ -127,6 +135,8 @@ github.com/descope/virtualwebauthn v1.0.2/go.mod h1:iJvinjD1iZYqQ09J5lF0+795OdDb
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg=
@ -354,8 +364,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
@ -620,6 +630,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -628,8 +640,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po=
github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys=
@ -658,6 +670,8 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sony/gobreaker/v2 v2.0.0 h1:23AaR4JQ65y4rz8JWMzgXw2gKOykZ/qfqYunll4OwJ4=
github.com/sony/gobreaker/v2 v2.0.0/go.mod h1:8JnRUz80DJ1/ne8M8v7nmTs2713i58nIt4s7XcGe/DI=
github.com/sony/sonyflake v1.2.0 h1:Pfr3A+ejSg+0SPqpoAmQgEtNDAhc2G1SUYk205qVMLQ=
github.com/sony/sonyflake v1.2.0/go.mod h1:LORtCywH/cq10ZbyfhKrHYgAUGH7mOBa76enV9txy/Y=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
@ -719,12 +733,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zenazn/goji v1.0.1 h1:4lbD8Mx2h7IvloP7r2C0D6ltZP6Ufip8Hn0wmSK5LR8=
github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y=
github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow=
github.com/zitadel/oidc/v3 v3.28.1 h1:PsbFm5CzEMQq9HBXUNJ8yvnWmtVYxpwV5Cinj7TTsHo=
github.com/zitadel/oidc/v3 v3.28.1/go.mod h1:WmDFu3dZ9YNKrIoZkmxjGG8QyUR4PbbhsVVSY+rpojM=
github.com/zitadel/oidc/v3 v3.32.0 h1:Mw0EPZRC6h+OXAuT0Uk2BZIjJQNHLqUpaJCm6c3IByc=
github.com/zitadel/oidc/v3 v3.32.0/go.mod h1:DyE/XClysRK/ozFaZSqlYamKVnTh4l6Ln25ihSNI03w=
github.com/zitadel/passwap v0.6.0 h1:m9F3epFC0VkBXu25rihSLGyHvWiNlCzU5kk8RoI+SXQ=
github.com/zitadel/passwap v0.6.0/go.mod h1:kqAiJ4I4eZvm3Y6oAk6hlEqlZZOkjMHraGXF90GG7LI=
github.com/zitadel/saml v0.2.0 h1:vv7r+Xz43eAPCb+fImMaospD+TWRZQDkb78AbSJRcL4=
@ -742,24 +758,24 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng=
go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U=
go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -857,13 +873,13 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -934,8 +950,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@ -983,10 +999,10 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls=
google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo=
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -42,7 +42,7 @@ func (s *Server) ExportData(ctx context.Context, req *admin_pb.ExportDataRequest
	}

	orgs := make([]*admin_pb.DataOrg, len(queriedOrgs.Orgs))
	processedOrgs := make([]string, len(queriedOrgs.Orgs))
	processedOrgs := make([]string, 0, len(queriedOrgs.Orgs))
	processedProjects := make([]string, 0)
	processedGrants := make([]string, 0)
	processedUsers := make([]string, 0)
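The `processedOrgs` fix above matters because `make([]string, n)` yields a slice that already holds n zero values, so later `append` calls grow it past n instead of filling it; `make([]string, 0, n)` reserves the same capacity but starts empty. A minimal self-contained sketch of the difference:

```go
package main

import "fmt"

func main() {
	// Length n: append adds AFTER two pre-existing empty strings.
	wrong := make([]string, 2)
	wrong = append(wrong, "org-1")
	fmt.Printf("%q\n", wrong) // ["" "" "org-1"]

	// Length 0, capacity n: append fills the slice as intended.
	right := make([]string, 0, 2)
	right = append(right, "org-1")
	fmt.Printf("%q\n", right) // ["org-1"]
}
```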
@ -10,6 +10,7 @@ import (
	"net/http/cookiejar"
	"net/url"
	"testing"
	"time"

	"github.com/muhlemmer/gu"
	"github.com/stretchr/testify/assert"
@ -70,28 +71,34 @@ func awaitPubOrgRegDisallowed(t *testing.T, ctx context.Context, cc *integration
// awaitGetSSRGetResponse cuts the CSRF token from the response body if it exists
func awaitGetSSRGetResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int) string {
	var csrfToken []byte
	await(t, ctx, func(tt *assert.CollectT) {
		resp, err := client.Get(parsedURL.String())
		require.NoError(tt, err)
		body, err := io.ReadAll(resp.Body)
		require.NoError(tt, err)
		searchField := `<input type="hidden" name="gorilla.csrf.Token" value="`
		_, after, hasCsrfToken := bytes.Cut(body, []byte(searchField))
		if hasCsrfToken {
			csrfToken, _, _ = bytes.Cut(after, []byte(`">`))
		}
		assert.Equal(tt, resp.StatusCode, expectCode)
	})
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t,
		func(tt *assert.CollectT) {
			resp, err := client.Get(parsedURL.String())
			require.NoError(tt, err)
			body, err := io.ReadAll(resp.Body)
			require.NoError(tt, err)
			searchField := `<input type="hidden" name="gorilla.csrf.Token" value="`
			_, after, hasCsrfToken := bytes.Cut(body, []byte(searchField))
			if hasCsrfToken {
				csrfToken, _, _ = bytes.Cut(after, []byte(`">`))
			}
			assert.Equal(tt, resp.StatusCode, expectCode)
		}, retryDuration, tick, "awaiting successful get SSR get response failed",
	)
	return string(csrfToken)
}

// awaitPostFormResponse needs a valid CSRF token to make it to the actual endpoint implementation and get the expected status code
func awaitPostFormResponse(t *testing.T, ctx context.Context, client *http.Client, parsedURL *url.URL, expectCode int, csrfToken string) {
	await(t, ctx, func(tt *assert.CollectT) {
		resp, err := client.PostForm(parsedURL.String(), url.Values{
			"gorilla.csrf.Token": {csrfToken},
		})
		require.NoError(tt, err)
		assert.Equal(tt, resp.StatusCode, expectCode)
	})
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t,
		func(tt *assert.CollectT) {
			resp, err := client.PostForm(parsedURL.String(), url.Values{
				"gorilla.csrf.Token": {csrfToken},
			})
			require.NoError(tt, err)
			assert.Equal(tt, resp.StatusCode, expectCode)
		}, retryDuration, tick, "awaiting successful Post Form failed",
	)
}
@ -51,7 +51,7 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
			require.Equal(ttt, language.Make(defaultLang.Language), language.English)
		})
		tt.Run("the discovery endpoint returns all supported languages", func(ttt *testing.T) {
			awaitDiscoveryEndpoint(ttt, instance.Domain, supportedLanguagesStr, nil)
			awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, supportedLanguagesStr, nil)
		})
	})
	t.Run("restricting the default language fails", func(tt *testing.T) {
@ -92,10 +92,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
		require.Condition(tt, contains(supported.GetLanguages(), supportedLanguagesStr))
	})
	t.Run("the disallowed language is not listed in the discovery endpoint", func(tt *testing.T) {
		awaitDiscoveryEndpoint(tt, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()})
		awaitDiscoveryEndpoint(tt, ctx, instance.Domain, []string{defaultAndAllowedLanguage.String()}, []string{disallowedLanguage.String()})
	})
	t.Run("the login ui is rendered in the default language", func(tt *testing.T) {
		awaitLoginUILanguage(tt, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort")
		awaitLoginUILanguage(tt, ctx, instance.Domain, disallowedLanguage, defaultAndAllowedLanguage, "Passwort")
	})
	t.Run("preferred languages are not restricted by the supported languages", func(tt *testing.T) {
		tt.Run("change user profile", func(ttt *testing.T) {
@ -153,10 +153,10 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {

	t.Run("allowing the language makes it usable again", func(tt *testing.T) {
		tt.Run("the previously disallowed language is listed in the discovery endpoint again", func(ttt *testing.T) {
			awaitDiscoveryEndpoint(ttt, instance.Domain, []string{disallowedLanguage.String()}, nil)
			awaitDiscoveryEndpoint(ttt, ctx, instance.Domain, []string{disallowedLanguage.String()}, nil)
		})
		tt.Run("the login ui is rendered in the previously disallowed language", func(ttt *testing.T) {
			awaitLoginUILanguage(ttt, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña")
			awaitLoginUILanguage(ttt, ctx, instance.Domain, disallowedLanguage, disallowedLanguage, "Contraseña")
		})
	})
}
@ -164,36 +164,36 @@ func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
func setAndAwaitAllowedLanguages(ctx context.Context, cc *integration.Client, t *testing.T, selectLanguages []string) {
	_, err := cc.Admin.SetRestrictions(ctx, &admin.SetRestrictionsRequest{AllowedLanguages: &admin.SelectLanguages{List: selectLanguages}})
	require.NoError(t, err)
	awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second)
	defer awaitCancel()
	await(t, awaitCtx, func(tt *assert.CollectT) {
		restrictions, getErr := cc.Admin.GetRestrictions(awaitCtx, &admin.GetRestrictionsRequest{})
		expectLanguages := selectLanguages
		if len(selectLanguages) == 0 {
			expectLanguages = nil
		}
		assert.NoError(tt, getErr)
		assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages())
	})
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t,
		func(tt *assert.CollectT) {
			restrictions, getErr := cc.Admin.GetRestrictions(ctx, &admin.GetRestrictionsRequest{})
			expectLanguages := selectLanguages
			if len(selectLanguages) == 0 {
				expectLanguages = nil
			}
			assert.NoError(tt, getErr)
			assert.Equal(tt, expectLanguages, restrictions.GetAllowedLanguages())
		}, retryDuration, tick, "awaiting successful GetAllowedLanguages failed",
	)
}

func setAndAwaitDefaultLanguage(ctx context.Context, cc *integration.Client, t *testing.T, lang language.Tag) {
	_, err := cc.Admin.SetDefaultLanguage(ctx, &admin.SetDefaultLanguageRequest{Language: lang.String()})
	require.NoError(t, err)
	awaitCtx, awaitCancel := context.WithTimeout(ctx, 10*time.Second)
	defer awaitCancel()
	await(t, awaitCtx, func(tt *assert.CollectT) {
		defaultLang, getErr := cc.Admin.GetDefaultLanguage(awaitCtx, &admin.GetDefaultLanguageRequest{})
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t, func(tt *assert.CollectT) {
		defaultLang, getErr := cc.Admin.GetDefaultLanguage(ctx, &admin.GetDefaultLanguageRequest{})
		assert.NoError(tt, getErr)
		assert.Equal(tt, lang.String(), defaultLang.GetLanguage())
	})
	}, retryDuration, tick, "awaiting successful GetDefaultLanguage failed",
	)
}

func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notContainsUILocales []string) {
	awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer awaitCancel()
	await(t, awaitCtx, func(tt *assert.CollectT) {
		req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
func awaitDiscoveryEndpoint(t *testing.T, ctx context.Context, domain string, containsUILocales, notContainsUILocales []string) {
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t, func(tt *assert.CollectT) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/.well-known/openid-configuration", nil)
		require.NoError(tt, err)
		resp, err := http.DefaultClient.Do(req)
		require.NoError(tt, err)
@ -213,14 +213,14 @@ func awaitDiscoveryEndpoint(t *testing.T, domain string, containsUILocales, notC
		if notContainsUILocales != nil {
			assert.Condition(tt, not(contains(doc.UILocalesSupported, notContainsUILocales)))
		}
	})
	}, retryDuration, tick, "awaiting successful call to Discovery endpoint failed",
	)
}

func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) {
	awaitCtx, awaitCancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer awaitCancel()
	await(t, awaitCtx, func(tt *assert.CollectT) {
		req, err := http.NewRequestWithContext(awaitCtx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
func awaitLoginUILanguage(t *testing.T, ctx context.Context, domain string, acceptLanguage language.Tag, expectLang language.Tag, containsText string) {
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t, func(tt *assert.CollectT) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+domain+":8080/ui/login/register", nil)
		req.Header.Set("Accept-Language", acceptLanguage.String())
		require.NoError(tt, err)
		resp, err := http.DefaultClient.Do(req)
@ -232,7 +232,8 @@ func awaitLoginUILanguage(t *testing.T, domain string, acceptLanguage language.T
		}()
		require.NoError(tt, err)
		assert.Containsf(tt, string(body), containsText, "login ui language is in "+expectLang.String())
	})
	}, retryDuration, tick, "awaiting successful LoginUI in specific language failed",
	)
}

// We would love to use assert.Contains here, but it doesn't work with slices of strings
@ -9,7 +9,6 @@ import (
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/zitadel/zitadel/internal/integration"
	admin_pb "github.com/zitadel/zitadel/pkg/grpc/admin"
@ -34,23 +33,6 @@ func TestMain(m *testing.M) {
	}())
}

func await(t *testing.T, ctx context.Context, cb func(*assert.CollectT)) {
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(
		t,
		func(tt *assert.CollectT) {
			defer func() {
				// Panics are not recovered and don't mark the test as failed, so we need to do that ourselves
				assert.Nil(tt, recover(), "panic in await callback")
			}()
			cb(tt)
		},
		retryDuration,
		tick,
		"awaiting successful callback failed",
	)
}

var _ assert.TestingT = (*noopAssertionT)(nil)

type noopAssertionT struct{}
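With the shared `await` helper removed, each call site now inlines the same pattern: derive a retry window and tick from the context via `integration.WaitForAndTickWithMaxDuration`, then poll with `require.EventuallyWithT`. A condensed sketch of that shape, using only names that appear in the diffs above (the `check` callback is a placeholder):

```go
package integration_test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/zitadel/zitadel/internal/integration"
)

// awaitExample shows the inlined retry pattern that replaced the await
// helper: assertions collected on tt are re-run every tick until they pass
// or the retry window derived from ctx elapses.
func awaitExample(t *testing.T, ctx context.Context, check func(*assert.CollectT)) {
	retryDuration, tick := integration.WaitForAndTickWithMaxDuration(ctx, time.Minute)
	require.EventuallyWithT(t, func(tt *assert.CollectT) {
		check(tt)
	}, retryDuration, tick, "awaiting successful callback failed")
}
```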
@ -19,6 +19,7 @@ func systemFeaturesToCommand(req *feature_pb.SetSystemFeaturesRequest) *command.
		ImprovedPerformance: improvedPerformanceListToDomain(req.ImprovedPerformance),
		OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
		DisableUserTokenEvent: req.DisableUserTokenEvent,
		EnableBackChannelLogout: req.EnableBackChannelLogout,
	}
}

@ -34,6 +35,7 @@ func systemFeaturesToPb(f *query.SystemFeatures) *feature_pb.GetSystemFeaturesRe
		ImprovedPerformance: featureSourceToImprovedPerformanceFlagPb(&f.ImprovedPerformance),
		OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
		DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
		EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
	}
}

@ -50,6 +52,7 @@ func instanceFeaturesToCommand(req *feature_pb.SetInstanceFeaturesRequest) *comm
		DebugOIDCParentError: req.DebugOidcParentError,
		OIDCSingleV1SessionTermination: req.OidcSingleV1SessionTermination,
		DisableUserTokenEvent: req.DisableUserTokenEvent,
		EnableBackChannelLogout: req.EnableBackChannelLogout,
	}
}

@ -67,6 +70,7 @@ func instanceFeaturesToPb(f *query.InstanceFeatures) *feature_pb.GetInstanceFeat
		DebugOidcParentError: featureSourceToFlagPb(&f.DebugOIDCParentError),
		OidcSingleV1SessionTermination: featureSourceToFlagPb(&f.OIDCSingleV1SessionTermination),
		DisableUserTokenEvent: featureSourceToFlagPb(&f.DisableUserTokenEvent),
		EnableBackChannelLogout: featureSourceToFlagPb(&f.EnableBackChannelLogout),
	}
}

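The converters above thread the new `EnableBackChannelLogout` flag through as a `*bool`, so "not set" stays distinguishable from "explicitly false"; `gu.Ptr` (used in the tests below) is just a generic address-of helper. A minimal self-contained illustration of that pattern; the struct here is a stand-in, not the real proto type:

```go
package main

import "fmt"

// Ptr mirrors the gu.Ptr helper seen in the tests below: it returns a
// pointer to its argument, letting optional flags distinguish unset (nil)
// from explicitly false.
func Ptr[T any](v T) *T { return &v }

// SetFeaturesRequest is an illustrative stand-in for the generated request type.
type SetFeaturesRequest struct {
	EnableBackChannelLogout *bool // nil means "leave unchanged"
}

func main() {
	unset := SetFeaturesRequest{}
	enabled := SetFeaturesRequest{EnableBackChannelLogout: Ptr(true)}
	fmt.Println(unset.EnableBackChannelLogout == nil) // true: no change requested
	fmt.Println(*enabled.EnableBackChannelLogout)     // true: explicit enable
}
```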
@ -80,6 +80,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
			Level: feature.LevelSystem,
			Value: true,
		},
		EnableBackChannelLogout: query.FeatureSource[bool]{
			Level: feature.LevelSystem,
			Value: true,
		},
	}
	want := &feature_pb.GetSystemFeaturesResponse{
		Details: &object.Details{
@ -123,6 +127,10 @@ func Test_systemFeaturesToPb(t *testing.T) {
			Enabled: false,
			Source: feature_pb.Source_SOURCE_UNSPECIFIED,
		},
		EnableBackChannelLogout: &feature_pb.FeatureFlag{
			Enabled: true,
			Source: feature_pb.Source_SOURCE_SYSTEM,
		},
	}
	got := systemFeaturesToPb(arg)
	assert.Equal(t, want, got)
@ -140,6 +148,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
		WebKey: gu.Ptr(true),
		DebugOidcParentError: gu.Ptr(true),
		OidcSingleV1SessionTermination: gu.Ptr(true),
		EnableBackChannelLogout: gu.Ptr(true),
	}
	want := &command.InstanceFeatures{
		LoginDefaultOrg: gu.Ptr(true),
@ -152,6 +161,7 @@ func Test_instanceFeaturesToCommand(t *testing.T) {
		WebKey: gu.Ptr(true),
		DebugOIDCParentError: gu.Ptr(true),
		OIDCSingleV1SessionTermination: gu.Ptr(true),
		EnableBackChannelLogout: gu.Ptr(true),
	}
	got := instanceFeaturesToCommand(arg)
	assert.Equal(t, want, got)
@ -200,6 +210,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
			Level: feature.LevelInstance,
			Value: true,
		},
		EnableBackChannelLogout: query.FeatureSource[bool]{
			Level: feature.LevelInstance,
			Value: true,
		},
	}
	want := &feature_pb.GetInstanceFeaturesResponse{
		Details: &object.Details{
@ -251,6 +265,10 @@ func Test_instanceFeaturesToPb(t *testing.T) {
			Enabled: false,
			Source: feature_pb.Source_SOURCE_UNSPECIFIED,
		},
		EnableBackChannelLogout: &feature_pb.FeatureFlag{
			Enabled: true,
			Source: feature_pb.Source_SOURCE_INSTANCE,
		},
	}
	got := instanceFeaturesToPb(arg)
	assert.Equal(t, want, got)
@ -57,6 +57,7 @@ func AddOIDCAppRequestToDomain(req *mgmt_pb.AddOIDCAppRequest) *domain.OIDCApp {
		ClockSkew: req.ClockSkew.AsDuration(),
		AdditionalOrigins: req.AdditionalOrigins,
		SkipNativeAppSuccessPage: req.SkipNativeAppSuccessPage,
		BackChannelLogoutURI: req.GetBackChannelLogoutUri(),
	}
}

@ -108,6 +109,7 @@ func UpdateOIDCAppConfigRequestToDomain(app *mgmt_pb.UpdateOIDCAppConfigRequest)
		ClockSkew: app.ClockSkew.AsDuration(),
		AdditionalOrigins: app.AdditionalOrigins,
		SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
		BackChannelLogoutURI: app.BackChannelLogoutUri,
	}
}

@ -61,6 +61,7 @@ func AppOIDCConfigToPb(app *query.OIDCApp) *app_pb.App_OidcConfig {
			AdditionalOrigins: app.AdditionalOrigins,
			AllowedOrigins: app.AllowedOrigins,
			SkipNativeAppSuccessPage: app.SkipNativeAppSuccessPage,
			BackChannelLogoutUri: app.BackChannelLogoutURI,
		},
	}
}
@ -288,7 +288,9 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in
		if !assert.NoError(ttt, err) {
			return
		}
		assert.Len(ttt, got.GetResult(), 1)
		if !assert.Len(ttt, got.GetResult(), 1) {
			return
		}
		gotTargets := got.GetResult()[0].GetExecution().GetTargets()
		// always first check length, otherwise its failed anyway
		if assert.Len(ttt, gotTargets, len(targets)) {
@ -296,7 +298,6 @@ func waitForExecutionOnCondition(ctx context.Context, t *testing.T, instance *in
				assert.EqualExportedValues(ttt, targets[i].GetType(), gotTargets[i].GetType())
			}
		}

	}, retryDuration, tick, "timeout waiting for expected execution result")
	return
}
@ -316,7 +317,9 @@ func waitForTarget(ctx context.Context, t *testing.T, instance *integration.Inst
		if !assert.NoError(ttt, err) {
			return
		}
		assert.Len(ttt, got.GetResult(), 1)
		if !assert.Len(ttt, got.GetResult(), 1) {
			return
		}
		config := got.GetResult()[0].GetConfig()
		assert.Equal(ttt, config.GetEndpoint(), endpoint)
		switch ty {
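The guards added above exist because a failed `assert.Len` inside an `EventuallyWithT` callback only records the failure and lets execution continue, so indexing `got.GetResult()[0]` on an empty result panics instead of retrying. A minimal self-contained sketch of the guard pattern (the function and slice type are illustrative):

```go
package example

import "github.com/stretchr/testify/assert"

// firstOrRetry shows the guard introduced above: assert.Len only records a
// failure, so without the early return the caller would index an empty
// slice and panic rather than letting EventuallyWithT retry on the next tick.
func firstOrRetry(tt *assert.CollectT, results []string) (string, bool) {
	if !assert.Len(tt, results, 1) {
		return "", false
	}
	return results[0], true
}
```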
@ -216,7 +216,7 @@ func TestServer_GetTarget(t *testing.T) {
				err := tt.args.dep(tt.args.ctx, tt.args.req, tt.want)
				require.NoError(t, err)
			}
			retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, time.Minute)
			retryDuration, tick := integration.WaitForAndTickWithMaxDuration(isolatedIAMOwnerCTX, 2*time.Minute)
			require.EventuallyWithT(t, func(ttt *assert.CollectT) {
				got, err := instance.Client.ActionV3Alpha.GetTarget(tt.args.ctx, tt.args.req)
				if tt.wantErr {
@ -8,9 +8,11 @@ import (
	"fmt"
	"io"
	"net/http"
	"strconv"

	"github.com/crewjam/saml"
	"github.com/gorilla/mux"
	"github.com/muhlemmer/gu"
	"github.com/zitadel/logging"

	http_utils "github.com/zitadel/zitadel/internal/api/http"
@ -49,6 +51,7 @@ const (
	paramError = "error"
	paramErrorDescription = "error_description"
	varIDPID = "idpid"
	paramInternalUI = "internalUI"
)

type Handler struct {
@ -187,21 +190,8 @@ func (h *Handler) handleMetadata(w http.ResponseWriter, r *http.Request) {
	}
	metadata := sp.ServiceProvider.Metadata()

	for i, spDesc := range metadata.SPSSODescriptors {
		spDesc.AssertionConsumerServices = append(
			spDesc.AssertionConsumerServices,
			saml.IndexedEndpoint{
				Binding: saml.HTTPPostBinding,
				Location: h.loginSAMLRootURL(ctx),
				Index: len(spDesc.AssertionConsumerServices) + 1,
			}, saml.IndexedEndpoint{
				Binding: saml.HTTPArtifactBinding,
				Location: h.loginSAMLRootURL(ctx),
				Index: len(spDesc.AssertionConsumerServices) + 2,
			},
		)
		metadata.SPSSODescriptors[i] = spDesc
	}
	internalUI, _ := strconv.ParseBool(r.URL.Query().Get(paramInternalUI))
	h.assertionConsumerServices(ctx, metadata, internalUI)

	buf, _ := xml.MarshalIndent(metadata, "", "  ")
	w.Header().Set("Content-Type", "application/samlmetadata+xml")
@ -212,6 +202,48 @@ func (h *Handler) handleMetadata(w http.ResponseWriter, r *http.Request) {
	}
}

func (h *Handler) assertionConsumerServices(ctx context.Context, metadata *saml.EntityDescriptor, internalUI bool) {
	if !internalUI {
		for i, spDesc := range metadata.SPSSODescriptors {
			spDesc.AssertionConsumerServices = append(
				spDesc.AssertionConsumerServices,
				saml.IndexedEndpoint{
					Binding: saml.HTTPPostBinding,
					Location: h.loginSAMLRootURL(ctx),
					Index: len(spDesc.AssertionConsumerServices) + 1,
				}, saml.IndexedEndpoint{
					Binding: saml.HTTPArtifactBinding,
					Location: h.loginSAMLRootURL(ctx),
					Index: len(spDesc.AssertionConsumerServices) + 2,
				},
			)
			metadata.SPSSODescriptors[i] = spDesc
		}
		return
	}
	for i, spDesc := range metadata.SPSSODescriptors {
		acs := make([]saml.IndexedEndpoint, 0, len(spDesc.AssertionConsumerServices)+2)
		acs = append(acs,
			saml.IndexedEndpoint{
				Binding: saml.HTTPPostBinding,
				Location: h.loginSAMLRootURL(ctx),
				Index: 0,
				IsDefault: gu.Ptr(true),
			},
			saml.IndexedEndpoint{
				Binding: saml.HTTPArtifactBinding,
				Location: h.loginSAMLRootURL(ctx),
				Index: 1,
			})
		for i := 0; i < len(spDesc.AssertionConsumerServices); i++ {
			spDesc.AssertionConsumerServices[i].Index = 2 + i
			acs = append(acs, spDesc.AssertionConsumerServices[i])
		}
		spDesc.AssertionConsumerServices = acs
		metadata.SPSSODescriptors[i] = spDesc
	}
}

func (h *Handler) handleACS(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	data := parseSAMLRequest(r)
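With the refactor above, the login-UI endpoints are only promoted to the default ACS (Index 0, `isDefault="true"`) when the metadata is requested with `?internalUI=true`; without the flag the previous append-at-the-end behavior is kept. A hedged Go sketch of fetching both variants; the host and IDP ID are placeholders, and the URL shape is taken from the test below:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder host and IDP ID -- substitute your instance's values.
	idpID := "my-idp-id"
	base := "http://localhost:8080/idps/" + idpID + "/saml/metadata"

	for _, u := range []string{base, base + "?internalUI=true"} {
		resp, err := http.Get(u)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		// The internalUI variant lists the login-UI ACS first (Index 0,
		// isDefault="true"); compare the two responses to see the reordering.
		fmt.Printf("%s -> %d bytes\n", u, len(body))
	}
}
```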
@ -23,6 +23,7 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	saml_xml "github.com/zitadel/saml/pkg/provider/xml"
	"github.com/zitadel/saml/pkg/provider/xml/md"
	"golang.org/x/crypto/bcrypt"

	http_util "github.com/zitadel/zitadel/internal/api/http"
@ -111,13 +112,15 @@ func TestServer_SAMLMetadata(t *testing.T) {
	oauthIdpResp := Instance.AddGenericOAuthProvider(CTX, Instance.DefaultOrg.Id)

	type args struct {
		ctx context.Context
		idpID string
		ctx context.Context
		idpID string
		internalUI bool
	}
	tests := []struct {
		name string
		args args
		want int
		name string
		args args
		want int
		wantACS []md.IndexedEndpointType
	}{
		{
			name: "saml metadata, invalid idp",
@ -142,11 +145,115 @@ func TestServer_SAMLMetadata(t *testing.T) {
				idpID: samlRedirectIdpID,
			},
			want: http.StatusOK,
			wantACS: []md.IndexedEndpointType{
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "1",
					IsDefault: "",
					Binding: saml.HTTPPostBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/idps/" + samlRedirectIdpID + "/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "2",
					IsDefault: "",
					Binding: saml.HTTPArtifactBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/idps/" + samlRedirectIdpID + "/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "3",
					IsDefault: "",
					Binding: saml.HTTPPostBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/ui/login/login/externalidp/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "4",
					IsDefault: "",
					Binding: saml.HTTPArtifactBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/ui/login/login/externalidp/saml/acs",
					ResponseLocation: "",
				},
			},
		},
		{
			name: "saml metadata, ok (internalUI)",
			args: args{
				ctx: CTX,
				idpID: samlRedirectIdpID,
				internalUI: true,
			},
			want: http.StatusOK,
			wantACS: []md.IndexedEndpointType{
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "0",
					IsDefault: "true",
					Binding: saml.HTTPPostBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/ui/login/login/externalidp/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "1",
					IsDefault: "",
					Binding: saml.HTTPArtifactBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/ui/login/login/externalidp/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "2",
					IsDefault: "",
					Binding: saml.HTTPPostBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/idps/" + samlRedirectIdpID + "/saml/acs",
					ResponseLocation: "",
				},
				{
					XMLName: xml.Name{
						Space: "urn:oasis:names:tc:SAML:2.0:metadata",
						Local: "AssertionConsumerService",
					},
					Index: "3",
					IsDefault: "",
					Binding: saml.HTTPArtifactBinding,
					Location: http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/idps/" + samlRedirectIdpID + "/saml/acs",
					ResponseLocation: "",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			metadataURL := http_util.BuildOrigin(Instance.Host(), Instance.Config.Secure) + "/idps/" + tt.args.idpID + "/saml/metadata"
			if tt.args.internalUI {
				metadataURL = metadataURL + "?internalUI=true"
			}
			resp, err := http.Get(metadataURL)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, resp.StatusCode)
@ -155,10 +262,11 @@ func TestServer_SAMLMetadata(t *testing.T) {
				defer resp.Body.Close()
				assert.NoError(t, err)

				_, err = saml_xml.ParseMetadataXmlIntoStruct(b)
				metadata, err := saml_xml.ParseMetadataXmlIntoStruct(b)
				assert.NoError(t, err)
			}

				assert.Equal(t, metadata.SPSSODescriptor.AssertionConsumerService, tt.wantACS)
			}
		})
	}
}
@ -215,18 +215,18 @@ func (o *OPStorage) TerminateSession(ctx context.Context, userID, clientID strin
		logging.Error("no user agent id")
		return zerrors.ThrowPreconditionFailed(nil, "OIDC-fso7F", "no user agent id")
	}
	userIDs, err := o.repo.UserSessionUserIDsByAgentID(ctx, userAgentID)
	sessions, err := o.repo.UserSessionsByAgentID(ctx, userAgentID)
	if err != nil {
		logging.WithError(err).Error("error retrieving user sessions")
		return err
	}
	if len(userIDs) == 0 {
	if len(sessions) == 0 {
		return nil
	}
	data := authz.CtxData{
		UserID: userID,
	}
	err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, userIDs)
	err = o.command.HumansSignOut(authz.SetCtxData(ctx, data), userAgentID, sessions)
	logging.OnError(err).Error("error signing out")
	return err
}
@ -278,18 +278,18 @@ func (o *OPStorage) terminateV1Session(ctx context.Context, userID, sessionID st
		if err != nil {
			return err
		}
		return o.command.HumansSignOut(ctx, userAgentID, []string{userID})
		return o.command.HumansSignOut(ctx, userAgentID, []command.HumanSignOutSession{{ID: sessionID, UserID: userID}})
	}
	// otherwise we search for all active sessions within the same user agent of the current session id
	userAgentID, userIDs, err := o.repo.ActiveUserIDsBySessionID(ctx, sessionID)
	userAgentID, sessions, err := o.repo.ActiveUserSessionsBySessionID(ctx, sessionID)
	if err != nil {
		logging.WithError(err).Error("error retrieving user sessions")
		return err
	}
	if len(userIDs) == 0 {
	if len(sessions) == 0 {
		return nil
	}
	return o.command.HumansSignOut(ctx, userAgentID, userIDs)
	return o.command.HumansSignOut(ctx, userAgentID, sessions)
}

func (o *OPStorage) RevokeToken(ctx context.Context, token, userID, clientID string) (err *oidc.Error) {
@ -588,6 +588,7 @@ func (s *Server) authResponseToken(authReq *AuthRequest, authorizer op.Authorize
		authReq.UserID,
		authReq.UserOrgID,
		client.client.ClientID,
		client.client.BackChannelLogoutURI,
		scope,
		authReq.Audience,
		authReq.AuthMethods(),
@ -599,6 +600,7 @@
		nil,
		slices.Contains(scope, oidc.ScopeOfflineAccess),
		authReq.SessionID,
		authReq.oidc().ResponseType,
	)
	if err != nil {
		op.AuthRequestError(w, r, authReq, err, authorizer)
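The refactor above replaces plain user-ID slices with command.HumanSignOutSession values, so each sign-out carries the session ID alongside the user ID, which back-channel logout needs in order to target individual sessions. A minimal sketch of building that slice from a sessionID-to-userID map, mirroring the repository code later in this diff (the type is simplified here for a standalone example):

package main

import "fmt"

// HumanSignOutSession mirrors the command.HumanSignOutSession type used in this diff.
type HumanSignOutSession struct {
	ID     string
	UserID string
}

// toSignOutSessions converts a sessionID->userID map into the slice
// HumansSignOut expects; note that map iteration order is not deterministic.
func toSignOutSessions(sessions map[string]string) []HumanSignOutSession {
	out := make([]HumanSignOutSession, 0, len(sessions))
	for sessionID, userID := range sessions {
		out = append(out, HumanSignOutSession{ID: sessionID, UserID: userID})
	}
	return out
}

func main() {
	fmt.Println(toSignOutSessions(map[string]string{"sess-1": "user-1"}))
}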
@ -348,7 +348,7 @@ func (o *OPStorage) getSigningKey(ctx context.Context) (op.SigningKey, error) {
		return nil, err
	}
	if len(keys.Keys) > 0 {
		return o.privateKeyToSigningKey(selectSigningKey(keys.Keys))
		return PrivateKeyToSigningKey(SelectSigningKey(keys.Keys), o.encAlg)
	}
	var position float64
	if keys.State != nil {
@ -377,8 +377,8 @@ func (o *OPStorage) ensureIsLatestKey(ctx context.Context, position float64) (bo
	return position >= maxSequence, nil
}

func (o *OPStorage) privateKeyToSigningKey(key query.PrivateKey) (_ op.SigningKey, err error) {
	keyData, err := crypto.Decrypt(key.Key(), o.encAlg)
func PrivateKeyToSigningKey(key query.PrivateKey, algorithm crypto.EncryptionAlgorithm) (_ op.SigningKey, err error) {
	keyData, err := crypto.Decrypt(key.Key(), algorithm)
	if err != nil {
		return nil, err
	}
@ -430,7 +430,7 @@ func (o *OPStorage) getMaxKeySequence(ctx context.Context) (float64, error) {
	)
}

func selectSigningKey(keys []query.PrivateKey) query.PrivateKey {
func SelectSigningKey(keys []query.PrivateKey) query.PrivateKey {
	return keys[len(keys)-1]
}

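Exporting PrivateKeyToSigningKey with an explicit crypto.EncryptionAlgorithm parameter decouples the key conversion from OPStorage's receiver state, so other packages can reuse it. A toy sketch of the same dependency-as-parameter pattern, with an entirely hypothetical decrypter interface standing in for the real crypto types:

package main

import "fmt"

// decrypter is a hypothetical stand-in for crypto.EncryptionAlgorithm.
type decrypter interface {
	Decrypt(ciphertext []byte) ([]byte, error)
}

type xorAlg struct{ key byte }

func (a xorAlg) Decrypt(ct []byte) ([]byte, error) {
	pt := make([]byte, len(ct))
	for i, b := range ct {
		pt[i] = b ^ a.key
	}
	return pt, nil
}

// keyToSigner takes the algorithm as a parameter instead of reading it from
// a receiver field, mirroring the PrivateKeyToSigningKey change above.
func keyToSigner(encrypted []byte, alg decrypter) (string, error) {
	keyData, err := alg.Decrypt(encrypted)
	if err != nil {
		return "", err
	}
	return string(keyData), nil
}

func main() {
	out, _ := keyToSigner([]byte{'k' ^ 0x42}, xorAlg{key: 0x42})
	fmt.Printf("%q\n", out) // "k"
}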
@ -42,6 +42,7 @@ type Config struct {
	DefaultLoginURLV2                string
	DefaultLogoutURLV2               string
	PublicKeyCacheMaxAge             time.Duration
	DefaultBackChannelLogoutLifetime time.Duration
}

type EndpointConfig struct {

@ -167,6 +167,7 @@ func (s *Server) EndSession(ctx context.Context, r *op.Request[oidc.EndSessionRe

func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales oidc.Locales) *oidc.DiscoveryConfiguration {
	issuer := op.IssuerFromContext(ctx)
	backChannelLogoutSupported := authz.GetInstance(ctx).Features().EnableBackChannelLogout

	return &oidc.DiscoveryConfiguration{
		Issuer: issuer,
@ -199,6 +200,8 @@ func (s *Server) createDiscoveryConfig(ctx context.Context, supportedUILocales o
		CodeChallengeMethodsSupported:     op.CodeChallengeMethods(s.Provider()),
		UILocalesSupported:                supportedUILocales,
		RequestParameterSupported:         s.Provider().RequestObjectSupported(),
		BackChannelLogoutSupported:        backChannelLogoutSupported,
		BackChannelLogoutSessionSupported: backChannelLogoutSupported,
	}
}

@ -60,12 +60,19 @@ func (s *Server) accessTokenResponseFromSession(ctx context.Context, client op.C
	return resp, err
}

// signerFunc is a getter function that allows ad hoc retrieval of the instance's signer.
type signerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error)
// SignerFunc is a getter function that allows ad hoc retrieval of the instance's signer.
type SignerFunc func(ctx context.Context) (jose.Signer, jose.SignatureAlgorithm, error)

// getSignerOnce returns a function which retrieves the instance's signer from the database once.
func (s *Server) getSignerOnce() SignerFunc {
	return GetSignerOnce(s.query.GetActiveSigningWebKey, s.Provider().Storage().SigningKey)
}

// GetSignerOnce returns a function which retrieves the instance's signer from the database once.
// Repeated calls of the returned function return the same results.
func (s *Server) getSignerOnce() signerFunc {
func GetSignerOnce(
	getActiveSigningWebKey func(ctx context.Context) (*jose.JSONWebKey, error),
	getSigningKey func(ctx context.Context) (op.SigningKey, error),
) SignerFunc {
	var (
		once   sync.Once
		signer jose.Signer
@ -79,7 +86,7 @@ func (s *Server) getSignerOnce() signerFunc {

		if authz.GetFeatures(ctx).WebKey {
			var webKey *jose.JSONWebKey
			webKey, err = s.query.GetActiveSigningWebKey(ctx)
			webKey, err = getActiveSigningWebKey(ctx)
			if err != nil {
				return
			}
@ -88,7 +95,7 @@ func (s *Server) getSignerOnce() signerFunc {
		}

		var signingKey op.SigningKey
		signingKey, err = s.Provider().Storage().SigningKey(ctx)
		signingKey, err = getSigningKey(ctx)
		if err != nil {
			return
		}
@ -126,7 +133,7 @@ func (s *Server) getUserInfo(userID, projectID string, projectRoleAssertion, use
	}
}

func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey signerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) {
func (*Server) createIDToken(ctx context.Context, client op.Client, getUserInfo userInfoFunc, roleAssertion bool, getSigningKey SignerFunc, sessionID, accessToken string, audience []string, authMethods []domain.UserAuthMethodType, authTime time.Time, nonce string, actor *domain.TokenActor) (idToken string, exp uint64, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

@ -170,7 +177,7 @@ func timeToOIDCExpiresIn(exp time.Time) uint64 {
	return uint64(time.Until(exp) / time.Second)
}

func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner signerFunc) (_ string, err error) {
func (s *Server) createJWT(ctx context.Context, client op.Client, session *command.OIDCSession, getUserInfo userInfoFunc, assertRoles bool, getSigner SignerFunc) (_ string, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

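GetSignerOnce memoizes the first lookup behind sync.Once, so a request that signs several tokens hits the database only once. A self-contained sketch of the same memoized-getter pattern, with a hypothetical fetch function in place of the actual signer lookup:

package main

import (
	"context"
	"fmt"
	"sync"
)

// getterOnce wraps fetch so that repeated calls of the returned function
// perform the fetch only once and then return the memoized results.
func getterOnce(fetch func(ctx context.Context) (string, error)) func(ctx context.Context) (string, error) {
	var (
		once sync.Once
		val  string
		err  error
	)
	return func(ctx context.Context) (string, error) {
		once.Do(func() {
			val, err = fetch(ctx)
		})
		return val, err
	}
}

func main() {
	calls := 0
	get := getterOnce(func(ctx context.Context) (string, error) {
		calls++
		return "signing-key", nil
	})
	get(context.Background())
	v, _ := get(context.Background())
	fmt.Println(v, calls) // signing-key 1
}

Note that a failure is memoized as well, which is presumably why the server constructs a fresh SignerFunc per request rather than caching one globally.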
@ -35,6 +35,7 @@ func (s *Server) ClientCredentialsExchange(ctx context.Context, r *op.ClientRequ
		client.userID,
		client.resourceOwner,
		client.clientID,
		"", // backChannelLogoutURI not needed for service user session
		scope,
		domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
		[]domain.UserAuthMethodType{domain.UserAuthMethodTypePassword},
@ -46,6 +47,7 @@ func (s *Server) ClientCredentialsExchange(ctx context.Context, r *op.ClientRequ
		nil,
		false,
		"",
		domain.OIDCResponseTypeUnspecified,
	)
	if err != nil {
		return nil, err
@ -75,6 +75,7 @@ func (s *Server) codeExchangeV1(ctx context.Context, client *Client, req *oidc.A
		authReq.UserID,
		authReq.UserOrgID,
		client.client.ClientID,
		client.client.BackChannelLogoutURI,
		scope,
		authReq.Audience,
		authReq.AuthMethods(),
@ -86,6 +87,7 @@ func (s *Server) codeExchangeV1(ctx context.Context, client *Client, req *oidc.A
		nil,
		slices.Contains(scope, oidc.ScopeOfflineAccess),
		authReq.SessionID,
		authReq.oidc().ResponseType,
	)
	if err != nil {
		return nil, err
@ -288,6 +288,7 @@ func (s *Server) createExchangeAccessToken(
		userID,
		resourceOwner,
		client.client.ClientID,
		client.client.BackChannelLogoutURI,
		scope,
		audience,
		authMethods,
@ -299,6 +300,7 @@ func (s *Server) createExchangeAccessToken(
		actor,
		slices.Contains(scope, oidc.ScopeOfflineAccess),
		"",
		domain.OIDCResponseTypeUnspecified,
	)
	if err != nil {
		return "", "", "", 0, err
@ -315,7 +317,7 @@ func (s *Server) createExchangeJWT(
	client *Client,
	getUserInfo userInfoFunc,
	roleAssertion bool,
	getSigner signerFunc,
	getSigner SignerFunc,
	userID,
	resourceOwner string,
	audience,
@ -333,6 +335,7 @@ func (s *Server) createExchangeJWT(
		userID,
		resourceOwner,
		client.client.ClientID,
		client.client.BackChannelLogoutURI,
		scope,
		audience,
		authMethods,
@ -344,6 +347,7 @@ func (s *Server) createExchangeJWT(
		actor,
		slices.Contains(scope, oidc.ScopeOfflineAccess),
		"",
		domain.OIDCResponseTypeUnspecified,
	)
	accessToken, err = s.createJWT(ctx, client, session, getUserInfo, roleAssertion, getSigner)
	if err != nil {
@ -45,6 +45,7 @@ func (s *Server) JWTProfile(ctx context.Context, r *op.Request[oidc.JWTProfileGr
		client.userID,
		client.resourceOwner,
		client.clientID,
		"", // backChannelLogoutURI not needed for service user session
		scope,
		domain.AddAudScopeToAudience(ctx, nil, r.Data.Scope),
		[]domain.UserAuthMethodType{domain.UserAuthMethodTypePrivateKey},
@ -56,6 +57,7 @@ func (s *Server) JWTProfile(ctx context.Context, r *op.Request[oidc.JWTProfileGr
		nil,
		false,
		"",
		domain.OIDCResponseTypeUnspecified,
	)
	if err != nil {
		return nil, err
@ -54,6 +54,7 @@ func (s *Server) refreshTokenV1(ctx context.Context, client *Client, r *op.Clien
		refreshToken.UserID,
		refreshToken.ResourceOwner,
		refreshToken.ClientID,
		"", // backChannelLogoutURI is not in refresh token view
		scope,
		refreshToken.Audience,
		AMRToAuthMethodTypes(refreshToken.AuthMethodsReferences),
@ -68,6 +69,7 @@ func (s *Server) refreshTokenV1(ctx context.Context, client *Client, r *op.Clien
		refreshToken.Actor,
		true,
		"",
		domain.OIDCResponseTypeUnspecified,
	)
	if err != nil {
		return nil, err
@ -112,10 +112,10 @@
	</div>
</form>

<script src="{{ resourceUrl " scripts/input_suffix_offset.js" }}"></script>
<script src="{{ resourceUrl " scripts/form_submit.js" }}"></script>
<script src="{{ resourceUrl " scripts/password_policy_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/register_check.js" }}"></script>
<script src="{{ resourceUrl " scripts/loginname_suffix.js" }}"></script>
<script src="{{ resourceUrl "scripts/input_suffix_offset.js" }}"></script>
<script src="{{ resourceUrl "scripts/form_submit.js" }}"></script>
<script src="{{ resourceUrl "scripts/password_policy_check.js" }}"></script>
<script src="{{ resourceUrl "scripts/register_check.js" }}"></script>
<script src="{{ resourceUrl "scripts/loginname_suffix.js" }}"></script>

{{template "main-bottom" .}}
@ -6,6 +6,7 @@ import (

	"github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/auth/repository/eventsourcing/view"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/systemdefaults"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
@ -27,26 +28,40 @@ func (repo *UserRepo) Health(ctx context.Context) error {
	return repo.Eventstore.Health(ctx)
}

func (repo *UserRepo) UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error) {
	userSessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID())
func (repo *UserRepo) UserSessionsByAgentID(ctx context.Context, agentID string) ([]command.HumanSignOutSession, error) {
	sessions, err := repo.View.UserSessionsByAgentID(ctx, agentID, authz.GetInstance(ctx).InstanceID())
	if err != nil {
		return nil, err
	}
	userIDs := make([]string, 0, len(userSessions))
	for _, session := range userSessions {
		if session.State.V == domain.UserSessionStateActive {
			userIDs = append(userIDs, session.UserID)
	signoutSessions := make([]command.HumanSignOutSession, 0, len(sessions))
	for _, session := range sessions {
		if session.State.V == domain.UserSessionStateActive && session.ID.Valid {
			signoutSessions = append(signoutSessions, command.HumanSignOutSession{
				ID:     session.ID.String,
				UserID: session.UserID,
			})
		}
	}
	return userIDs, nil
	return signoutSessions, nil
}

func (repo *UserRepo) UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error) {
	return repo.View.UserAgentIDBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
}

func (repo *UserRepo) ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error) {
	return repo.View.ActiveUserIDsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
func (repo *UserRepo) ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, signoutSessions []command.HumanSignOutSession, err error) {
	userAgentID, sessions, err := repo.View.ActiveUserSessionsBySessionID(ctx, sessionID, authz.GetInstance(ctx).InstanceID())
	if err != nil {
		return "", nil, err
	}
	signoutSessions = make([]command.HumanSignOutSession, 0, len(sessions))
	for sessionID, userID := range sessions {
		signoutSessions = append(signoutSessions, command.HumanSignOutSession{
			ID:     sessionID,
			UserID: userID,
		})
	}
	return userAgentID, signoutSessions, nil
}

func (repo *UserRepo) UserEventsByID(ctx context.Context, id string, changeDate time.Time, eventTypes []eventstore.EventType) ([]eventstore.Event, error) {

@ -24,8 +24,8 @@ func (v *View) UserAgentIDBySessionID(ctx context.Context, sessionID, instanceID
	return view.UserAgentIDBySessionID(ctx, v.client, sessionID, instanceID)
}

func (v *View) ActiveUserIDsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, userIDs []string, err error) {
	return view.ActiveUserIDsBySessionID(ctx, v.client, sessionID, instanceID)
func (v *View) ActiveUserSessionsBySessionID(ctx context.Context, sessionID, instanceID string) (userAgentID string, sessions map[string]string, err error) {
	return view.ActiveUserSessionsBySessionID(ctx, v.client, sessionID, instanceID)
}

func (v *View) GetLatestUserSessionSequence(ctx context.Context, instanceID string) (_ *query.CurrentState, err error) {

@ -2,10 +2,12 @@ package repository

import (
	"context"

	"github.com/zitadel/zitadel/internal/command"
)

type UserRepository interface {
	UserSessionUserIDsByAgentID(ctx context.Context, agentID string) ([]string, error)
	UserSessionsByAgentID(ctx context.Context, agentID string) (sessions []command.HumanSignOutSession, err error)
	UserAgentIDBySessionID(ctx context.Context, sessionID string) (string, error)
	ActiveUserIDsBySessionID(ctx context.Context, sessionID string) (userAgentID string, userIDs []string, err error)
	ActiveUserSessionsBySessionID(ctx context.Context, sessionID string) (userAgentID string, sessions []command.HumanSignOutSession, err error)
}

44
internal/cache/cache.go
vendored
@ -6,8 +6,16 @@ import (
	"time"

	"github.com/zitadel/logging"
)

	"github.com/zitadel/zitadel/internal/database/postgres"
// Purpose describes which object types are stored by a cache.
type Purpose int

//go:generate enumer -type Purpose -transform snake -trimprefix Purpose
const (
	PurposeUnspecified Purpose = iota
	PurposeAuthzInstance
	PurposeMilestones
)

// Cache stores objects with a value of type `V`.
@ -72,18 +80,19 @@ type Entry[I, K comparable] interface {
	Keys(index I) (key []K)
}

type CachesConfig struct {
	Connectors struct {
		Memory   MemoryConnectorConfig
		Postgres PostgresConnectorConfig
		// Redis redis.Config?
	}
	Instance   *CacheConfig
	Milestones *CacheConfig
}
type Connector int

type CacheConfig struct {
	Connector string
//go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text
const (
	// Empty line comment ensures empty string for unspecified value
	ConnectorUnspecified Connector = iota //
	ConnectorMemory
	ConnectorPostgres
	ConnectorRedis
)

type Config struct {
	Connector Connector

	// Age since an object was added to the cache,
	// after which the object is considered invalid.
@ -99,14 +108,3 @@ type CacheConfig struct {
	// By default only errors are logged to stdout.
	Log *logging.Config
}

type MemoryConnectorConfig struct {
	Enabled   bool
	AutoPrune AutoPruneConfig
}

type PostgresConnectorConfig struct {
	Enabled    bool
	AutoPrune  AutoPruneConfig
	Connection postgres.Config
}

69
internal/cache/connector/connector.go
vendored
Normal file
@ -0,0 +1,69 @@
// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages.
package connector

import (
	"context"
	"fmt"

	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/cache/connector/gomap"
	"github.com/zitadel/zitadel/internal/cache/connector/noop"
	"github.com/zitadel/zitadel/internal/cache/connector/pg"
	"github.com/zitadel/zitadel/internal/cache/connector/redis"
	"github.com/zitadel/zitadel/internal/database"
)

type CachesConfig struct {
	Connectors struct {
		Memory   gomap.Config
		Postgres pg.Config
		Redis    redis.Config
	}
	Instance   *cache.Config
	Milestones *cache.Config
}

type Connectors struct {
	Config   CachesConfig
	Memory   *gomap.Connector
	Postgres *pg.Connector
	Redis    *redis.Connector
}

func StartConnectors(conf *CachesConfig, client *database.DB) (Connectors, error) {
	if conf == nil {
		return Connectors{}, nil
	}
	return Connectors{
		Config:   *conf,
		Memory:   gomap.NewConnector(conf.Connectors.Memory),
		Postgres: pg.NewConnector(conf.Connectors.Postgres, client),
		Redis:    redis.NewConnector(conf.Connectors.Redis),
	}, nil
}

func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) {
	if conf == nil || conf.Connector == cache.ConnectorUnspecified {
		return noop.NewCache[I, K, V](), nil
	}
	if conf.Connector == cache.ConnectorMemory && connectors.Memory != nil {
		c := gomap.NewCache[I, K, V](background, indices, *conf)
		connectors.Memory.Config.StartAutoPrune(background, c, purpose)
		return c, nil
	}
	if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil {
		c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres)
		if err != nil {
			return nil, fmt.Errorf("start cache: %w", err)
		}
		connectors.Postgres.Config.AutoPrune.StartAutoPrune(background, c, purpose)
		return c, nil
	}
	if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil {
		db := connectors.Redis.Config.DBOffset + int(purpose)
		c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices)
		return c, nil
	}

	return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
}
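StartCache dispatches on the configured connector and falls back to a no-op cache when nothing is configured, so callers never need nil checks; a configured but disabled connector is an error. A reduced, self-contained sketch of that selection pattern (hypothetical names; the real function is generic over index, key and entry types, and for Redis it also derives the logical DB as DBOffset + int(purpose)):

package main

import (
	"errors"
	"fmt"
)

type connector int

const (
	connectorUnspecified connector = iota
	connectorMemory
	connectorRedis
)

type cache interface{ Name() string }

type noopCache struct{}

func (noopCache) Name() string { return "noop" }

type memCache struct{}

func (memCache) Name() string { return "memory" }

// startCache mirrors the dispatch in StartCache: an unspecified connector
// yields a no-op cache, a configured but disabled connector is an error.
func startCache(conn connector, memoryEnabled bool) (cache, error) {
	switch {
	case conn == connectorUnspecified:
		return noopCache{}, nil
	case conn == connectorMemory && memoryEnabled:
		return memCache{}, nil
	}
	return nil, errors.New("cache connector not enabled")
}

func main() {
	c, _ := startCache(connectorUnspecified, false)
	fmt.Println(c.Name()) // noop
}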
23
internal/cache/connector/gomap/connector.go
vendored
Normal file
@ -0,0 +1,23 @@
package gomap

import (
	"github.com/zitadel/zitadel/internal/cache"
)

type Config struct {
	Enabled   bool
	AutoPrune cache.AutoPruneConfig
}

type Connector struct {
	Config cache.AutoPruneConfig
}

func NewConnector(config Config) *Connector {
	if !config.Enabled {
		return nil
	}
	return &Connector{
		Config: config.AutoPrune,
	}
}
@ -14,14 +14,14 @@ import (
)

type mapCache[I, K comparable, V cache.Entry[I, K]] struct {
	config   *cache.CacheConfig
	config   *cache.Config
	indexMap map[I]*index[K, V]
	logger   *slog.Logger
}

// NewCache returns an in-memory Cache implementation based on the builtin go map type.
// Object values are stored as-is and there is no encoding or decoding involved.
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.CacheConfig) cache.PrunerCache[I, K, V] {
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] {
	m := &mapCache[I, K, V]{
		config:   &config,
		indexMap: make(map[I]*index[K, V], len(indices)),
@ -116,7 +116,7 @@ func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {

type index[K comparable, V any] struct {
	mutex   sync.RWMutex
	config  *cache.CacheConfig
	config  *cache.Config
	entries map[K]*entry[V]
}

@ -177,7 +177,7 @@ type entry[V any] struct {
	lastUse atomic.Int64 // UnixMicro time
}

func (e *entry[V]) isValid(c *cache.CacheConfig) bool {
func (e *entry[V]) isValid(c *cache.Config) bool {
	if e.invalid.Load() {
		return false
	}
@ -41,7 +41,7 @@ func (o *testObject) Keys(index testIndex) []string {
}

func Test_mapCache_Get(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
@ -103,7 +103,7 @@ func Test_mapCache_Get(t *testing.T) {
}

func Test_mapCache_Invalidate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
@ -124,7 +124,7 @@ func Test_mapCache_Invalidate(t *testing.T) {
}

func Test_mapCache_Delete(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
@ -157,7 +157,7 @@ func Test_mapCache_Delete(t *testing.T) {
}

func Test_mapCache_Prune(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
@ -193,7 +193,7 @@ func Test_mapCache_Prune(t *testing.T) {
}

func Test_mapCache_Truncate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.CacheConfig{
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
@ -235,7 +235,7 @@ func Test_entry_isValid(t *testing.T) {
	tests := []struct {
		name   string
		fields fields
		config *cache.CacheConfig
		config *cache.Config
		want   bool
	}{
		{
@ -245,7 +245,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: true,
				lastUse: time.Now(),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -258,7 +258,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -271,7 +271,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				LastUseAge: time.Second,
			},
			want: true,
@ -283,7 +283,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -296,7 +296,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				MaxAge: time.Minute,
			},
			want: true,
@ -308,7 +308,7 @@ func Test_entry_isValid(t *testing.T) {
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.CacheConfig{
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
28
internal/cache/connector/pg/connector.go
vendored
Normal file
@ -0,0 +1,28 @@
package pg

import (
	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/database"
)

type Config struct {
	Enabled   bool
	AutoPrune cache.AutoPruneConfig
}

type Connector struct {
	PGXPool
	Dialect string
	Config  Config
}

func NewConnector(config Config, client *database.DB) *Connector {
	if !config.Enabled {
		return nil
	}
	return &Connector{
		PGXPool: client.Pool,
		Dialect: client.Type(),
		Config:  config,
	}
}
@ -40,25 +40,25 @@ type PGXPool interface {
}

type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
	name    string
	config  *cache.CacheConfig
	indices []I
	pool    PGXPool
	logger  *slog.Logger
	purpose   cache.Purpose
	config    *cache.Config
	indices   []I
	connector *Connector
	logger    *slog.Logger
}

// NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name string, config cache.CacheConfig, indices []I, pool PGXPool, dialect string) (cache.PrunerCache[I, K, V], error) {
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
	c := &pgCache[I, K, V]{
		name:    name,
		config:  &config,
		indices: indices,
		pool:    pool,
		logger:  config.Log.Slog().With("cache_name", name),
		purpose:   purpose,
		config:    &config,
		indices:   indices,
		connector: connector,
		logger:    config.Log.Slog().With("cache_purpose", purpose),
	}
	c.logger.InfoContext(ctx, "pg cache logging enabled")

	if dialect == "postgres" {
	if connector.Dialect == "postgres" {
		if err := c.createPartition(ctx); err != nil {
			return nil, err
		}
@ -68,10 +68,10 @@ func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name

func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error {
	var query strings.Builder
	if err := createPartitionTmpl.Execute(&query, c.name); err != nil {
	if err := createPartitionTmpl.Execute(&query, c.purpose.String()); err != nil {
		return err
	}
	_, err := c.pool.Exec(ctx, query.String())
	_, err := c.connector.Exec(ctx, query.String())
	return err
}

@ -87,7 +87,7 @@ func (c *pgCache[I, K, V]) set(ctx context.Context, entry V) (err error) {
	keys := c.indexKeysFromEntry(entry)
	c.logger.DebugContext(ctx, "pg cache set", "index_key", keys)

	_, err = c.pool.Exec(ctx, setQuery, c.name, keys, entry)
	_, err = c.connector.Exec(ctx, setQuery, c.purpose.String(), keys, entry)
	if err != nil {
		c.logger.ErrorContext(ctx, "pg cache set", "err", err)
		return err
@ -117,7 +117,7 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er
	if !slices.Contains(c.indices, index) {
		return value, cache.NewIndexUnknownErr(index)
	}
	err = c.pool.QueryRow(ctx, getQuery, c.name, index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
	err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
	return value, err
}

@ -125,7 +125,7 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, invalidateQuery, c.name, index, keys)
	_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys)
	c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
	return err
}
@ -134,7 +134,7 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, deleteQuery, c.name, index, keys)
	_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys)
	c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
	return err
}
@ -143,7 +143,7 @@ func (c *pgCache[I, K, V]) Prune(ctx context.Context) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, pruneQuery, c.name, c.config.MaxAge, c.config.LastUseAge)
	_, err = c.connector.Exec(ctx, pruneQuery, c.purpose.String(), c.config.MaxAge, c.config.LastUseAge)
	c.logger.DebugContext(ctx, "pg cache prune")
	return err
}
@ -152,7 +152,7 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, truncateQuery, c.name)
	_, err = c.connector.Exec(ctx, truncateQuery, c.purpose.String())
	c.logger.DebugContext(ctx, "pg cache truncate")
	return err
}
@ -67,7 +67,7 @@ func TestNewCache(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			conf := cache.CacheConfig{
			conf := cache.Config{
				Log: &logging.Config{
					Level:     "debug",
					AddSource: true,
@ -76,8 +76,12 @@ func TestNewCache(t *testing.T) {
			pool, err := pgxmock.NewPool()
			require.NoError(t, err)
			tt.expect(pool)
			connector := &Connector{
				PGXPool: pool,
				Dialect: "postgres",
			}

			c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres")
			c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
			require.ErrorIs(t, err, tt.wantErr)
			if tt.wantErr == nil {
				assert.NotNil(t, c)
@ -111,7 +115,7 @@ func Test_pgCache_Set(t *testing.T) {
			},
			expect: func(ppi pgxmock.PgxCommonIface) {
				ppi.ExpectExec(queryExpect).
					WithArgs("test",
					WithArgs(cachePurpose.String(),
						[]indexKey[testIndex, string]{
							{IndexID: testIndexID, IndexKey: "id1"},
							{IndexID: testIndexName, IndexKey: "foo"},
@ -135,7 +139,7 @@ func Test_pgCache_Set(t *testing.T) {
			},
			expect: func(ppi pgxmock.PgxCommonIface) {
				ppi.ExpectExec(queryExpect).
					WithArgs("test",
					WithArgs(cachePurpose.String(),
						[]indexKey[testIndex, string]{
							{IndexID: testIndexID, IndexKey: "id1"},
							{IndexID: testIndexName, IndexKey: "foo"},
@ -151,7 +155,7 @@ func Test_pgCache_Set(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, cache.CacheConfig{})
			c, pool := prepareCache(t, cache.Config{})
			defer pool.Close()
			tt.expect(pool)

@ -173,7 +177,7 @@ func Test_pgCache_Get(t *testing.T) {
	}
	tests := []struct {
		name   string
		config cache.CacheConfig
		config cache.Config
		args   args
		expect func(pgxmock.PgxCommonIface)
		want   *testObject
@ -181,7 +185,7 @@ func Test_pgCache_Get(t *testing.T) {
	}{
		{
			name: "invalid index",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -194,7 +198,7 @@ func Test_pgCache_Get(t *testing.T) {
		},
		{
			name: "no rows",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
@ -204,14 +208,14 @@ func Test_pgCache_Get(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WillReturnRows(pgxmock.NewRows([]string{"payload"}))
			},
			wantOk: false,
		},
		{
			name: "error",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
@ -221,14 +225,14 @@ func Test_pgCache_Get(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantOk: false,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -238,7 +242,7 @@ func Test_pgCache_Get(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Minute, time.Second).
					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second).
					WillReturnRows(
						pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
							ID: "id1",
@ -276,14 +280,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
	}
	tests := []struct {
		name    string
		config  cache.CacheConfig
		config  cache.Config
		args    args
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
@ -293,14 +297,14 @@ func Test_pgCache_Invalidate(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -310,7 +314,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
@ -338,14 +342,14 @@ func Test_pgCache_Delete(t *testing.T) {
	}
	tests := []struct {
		name    string
		config  cache.CacheConfig
		config  cache.Config
		args    args
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
@ -355,14 +359,14 @@ func Test_pgCache_Delete(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
@ -372,7 +376,7 @@ func Test_pgCache_Delete(t *testing.T) {
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
@ -396,32 +400,32 @@ func Test_pgCache_Prune(t *testing.T) {
	queryExpect := regexp.QuoteMeta(pruneQuery)
	tests := []struct {
		name    string
		config  cache.CacheConfig
		config  cache.Config
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", time.Duration(0), time.Duration(0)).
					WithArgs(cachePurpose.String(), time.Duration(0), time.Duration(0)).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", time.Minute, time.Second).
					WithArgs(cachePurpose.String(), time.Minute, time.Second).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
@ -445,32 +449,32 @@ func Test_pgCache_Truncate(t *testing.T) {
	queryExpect := regexp.QuoteMeta(truncateQuery)
	tests := []struct {
		name    string
		config  cache.CacheConfig
		config  cache.Config
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     0,
				LastUseAge: 0,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test").
					WithArgs(cachePurpose.String()).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test").
					WithArgs(cachePurpose.String()).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
@ -491,18 +495,18 @@ func Test_pgCache_Truncate(t *testing.T) {
}

const (
	cacheName = "test"
	expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_test
	cachePurpose = cache.PurposeAuthzInstance
	expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_authz_instance
partition of cache.objects
for values in ('test');
for values in ('authz_instance');

create unlogged table if not exists cache.string_keys_test
create unlogged table if not exists cache.string_keys_authz_instance
partition of cache.string_keys
for values in ('test');
for values in ('authz_instance');
`
)

func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
	conf.Log = &logging.Config{
		Level:     "debug",
		AddSource: true,
@ -512,8 +516,11 @@ func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testI

	pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
		WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))

	connector := &Connector{
		PGXPool: pool,
		Dialect: "postgres",
	}
	c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
	require.NoError(t, err)
	return c, pool
}
10
internal/cache/connector/redis/_remove.lua
vendored
Normal file
@ -0,0 +1,10 @@
local function remove(object_id)
    local setKey = keySetKey(object_id)
    local keys = redis.call("SMEMBERS", setKey)
    local n = #keys
    for i = 1, n do
        redis.call("DEL", keys[i])
    end
    redis.call("DEL", setKey)
    redis.call("DEL", object_id)
end
3
internal/cache/connector/redis/_select.lua
vendored
Normal file
@ -0,0 +1,3 @@
-- SELECT ensures the DB namespace for each script.
-- When used, it consumes the first ARGV entry.
redis.call("SELECT", ARGV[1])
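Each cache script selects its Redis logical database before touching any keys, which is how caches with different purposes stay isolated on one server. A sketch of how such a script might be invoked from Go with go-redis, passing the DB number as the first ARGV entry (the script body here is a hypothetical minimal example, not one of the real assembled scripts):

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// The script consumes ARGV[1] as the logical DB to select, mirroring _select.lua.
var getScript = redis.NewScript(`
redis.call("SELECT", ARGV[1])
return redis.call("GET", KEYS[1])
`)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	ctx := context.Background()
	// DB 10 here stands in for DBOffset + int(purpose).
	val, err := getScript.Run(ctx, rdb, []string{"object-1"}, 10).Result()
	fmt.Println(val, err)
}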
17
internal/cache/connector/redis/_util.lua
vendored
Normal file
@ -0,0 +1,17 @@
-- keySetKey returns the redis key of the set containing all keys to the object.
local function keySetKey (object_id)
    return object_id .. "-keys"
end

local function getTime()
    return tonumber(redis.call('TIME')[1])
end

-- getCall wraps redis.call so a nil is returned instead of false.
local function getCall (...)
    local result = redis.call(...)
    if result == false then
        return nil
    end
    return result
end
90
internal/cache/connector/redis/circuit_breaker.go
vendored
Normal file
@ -0,0 +1,90 @@
package redis

import (
	"context"
	"errors"
	"time"

	"github.com/redis/go-redis/v9"
	"github.com/sony/gobreaker/v2"
	"github.com/zitadel/logging"
)

const defaultInflightSize = 100000

type CBConfig struct {
	// Interval after which the counters are reset to 0.
	// An interval of 0 never resets the counters until the CB opens.
	Interval time.Duration
	// Number of consecutive failures permitted.
	MaxConsecutiveFailures uint32
	// Maximum ratio of failed requests out of total requests.
	MaxFailureRatio float64
	// Timeout after opening of the CB, until the state is set to half-open.
	Timeout time.Duration
	// Number of requests allowed to pass while the CB is half-open.
	MaxRetryRequests uint32
}

func (config *CBConfig) readyToTrip(counts gobreaker.Counts) bool {
	if config.MaxConsecutiveFailures > 0 && counts.ConsecutiveFailures > config.MaxConsecutiveFailures {
		return true
	}
	if config.MaxFailureRatio > 0 && counts.Requests > 0 {
		failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
		return failureRatio > config.MaxFailureRatio
	}
	return false
}

// limiter implements [redis.Limiter] as a circuit breaker.
type limiter struct {
	inflight chan func(success bool)
	cb       *gobreaker.TwoStepCircuitBreaker[struct{}]
}

func newLimiter(config *CBConfig, maxActiveConns int) redis.Limiter {
	if config == nil {
		return nil
	}
	// The size of the inflight channel needs to be big enough for maxActiveConns to prevent blocking.
	// When that is 0 (no limit), we must set a sane default.
	if maxActiveConns <= 0 {
		maxActiveConns = defaultInflightSize
	}
	return &limiter{
		inflight: make(chan func(success bool), maxActiveConns),
		cb: gobreaker.NewTwoStepCircuitBreaker[struct{}](gobreaker.Settings{
			Name:        "redis cache",
			MaxRequests: config.MaxRetryRequests,
			Interval:    config.Interval,
			Timeout:     config.Timeout,
			ReadyToTrip: config.readyToTrip,
			OnStateChange: func(name string, from, to gobreaker.State) {
				logging.WithFields("name", name, "from", from, "to", to).Warn("circuit breaker state change")
			},
		}),
	}
}

// Allow implements [redis.Limiter].
func (l *limiter) Allow() error {
	done, err := l.cb.Allow()
	if err != nil {
		return err
	}
	l.inflight <- done
	return nil
}

// ReportResult implements [redis.Limiter].
//
// ReportResult checks the error returned by the Redis client.
// `nil`, [redis.Nil] and [context.Canceled] are not considered failures.
// Any other error, like a connection error or [context.DeadlineExceeded], is counted as a failure.
func (l *limiter) ReportResult(err error) {
	done := <-l.inflight
	done(err == nil ||
		errors.Is(err, redis.Nil) ||
		errors.Is(err, context.Canceled))
}
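Because limiter satisfies go-redis's Limiter interface (Allow gates each command, ReportResult feeds its outcome back), plugging the circuit breaker into a client is just a matter of setting a client option. A minimal wiring sketch, assuming go-redis exposes the Limiter hook on its Options struct as this file's design suggests:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

// trivialLimiter shows the shape of the [redis.Limiter] hook that the
// circuit breaker above implements: Allow gates each command and
// ReportResult receives its outcome.
type trivialLimiter struct{}

func (trivialLimiter) Allow() error           { return nil }
func (trivialLimiter) ReportResult(err error) {}

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:        "localhost:6379",
		DialTimeout: 5 * time.Second,
		Limiter:     trivialLimiter{},
	})
	fmt.Println(rdb.Ping(context.Background()).Err())
}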
168
internal/cache/connector/redis/circuit_breaker_test.go
vendored
Normal file
168
internal/cache/connector/redis/circuit_breaker_test.go
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sony/gobreaker/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/zitadel/zitadel/internal/cache"
|
||||
)
|
||||
|
||||
func TestCBConfig_readyToTrip(t *testing.T) {
|
||||
type fields struct {
|
||||
MaxConsecutiveFailures uint32
|
||||
MaxFailureRatio float64
|
||||
}
|
||||
type args struct {
|
||||
counts gobreaker.Counts
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want bool
|
	}{
		{
			name:   "disabled",
			fields: fields{},
			args: args{
				counts: gobreaker.Counts{
					Requests:            100,
					ConsecutiveFailures: 5,
					TotalFailures:       10,
				},
			},
			want: false,
		},
		{
			name: "no failures",
			fields: fields{
				MaxConsecutiveFailures: 5,
				MaxFailureRatio:        0.1,
			},
			args: args{
				counts: gobreaker.Counts{
					Requests:            100,
					ConsecutiveFailures: 0,
					TotalFailures:       0,
				},
			},
			want: false,
		},
		{
			name: "some failures",
			fields: fields{
				MaxConsecutiveFailures: 5,
				MaxFailureRatio:        0.1,
			},
			args: args{
				counts: gobreaker.Counts{
					Requests:            100,
					ConsecutiveFailures: 5,
					TotalFailures:       10,
				},
			},
			want: false,
		},
		{
			name: "consecutive exceeded",
			fields: fields{
				MaxConsecutiveFailures: 5,
				MaxFailureRatio:        0.1,
			},
			args: args{
				counts: gobreaker.Counts{
					Requests:            100,
					ConsecutiveFailures: 6,
					TotalFailures:       0,
				},
			},
			want: true,
		},
		{
			name: "ratio exceeded",
			fields: fields{
				MaxConsecutiveFailures: 5,
				MaxFailureRatio:        0.1,
			},
			args: args{
				counts: gobreaker.Counts{
					Requests:            100,
					ConsecutiveFailures: 1,
					TotalFailures:       11,
				},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config := &CBConfig{
				MaxConsecutiveFailures: tt.fields.MaxConsecutiveFailures,
				MaxFailureRatio:        tt.fields.MaxFailureRatio,
			}
			if got := config.readyToTrip(tt.args.counts); got != tt.want {
				t.Errorf("CBConfig.readyToTrip() = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_redisCache_limiter(t *testing.T) {
	c, _ := prepareCache(t, cache.Config{}, withCircuitBreakerOption(
		&CBConfig{
			MaxConsecutiveFailures: 2,
			MaxFailureRatio:        0.4,
			Timeout:                100 * time.Millisecond,
			MaxRetryRequests:       1,
		},
	))

	ctx := context.Background()
	canceledCtx, cancel := context.WithCancel(ctx)
	cancel()
	timedOutCtx, cancel := context.WithTimeout(ctx, -1)
	defer cancel()

	// CB is and should remain closed
	for i := 0; i < 10; i++ {
		err := c.Truncate(ctx)
		require.NoError(t, err)
	}
	for i := 0; i < 10; i++ {
		err := c.Truncate(canceledCtx)
		require.ErrorIs(t, err, context.Canceled)
	}

	// Timeout err should open the CB after more than 2 failures;
	// the fourth call must hit the already-open breaker.
	for i := 0; i < 4; i++ {
		err := c.Truncate(timedOutCtx)
		if i > 2 {
			require.ErrorIs(t, err, gobreaker.ErrOpenState)
		} else {
			require.ErrorIs(t, err, context.DeadlineExceeded)
		}
	}

	time.Sleep(200 * time.Millisecond)

	// CB should be half-open. If the first command fails, the CB will be Open again
	err := c.Truncate(timedOutCtx)
	require.ErrorIs(t, err, context.DeadlineExceeded)
	err = c.Truncate(timedOutCtx)
	require.ErrorIs(t, err, gobreaker.ErrOpenState)

	// Reset the CB to closed
	time.Sleep(200 * time.Millisecond)
	err = c.Truncate(ctx)
	require.NoError(t, err)

	// Exceed the ratio
	err = c.Truncate(timedOutCtx)
	require.ErrorIs(t, err, context.DeadlineExceeded)
	err = c.Truncate(ctx)
	require.ErrorIs(t, err, gobreaker.ErrOpenState)
}
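The table above pins down the trip predicate's behavior. For reference, a predicate consistent with all five cases looks roughly like the following sketch; cbConfig is a hypothetical stand-in for the package's CBConfig, whose real implementation (limiter.go, not shown in this diff) may add further guards:

package main

import (
	"fmt"

	"github.com/sony/gobreaker"
)

// cbConfig is a hypothetical stand-in for CBConfig; only the two
// fields exercised by the test table are modeled here.
type cbConfig struct {
	MaxConsecutiveFailures uint32
	MaxFailureRatio        float64
}

// readyToTrip mirrors what the table asserts: a zero-value config never
// trips ("disabled"); otherwise the breaker trips only when failures are
// strictly above either configured limit.
func (c cbConfig) readyToTrip(counts gobreaker.Counts) bool {
	if c.MaxConsecutiveFailures > 0 && counts.ConsecutiveFailures > c.MaxConsecutiveFailures {
		return true // "consecutive exceeded"
	}
	if c.MaxFailureRatio > 0 && counts.Requests > 0 &&
		float64(counts.TotalFailures)/float64(counts.Requests) > c.MaxFailureRatio {
		return true // "ratio exceeded"
	}
	return false // covers "disabled", "no failures" and "some failures"
}

func main() {
	cfg := cbConfig{MaxConsecutiveFailures: 5, MaxFailureRatio: 0.1}
	// 11 failures out of 100 requests is a ratio of 0.11 > 0.1: trips.
	fmt.Println(cfg.readyToTrip(gobreaker.Counts{Requests: 100, ConsecutiveFailures: 1, TotalFailures: 11}))
}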
157
internal/cache/connector/redis/connector.go
vendored
Normal file
@ -0,0 +1,157 @@
package redis

import (
	"crypto/tls"
	"time"

	"github.com/redis/go-redis/v9"
)

type Config struct {
	Enabled bool

	// The network type, either tcp or unix.
	// Default is tcp.
	Network string
	// host:port address.
	Addr string
	// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
	ClientName string
	// Use the specified Username to authenticate the current connection
	// with one of the connections defined in the ACL list when connecting
	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string
	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
	// or the User Password when connecting to a Redis 6.0 instance, or greater,
	// that is using the Redis ACL system.
	Password string
	// Each ZITADEL cache uses an incremental DB namespace.
	// This option offsets the first DB so it doesn't conflict with other databases on the same server.
	// Note that ZITADEL uses the FLUSHDB command to truncate a cache.
	// This can have destructive consequences when overlapping DB namespaces are used.
	DBOffset int

	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Supported values:
	//   - `0` - default timeout (3 seconds).
	//   - `-1` - no timeout (block indefinitely).
	//   - `-2` - disables SetReadDeadline calls completely.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking. Supported values:
	//   - `0` - default timeout (3 seconds).
	//   - `-1` - no timeout (block indefinitely).
	//   - `-2` - disables SetWriteDeadline calls completely.
	WriteTimeout time.Duration

	// Type of connection pool.
	// true for FIFO pool, false for LIFO pool.
	// Note that FIFO has slightly higher overhead compared to LIFO,
	// but it helps closing idle connections faster reducing the pool size.
	PoolFIFO bool
	// Base number of socket connections.
	// Default is 10 connections for every available CPU as reported by runtime.GOMAXPROCS.
	// If there are not enough connections in the pool, new connections will be allocated
	// in excess of PoolSize; you can limit this through MaxActiveConns.
	PoolSize int
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration
	// Minimum number of idle connections, which is useful when establishing
	// new connections is slow.
	// Default is 0; idle connections are not closed by default.
	MinIdleConns int
	// Maximum number of idle connections.
	// Default is 0; idle connections are not closed by default.
	MaxIdleConns int
	// Maximum number of connections allocated by the pool at a given time.
	// When zero, there is no limit on the number of connections in the pool.
	MaxActiveConns int
	// ConnMaxIdleTime is the maximum amount of time a connection may be idle.
	// Should be less than server's timeout.
	//
	// Expired connections may be closed lazily before reuse.
	// If d <= 0, connections are not closed due to a connection's idle time.
	//
	// Default is 30 minutes. -1 disables idle timeout check.
	ConnMaxIdleTime time.Duration
	// ConnMaxLifetime is the maximum amount of time a connection may be reused.
	//
	// Expired connections may be closed lazily before reuse.
	// If <= 0, connections are not closed due to a connection's age.
	//
	// Default is to not close idle connections.
	ConnMaxLifetime time.Duration

	EnableTLS bool

	// Disable set-lib on connect. Default is false.
	DisableIndentity bool

	// Add suffix to client name. Default is empty.
	IdentitySuffix string

	CircuitBreaker *CBConfig
}

type Connector struct {
	*redis.Client
	Config Config
}

func NewConnector(config Config) *Connector {
	if !config.Enabled {
		return nil
	}
	return &Connector{
		Client: redis.NewClient(optionsFromConfig(config)),
		Config: config,
	}
}

func optionsFromConfig(c Config) *redis.Options {
	opts := &redis.Options{
		Network:               c.Network,
		Addr:                  c.Addr,
		ClientName:            c.ClientName,
		Protocol:              3,
		Username:              c.Username,
		Password:              c.Password,
		MaxRetries:            c.MaxRetries,
		MinRetryBackoff:       c.MinRetryBackoff,
		MaxRetryBackoff:       c.MaxRetryBackoff,
		DialTimeout:           c.DialTimeout,
		ReadTimeout:           c.ReadTimeout,
		WriteTimeout:          c.WriteTimeout,
		ContextTimeoutEnabled: true,
		PoolFIFO:              c.PoolFIFO,
		PoolTimeout:           c.PoolTimeout,
		MinIdleConns:          c.MinIdleConns,
		MaxIdleConns:          c.MaxIdleConns,
		MaxActiveConns:        c.MaxActiveConns,
		ConnMaxIdleTime:       c.ConnMaxIdleTime,
		ConnMaxLifetime:       c.ConnMaxLifetime,
		DisableIndentity:      c.DisableIndentity,
		IdentitySuffix:        c.IdentitySuffix,
		Limiter:               newLimiter(c.CircuitBreaker, c.MaxActiveConns),
	}
	if c.EnableTLS {
		opts.TLSConfig = new(tls.Config)
	}
	return opts
}
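A minimal usage sketch of this connector from inside the module (the package is internal, and the address is illustrative); note that NewConnector returns nil when the config is disabled, which callers must handle:

package main

import (
	"context"
	"log"

	"github.com/zitadel/zitadel/internal/cache/connector/redis"
)

func main() {
	conn := redis.NewConnector(redis.Config{
		Enabled: true,
		Network: "tcp",
		Addr:    "localhost:6379", // illustrative address
	})
	if conn == nil {
		log.Fatal("redis cache connector is disabled")
	}
	defer conn.Close()

	// The embedded *redis.Client is usable directly, e.g. for a health check.
	if err := conn.Ping(context.Background()).Err(); err != nil {
		log.Fatal(err)
	}
}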
29
internal/cache/connector/redis/get.lua
vendored
Normal file
@ -0,0 +1,29 @@
local result = redis.call("GET", KEYS[1])
if result == false then
    return nil
end
local object_id = tostring(result)

local object = getCall("HGET", object_id, "object")
if object == nil then
    -- object expired, but there are keys that need to be cleaned up
    remove(object_id)
    return nil
end

-- max-age must be checked manually
local expiry = getCall("HGET", object_id, "expiry")
if not (expiry == nil) and expiry > 0 then
    if getTime() > expiry then
        remove(object_id)
        return nil
    end
end

local usage_lifetime = getCall("HGET", object_id, "usage_lifetime")
-- reset usage based TTL
if not (usage_lifetime == nil) and tonumber(usage_lifetime) > 0 then
    redis.call('EXPIRE', object_id, usage_lifetime)
end

return object
9
internal/cache/connector/redis/invalidate.lua
vendored
Normal file
@ -0,0 +1,9 @@
local n = #KEYS
for i = 1, n do
    local result = redis.call("GET", KEYS[i])
    if result == false then
        return nil
    end
    local object_id = tostring(result)
    remove(object_id)
end
172
internal/cache/connector/redis/redis.go
vendored
Normal file
@ -0,0 +1,172 @@
package redis

import (
	"context"
	_ "embed"
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/redis/go-redis/v9"

	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/telemetry/tracing"
)

var (
	//go:embed _select.lua
	selectComponent string
	//go:embed _util.lua
	utilComponent string
	//go:embed _remove.lua
	removeComponent string
	//go:embed set.lua
	setScript string
	//go:embed get.lua
	getScript string
	//go:embed invalidate.lua
	invalidateScript string

	// Don't mind the creative "import"
	setParsed        = redis.NewScript(strings.Join([]string{selectComponent, utilComponent, setScript}, "\n"))
	getParsed        = redis.NewScript(strings.Join([]string{selectComponent, utilComponent, removeComponent, getScript}, "\n"))
	invalidateParsed = redis.NewScript(strings.Join([]string{selectComponent, utilComponent, removeComponent, invalidateScript}, "\n"))
)

type redisCache[I, K comparable, V cache.Entry[I, K]] struct {
	db        int
	config    *cache.Config
	indices   []I
	connector *Connector
	logger    *slog.Logger
}

// NewCache returns a cache that stores and retrieves objects using a single Redis server.
func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
	return &redisCache[I, K, V]{
		config:    &config,
		db:        db,
		indices:   indices,
		connector: client,
		logger:    config.Log.Slog(),
	}
}

func (c *redisCache[I, K, V]) Set(ctx context.Context, value V) {
	if _, err := c.set(ctx, value); err != nil {
		c.logger.ErrorContext(ctx, "redis cache set", "err", err)
	}
}

func (c *redisCache[I, K, V]) set(ctx context.Context, value V) (objectID string, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	// Internal ID used for the object
	objectID = uuid.NewString()
	keys := []string{objectID}
	// flatten the secondary keys
	for _, index := range c.indices {
		keys = append(keys, c.redisIndexKeys(index, value.Keys(index)...)...)
	}
	var buf strings.Builder
	err = json.NewEncoder(&buf).Encode(value)
	if err != nil {
		return "", err
	}
	err = setParsed.Run(ctx, c.connector, keys,
		c.db,                                   // DB namespace
		buf.String(),                           // object
		int64(c.config.LastUseAge/time.Second), // usage_lifetime
		int64(c.config.MaxAge/time.Second),     // max_age
	).Err()
	// redis.Nil is always returned because the script doesn't have a return value.
	if err != nil && !errors.Is(err, redis.Nil) {
		return "", err
	}
	return objectID, nil
}

func (c *redisCache[I, K, V]) Get(ctx context.Context, index I, key K) (value V, ok bool) {
	var (
		obj any
		err error
	)
	ctx, span := tracing.NewSpan(ctx)
	defer func() {
		if errors.Is(err, redis.Nil) {
			err = nil
		}
		span.EndWithError(err)
	}()

	logger := c.logger.With("index", index, "key", key)
	obj, err = getParsed.Run(ctx, c.connector, c.redisIndexKeys(index, key), c.db).Result()
	if err != nil && !errors.Is(err, redis.Nil) {
		logger.ErrorContext(ctx, "redis cache get", "err", err)
		return value, false
	}
	data, ok := obj.(string)
	if !ok {
		logger.With("err", err).InfoContext(ctx, "redis cache miss")
		return value, false
	}
	err = json.NewDecoder(strings.NewReader(data)).Decode(&value)
	if err != nil {
		logger.ErrorContext(ctx, "redis cache get", "err", fmt.Errorf("decode: %w", err))
		return value, false
	}
	return value, true
}

func (c *redisCache[I, K, V]) Invalidate(ctx context.Context, index I, key ...K) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	if len(key) == 0 {
		return nil
	}
	err = invalidateParsed.Run(ctx, c.connector, c.redisIndexKeys(index, key...), c.db).Err()
	// redis.Nil is always returned because the script doesn't have a return value.
	if err != nil && !errors.Is(err, redis.Nil) {
		return err
	}
	return nil
}

func (c *redisCache[I, K, V]) Delete(ctx context.Context, index I, key ...K) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	if len(key) == 0 {
		return nil
	}
	pipe := c.connector.Pipeline()
	pipe.Select(ctx, c.db)
	pipe.Del(ctx, c.redisIndexKeys(index, key...)...)
	_, err = pipe.Exec(ctx)
	return err
}

func (c *redisCache[I, K, V]) Truncate(ctx context.Context) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	pipe := c.connector.Pipeline()
	pipe.Select(ctx, c.db)
	pipe.FlushDB(ctx)
	_, err = pipe.Exec(ctx)
	return err
}

func (c *redisCache[I, K, V]) redisIndexKeys(index I, keys ...K) []string {
	out := make([]string, len(keys))
	for i, k := range keys {
		out[i] = fmt.Sprintf("%v:%v", index, k)
	}
	return out
}
721
internal/cache/connector/redis/redis_test.go
vendored
Normal file
@ -0,0 +1,721 @@
package redis

import (
	"context"
	"testing"
	"time"

	"github.com/alicebob/miniredis/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/internal/cache"
)

type testIndex int

const (
	testIndexID testIndex = iota
	testIndexName
)

const (
	testDB = 99
)

var testIndices = []testIndex{
	testIndexID,
	testIndexName,
}

type testObject struct {
	ID   string
	Name []string
}

func (o *testObject) Keys(index testIndex) []string {
	switch index {
	case testIndexID:
		return []string{o.ID}
	case testIndexName:
		return o.Name
	default:
		return nil
	}
}

func Test_redisCache_set(t *testing.T) {
	type args struct {
		ctx   context.Context
		value *testObject
	}
	tests := []struct {
		name       string
		config     cache.Config
		args       args
		assertions func(t *testing.T, s *miniredis.Miniredis, objectID string)
		wantErr    error
	}{
		{
			name:   "ok",
			config: cache.Config{},
			args: args{
				ctx: context.Background(),
				value: &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				},
			},
			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
				s.CheckGet(t, "0:one", objectID)
				s.CheckGet(t, "1:foo", objectID)
				s.CheckGet(t, "1:bar", objectID)
				assert.Empty(t, s.HGet(objectID, "expiry"))
				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
			},
		},
		{
			name: "with last use TTL",
			config: cache.Config{
				LastUseAge: time.Second,
			},
			args: args{
				ctx: context.Background(),
				value: &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				},
			},
			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
				s.CheckGet(t, "0:one", objectID)
				s.CheckGet(t, "1:foo", objectID)
				s.CheckGet(t, "1:bar", objectID)
				assert.Empty(t, s.HGet(objectID, "expiry"))
				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
				assert.Positive(t, s.TTL(objectID))

				s.FastForward(2 * time.Second)
				v, err := s.Get(objectID)
				require.Error(t, err)
				assert.Empty(t, v)
			},
		},
		{
			name: "with last use TTL and max age",
			config: cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			args: args{
				ctx: context.Background(),
				value: &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				},
			},
			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
				s.CheckGet(t, "0:one", objectID)
				s.CheckGet(t, "1:foo", objectID)
				s.CheckGet(t, "1:bar", objectID)
				assert.NotEmpty(t, s.HGet(objectID, "expiry"))
				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
				assert.Positive(t, s.TTL(objectID))

				s.FastForward(2 * time.Second)
				v, err := s.Get(objectID)
				require.Error(t, err)
				assert.Empty(t, v)
			},
		},
		{
			name: "with max age TTL",
			config: cache.Config{
				MaxAge: time.Minute,
			},
			args: args{
				ctx: context.Background(),
				value: &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				},
			},
			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
				s.CheckGet(t, "0:one", objectID)
				s.CheckGet(t, "1:foo", objectID)
				s.CheckGet(t, "1:bar", objectID)
				assert.Empty(t, s.HGet(objectID, "expiry"))
				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
				assert.Positive(t, s.TTL(objectID))

				s.FastForward(2 * time.Minute)
				v, err := s.Get(objectID)
				require.Error(t, err)
				assert.Empty(t, v)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, server := prepareCache(t, tt.config)
			rc := c.(*redisCache[testIndex, string, *testObject])
			objectID, err := rc.set(tt.args.ctx, tt.args.value)
			require.ErrorIs(t, err, tt.wantErr)
			t.Log(rc.connector.HGetAll(context.Background(), objectID))
			tt.assertions(t, server, objectID)
		})
	}
}

func Test_redisCache_Get(t *testing.T) {
	type args struct {
		ctx   context.Context
		index testIndex
		key   string
	}
	tests := []struct {
		name        string
		config      cache.Config
		preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis)
		args        args
		want        *testObject
		wantOK      bool
	}{
		{
			name:   "connection error",
			config: cache.Config{},
			preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				s.RequireAuth("foobar")
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   "foo",
			},
			wantOK: false,
		},
		{
			name:   "get by ID",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   "one",
			},
			want: &testObject{
				ID:   "one",
				Name: []string{"foo", "bar"},
			},
			wantOK: true,
		},
		{
			name:   "get by name",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   "foo",
			},
			want: &testObject{
				ID:   "one",
				Name: []string{"foo", "bar"},
			},
			wantOK: true,
		},
		{
			name: "usage timeout",
			config: cache.Config{
				LastUseAge: time.Minute,
			},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
				_, ok := c.Get(context.Background(), testIndexID, "one")
				require.True(t, ok)
				s.FastForward(2 * time.Minute)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   "foo",
			},
			want:   nil,
			wantOK: false,
		},
		{
			name: "max age timeout",
			config: cache.Config{
				MaxAge: time.Minute,
			},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
				_, ok := c.Get(context.Background(), testIndexID, "one")
				require.True(t, ok)
				s.FastForward(2 * time.Minute)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   "foo",
			},
			want:   nil,
			wantOK: false,
		},
		{
			name:   "not found",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   "spanac",
			},
			wantOK: false,
		},
		{
			name:   "json decode error",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
				objectID, err := s.Get(c.(*redisCache[testIndex, string, *testObject]).redisIndexKeys(testIndexID, "one")[0])
				require.NoError(t, err)
				s.HSet(objectID, "object", "~~~")
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   "one",
			},
			wantOK: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, server := prepareCache(t, tt.config)
			tt.preparation(t, c, server)
			t.Log(server.Keys())

			got, ok := c.Get(tt.args.ctx, tt.args.index, tt.args.key)
			require.Equal(t, tt.wantOK, ok)
			assert.Equal(t, tt.want, got)
		})
	}
}

func Test_redisCache_Invalidate(t *testing.T) {
	type args struct {
		ctx   context.Context
		index testIndex
		key   []string
	}
	tests := []struct {
		name        string
		config      cache.Config
		preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis)
		assertions  func(t *testing.T, c cache.Cache[testIndex, string, *testObject])
		args        args
		wantErr     bool
	}{
		{
			name:   "connection error",
			config: cache.Config{},
			preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				s.RequireAuth("foobar")
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"foo"},
			},
			wantErr: true,
		},
		{
			name:   "no keys, noop",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   []string{},
			},
			wantErr: false,
		},
		{
			name:   "invalidate by ID",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.False(t, ok)
				assert.Nil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.False(t, ok)
				assert.Nil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   []string{"one"},
			},
			wantErr: false,
		},
		{
			name:   "invalidate by name",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.False(t, ok)
				assert.Nil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.False(t, ok)
				assert.Nil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"foo"},
			},
			wantErr: false,
		},
		{
			name: "invalidate after timeout",
			config: cache.Config{
				LastUseAge: time.Minute,
			},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
				_, ok := c.Get(context.Background(), testIndexID, "one")
				require.True(t, ok)
				s.FastForward(2 * time.Minute)
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.False(t, ok)
				assert.Nil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.False(t, ok)
				assert.Nil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"foo"},
			},
			wantErr: false,
		},
		{
			name:   "not found",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.True(t, ok)
				assert.NotNil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.True(t, ok)
				assert.NotNil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"spanac"},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, server := prepareCache(t, tt.config)
			tt.preparation(t, c, server)
			t.Log(server.Keys())

			err := c.Invalidate(tt.args.ctx, tt.args.index, tt.args.key...)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
		})
	}
}

func Test_redisCache_Delete(t *testing.T) {
	type args struct {
		ctx   context.Context
		index testIndex
		key   []string
	}
	tests := []struct {
		name        string
		config      cache.Config
		preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis)
		assertions  func(t *testing.T, c cache.Cache[testIndex, string, *testObject])
		args        args
		wantErr     bool
	}{
		{
			name:   "connection error",
			config: cache.Config{},
			preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				s.RequireAuth("foobar")
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"foo"},
			},
			wantErr: true,
		},
		{
			name:   "no keys, noop",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   []string{},
			},
			wantErr: false,
		},
		{
			name:   "delete ID",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.False(t, ok)
				assert.Nil(t, obj)
				// Get by name should still work
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.True(t, ok)
				assert.NotNil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexID,
				key:   []string{"one"},
			},
			wantErr: false,
		},
		{
			name:   "delete name",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				// get by ID should still work
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.True(t, ok)
				assert.NotNil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.False(t, ok)
				assert.Nil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"foo"},
			},
			wantErr: false,
		},
		{
			name:   "not found",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.True(t, ok)
				assert.NotNil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "foo")
				assert.True(t, ok)
				assert.NotNil(t, obj)
			},
			args: args{
				ctx:   context.Background(),
				index: testIndexName,
				key:   []string{"spanac"},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, server := prepareCache(t, tt.config)
			tt.preparation(t, c, server)
			t.Log(server.Keys())

			err := c.Delete(tt.args.ctx, tt.args.index, tt.args.key...)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
		})
	}
}

func Test_redisCache_Truncate(t *testing.T) {
	type args struct {
		ctx context.Context
	}
	tests := []struct {
		name        string
		config      cache.Config
		preparation func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis)
		assertions  func(t *testing.T, c cache.Cache[testIndex, string, *testObject])
		args        args
		wantErr     bool
	}{
		{
			name:   "connection error",
			config: cache.Config{},
			preparation: func(_ *testing.T, _ cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				s.RequireAuth("foobar")
			},
			args: args{
				ctx: context.Background(),
			},
			wantErr: true,
		},
		{
			name:   "ok",
			config: cache.Config{},
			preparation: func(t *testing.T, c cache.Cache[testIndex, string, *testObject], s *miniredis.Miniredis) {
				c.Set(context.Background(), &testObject{
					ID:   "one",
					Name: []string{"foo", "bar"},
				})
				c.Set(context.Background(), &testObject{
					ID:   "two",
					Name: []string{"Hello", "World"},
				})
			},
			assertions: func(t *testing.T, c cache.Cache[testIndex, string, *testObject]) {
				obj, ok := c.Get(context.Background(), testIndexID, "one")
				assert.False(t, ok)
				assert.Nil(t, obj)
				obj, ok = c.Get(context.Background(), testIndexName, "World")
				assert.False(t, ok)
				assert.Nil(t, obj)
			},
			args: args{
				ctx: context.Background(),
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, server := prepareCache(t, tt.config)
			tt.preparation(t, c, server)
			t.Log(server.Keys())

			err := c.Truncate(tt.args.ctx)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
		})
	}
}

func prepareCache(t *testing.T, conf cache.Config, options ...func(*Config)) (cache.Cache[testIndex, string, *testObject], *miniredis.Miniredis) {
	conf.Log = &logging.Config{
		Level:     "debug",
		AddSource: true,
	}
	server := miniredis.RunT(t)
	server.Select(testDB)

	connConfig := Config{
		Enabled:          true,
		Network:          "tcp",
		Addr:             server.Addr(),
		DisableIndentity: true,
	}
	for _, option := range options {
		option(&connConfig)
	}
	connector := NewConnector(connConfig)
	t.Cleanup(func() {
		connector.Close()
		server.Close()
	})
	c := NewCache[testIndex, string, *testObject](conf, connector, testDB, testIndices)
	return c, server
}

func withCircuitBreakerOption(cb *CBConfig) func(*Config) {
	return func(c *Config) {
		c.CircuitBreaker = cb
	}
}
27
internal/cache/connector/redis/set.lua
vendored
Normal file
@ -0,0 +1,27 @@
-- KEYS: [1]: object_id; [>1]: index keys.
local object_id = KEYS[1]
local object = ARGV[2]
local usage_lifetime = tonumber(ARGV[3]) -- usage based lifetime in seconds
local max_age = tonumber(ARGV[4]) -- max age lifetime in seconds

redis.call("HSET", object_id, "object", object)
if usage_lifetime > 0 then
    redis.call("HSET", object_id, "usage_lifetime", usage_lifetime)
    -- enable usage based TTL
    redis.call("EXPIRE", object_id, usage_lifetime)
    if max_age > 0 then
        -- store max_age in the hash so Get can remove the object once expired
        local expiry = getTime() + max_age
        redis.call("HSET", object_id, "expiry", expiry)
    end
elseif max_age > 0 then
    -- enable max_age based TTL
    redis.call("EXPIRE", object_id, max_age)
end

local n = #KEYS
local setKey = keySetKey(object_id)
for i = 2, n do -- offset to the second element to skip object_id
    redis.call("SADD", setKey, KEYS[i]) -- set of all keys used for housekeeping
    redis.call("SET", KEYS[i], object_id) -- key to object_id mapping
end
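A worked example of the branches above, with illustrative values usage_lifetime=300 and max_age=3600; the object ends up with both a sliding 300-second TTL and an absolute deadline stored in the hash:

-- HSET   <object_id> object <json>
-- HSET   <object_id> usage_lifetime 300
-- EXPIRE <object_id> 300               -- sliding TTL, re-armed by get.lua
-- HSET   <object_id> expiry <now+3600> -- absolute deadline, enforced by get.lua
-- SADD   <keySetKey(object_id)> 0:one 1:foo 1:bar
-- SET    0:one <object_id>             -- one mapping per index key
-- With only max_age set, the script instead runs EXPIRE <object_id> 3600
-- and stores no expiry field.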
98
internal/cache/connector_enumer.go
vendored
Normal file
@ -0,0 +1,98 @@
// Code generated by "enumer -type Connector -transform snake -trimprefix Connector -linecomment -text"; DO NOT EDIT.

package cache

import (
	"fmt"
	"strings"
)

const _ConnectorName = "memorypostgresredis"

var _ConnectorIndex = [...]uint8{0, 0, 6, 14, 19}

const _ConnectorLowerName = "memorypostgresredis"

func (i Connector) String() string {
	if i < 0 || i >= Connector(len(_ConnectorIndex)-1) {
		return fmt.Sprintf("Connector(%d)", i)
	}
	return _ConnectorName[_ConnectorIndex[i]:_ConnectorIndex[i+1]]
}

// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _ConnectorNoOp() {
	var x [1]struct{}
	_ = x[ConnectorUnspecified-(0)]
	_ = x[ConnectorMemory-(1)]
	_ = x[ConnectorPostgres-(2)]
	_ = x[ConnectorRedis-(3)]
}

var _ConnectorValues = []Connector{ConnectorUnspecified, ConnectorMemory, ConnectorPostgres, ConnectorRedis}

var _ConnectorNameToValueMap = map[string]Connector{
	_ConnectorName[0:0]:       ConnectorUnspecified,
	_ConnectorLowerName[0:0]:  ConnectorUnspecified,
	_ConnectorName[0:6]:       ConnectorMemory,
	_ConnectorLowerName[0:6]:  ConnectorMemory,
	_ConnectorName[6:14]:      ConnectorPostgres,
	_ConnectorLowerName[6:14]: ConnectorPostgres,
	_ConnectorName[14:19]:      ConnectorRedis,
	_ConnectorLowerName[14:19]: ConnectorRedis,
}

var _ConnectorNames = []string{
	_ConnectorName[0:0],
	_ConnectorName[0:6],
	_ConnectorName[6:14],
	_ConnectorName[14:19],
}

// ConnectorString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func ConnectorString(s string) (Connector, error) {
	if val, ok := _ConnectorNameToValueMap[s]; ok {
		return val, nil
	}

	if val, ok := _ConnectorNameToValueMap[strings.ToLower(s)]; ok {
		return val, nil
	}
	return 0, fmt.Errorf("%s does not belong to Connector values", s)
}

// ConnectorValues returns all values of the enum
func ConnectorValues() []Connector {
	return _ConnectorValues
}

// ConnectorStrings returns a slice of all String values of the enum
func ConnectorStrings() []string {
	strs := make([]string, len(_ConnectorNames))
	copy(strs, _ConnectorNames)
	return strs
}

// IsAConnector returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Connector) IsAConnector() bool {
	for _, v := range _ConnectorValues {
		if i == v {
			return true
		}
	}
	return false
}

// MarshalText implements the encoding.TextMarshaler interface for Connector
func (i Connector) MarshalText() ([]byte, error) {
	return []byte(i.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface for Connector
func (i *Connector) UnmarshalText(text []byte) error {
	var err error
	*i, err = ConnectorString(string(text))
	return err
}
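The generated code gives Connector a stable textual form, which is what allows the cache connector to be selected by name in configuration. A quick round-trip sketch (from inside the module, since the package is internal):

package main

import (
	"fmt"

	"github.com/zitadel/zitadel/internal/cache"
)

func main() {
	// ConnectorString is case-insensitive thanks to the lower-name table.
	c, err := cache.ConnectorString("redis")
	if err != nil {
		panic(err)
	}
	fmt.Println(c == cache.ConnectorRedis) // true
	fmt.Println(c.String())                // "redis"

	// MarshalText/UnmarshalText make the enum usable in config decoding.
	text, _ := c.MarshalText()
	fmt.Println(string(text)) // "redis"
}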
14
internal/cache/pruner.go
vendored
@ -31,22 +31,22 @@ type AutoPruneConfig struct {
	Timeout time.Duration
}

func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, name string) (close func()) {
	return c.startAutoPrune(background, pruner, name, clockwork.NewRealClock())
func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, purpose Purpose) (close func()) {
	return c.startAutoPrune(background, pruner, purpose, clockwork.NewRealClock())
}

func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, name string, clock clockwork.Clock) (close func()) {
func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, purpose Purpose, clock clockwork.Clock) (close func()) {
	if c.Interval <= 0 {
		return func() {}
	}
	background, cancel := context.WithCancel(background)
	// randomize the first interval
	timer := clock.NewTimer(time.Duration(rand.Int63n(int64(c.Interval))))
	go c.pruneTimer(background, pruner, name, timer)
	go c.pruneTimer(background, pruner, purpose, timer)
	return cancel
}

func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, name string, timer clockwork.Timer) {
func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, purpose Purpose, timer clockwork.Timer) {
	defer func() {
		if !timer.Stop() {
			<-timer.Chan()
@ -58,9 +58,9 @@ func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner,
		case <-background.Done():
			return
		case <-timer.Chan():
			timer.Reset(c.Interval)
			err := c.doPrune(background, pruner)
			logging.OnError(err).WithField("name", name).Error("cache auto prune")
			logging.OnError(err).WithField("purpose", purpose).Error("cache auto prune")
			timer.Reset(c.Interval)
		}
	}
}
2
internal/cache/pruner_test.go
vendored
@ -30,7 +30,7 @@ func TestAutoPruneConfig_startAutoPrune(t *testing.T) {
		called: make(chan struct{}),
	}
	clock := clockwork.NewFakeClock()
	close := c.startAutoPrune(ctx, &pruner, "foo", clock)
	close := c.startAutoPrune(ctx, &pruner, PurposeAuthzInstance, clock)
	defer close()
	clock.Advance(time.Second)
82
internal/cache/purpose_enumer.go
vendored
Normal file
@ -0,0 +1,82 @@
// Code generated by "enumer -type Purpose -transform snake -trimprefix Purpose"; DO NOT EDIT.

package cache

import (
	"fmt"
	"strings"
)

const _PurposeName = "unspecifiedauthz_instancemilestones"

var _PurposeIndex = [...]uint8{0, 11, 25, 35}

const _PurposeLowerName = "unspecifiedauthz_instancemilestones"

func (i Purpose) String() string {
	if i < 0 || i >= Purpose(len(_PurposeIndex)-1) {
		return fmt.Sprintf("Purpose(%d)", i)
	}
	return _PurposeName[_PurposeIndex[i]:_PurposeIndex[i+1]]
}

// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _PurposeNoOp() {
	var x [1]struct{}
	_ = x[PurposeUnspecified-(0)]
	_ = x[PurposeAuthzInstance-(1)]
	_ = x[PurposeMilestones-(2)]
}

var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones}

var _PurposeNameToValueMap = map[string]Purpose{
	_PurposeName[0:11]:       PurposeUnspecified,
	_PurposeLowerName[0:11]:  PurposeUnspecified,
	_PurposeName[11:25]:      PurposeAuthzInstance,
	_PurposeLowerName[11:25]: PurposeAuthzInstance,
	_PurposeName[25:35]:      PurposeMilestones,
	_PurposeLowerName[25:35]: PurposeMilestones,
}

var _PurposeNames = []string{
	_PurposeName[0:11],
	_PurposeName[11:25],
	_PurposeName[25:35],
}

// PurposeString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func PurposeString(s string) (Purpose, error) {
	if val, ok := _PurposeNameToValueMap[s]; ok {
		return val, nil
	}

	if val, ok := _PurposeNameToValueMap[strings.ToLower(s)]; ok {
		return val, nil
	}
	return 0, fmt.Errorf("%s does not belong to Purpose values", s)
}

// PurposeValues returns all values of the enum
func PurposeValues() []Purpose {
	return _PurposeValues
}

// PurposeStrings returns a slice of all String values of the enum
func PurposeStrings() []string {
	strs := make([]string, len(_PurposeNames))
	copy(strs, _PurposeNames)
	return strs
}

// IsAPurpose returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Purpose) IsAPurpose() bool {
	for _, v := range _PurposeValues {
		if i == v {
			return true
		}
	}
	return false
}
@ -2,81 +2,20 @@ package command

import (
	"context"
	"fmt"
	"strings"

	"github.com/zitadel/zitadel/internal/cache"
	"github.com/zitadel/zitadel/internal/cache/gomap"
	"github.com/zitadel/zitadel/internal/cache/noop"
	"github.com/zitadel/zitadel/internal/cache/pg"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/cache/connector"
)

type Caches struct {
	connectors *cacheConnectors
	milestones cache.Cache[milestoneIndex, string, *MilestonesReached]
}

func startCaches(background context.Context, conf *cache.CachesConfig, client *database.DB) (_ *Caches, err error) {
	caches := &Caches{
		milestones: noop.NewCache[milestoneIndex, string, *MilestonesReached](),
	}
	if conf == nil {
		return caches, nil
	}
	caches.connectors, err = startCacheConnectors(background, conf, client)
	if err != nil {
		return nil, err
	}
	caches.milestones, err = startCache[milestoneIndex, string, *MilestonesReached](background, []milestoneIndex{milestoneIndexInstanceID}, "milestones", conf.Instance, caches.connectors)
func startCaches(background context.Context, connectors connector.Connectors) (_ *Caches, err error) {
	caches := new(Caches)
	caches.milestones, err = connector.StartCache[milestoneIndex, string, *MilestonesReached](background, []milestoneIndex{milestoneIndexInstanceID}, cache.PurposeMilestones, connectors.Config.Milestones, connectors)
	if err != nil {
		return nil, err
	}
	return caches, nil
}

type cacheConnectors struct {
	memory   *cache.AutoPruneConfig
	postgres *pgxPoolCacheConnector
}

type pgxPoolCacheConnector struct {
	*cache.AutoPruneConfig
	client *database.DB
}

func startCacheConnectors(_ context.Context, conf *cache.CachesConfig, client *database.DB) (_ *cacheConnectors, err error) {
	connectors := new(cacheConnectors)
	if conf.Connectors.Memory.Enabled {
		connectors.memory = &conf.Connectors.Memory.AutoPrune
	}
	if conf.Connectors.Postgres.Enabled {
		connectors.postgres = &pgxPoolCacheConnector{
			AutoPruneConfig: &conf.Connectors.Postgres.AutoPrune,
			client:          client,
		}
	}
	return connectors, nil
}

func startCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, name string, conf *cache.CacheConfig, connectors *cacheConnectors) (cache.Cache[I, K, V], error) {
	if conf == nil || conf.Connector == "" {
		return noop.NewCache[I, K, V](), nil
	}
	if strings.EqualFold(conf.Connector, "memory") && connectors.memory != nil {
		c := gomap.NewCache[I, K, V](background, indices, *conf)
		connectors.memory.StartAutoPrune(background, c, name)
		return c, nil
	}
	if strings.EqualFold(conf.Connector, "postgres") && connectors.postgres != nil {
		client := connectors.postgres.client
		c, err := pg.NewCache[I, K, V](background, name, *conf, indices, client.Pool, client.Type())
		if err != nil {
			return nil, fmt.Errorf("query start cache: %w", err)
		}
		connectors.postgres.StartAutoPrune(background, c, name)
		return c, nil
	}

	return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
}
Some files were not shown because too many files have changed in this diff.