package start

import (
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"github.com/zitadel/logging"

	"github.com/zitadel/zitadel/cmd/encryption"
	"github.com/zitadel/zitadel/cmd/hooks"
	"github.com/zitadel/zitadel/internal/actions"
	admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
	internal_authz "github.com/zitadel/zitadel/internal/api/authz"
	"github.com/zitadel/zitadel/internal/api/http/middleware"
	"github.com/zitadel/zitadel/internal/api/oidc"
	"github.com/zitadel/zitadel/internal/api/saml"
	"github.com/zitadel/zitadel/internal/api/ui/console"
	"github.com/zitadel/zitadel/internal/api/ui/login"
	auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
	"github.com/zitadel/zitadel/internal/cache/connector"
	"github.com/zitadel/zitadel/internal/command"
	"github.com/zitadel/zitadel/internal/config/hook"
	"github.com/zitadel/zitadel/internal/config/network"
	"github.com/zitadel/zitadel/internal/config/systemdefaults"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
	"github.com/zitadel/zitadel/internal/id"
	"github.com/zitadel/zitadel/internal/logstore"
	"github.com/zitadel/zitadel/internal/notification/handlers"
	"github.com/zitadel/zitadel/internal/query/projection"
	static_config "github.com/zitadel/zitadel/internal/static/config"
	metrics "github.com/zitadel/zitadel/internal/telemetry/metrics/config"
	profiler "github.com/zitadel/zitadel/internal/telemetry/profiler/config"
	tracing "github.com/zitadel/zitadel/internal/telemetry/tracing/config"
)
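
// Config holds the runtime configuration used to start ZITADEL. It is
// populated by MustNewConfig from the viper instance carrying the runtime
// configuration (typically defaults.yaml, additional config files and
// environment variables).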
type Config struct {
	Log                 *logging.Config
	Port                uint16
	ExternalPort        uint16
	ExternalDomain      string
	ExternalSecure      bool
	TLS                 network.TLS
	// InstanceHostHeaders is an ordered list of headers (the first one sent wins)
	// used to match the request against an instance. For backward compatibility,
	// HTTP1HostHeader, HTTP2HostHeader and the forwarded, x-forwarded-for and
	// x-forwarded-host headers are checked afterwards as well.
	InstanceHostHeaders []string
	// PublicHostHeaders is an ordered list of headers (the first one sent wins)
	// used as the public host / domain, which is checked against the trusted
	// domains of the instance.
	PublicHostHeaders   []string
	HTTP2HostHeader     string
	HTTP1HostHeader     string
	WebAuthNName        string
	Database            database.Config
	Caches              *connector.CachesConfig
	Tracing             tracing.Config
	Metrics             metrics.Config
	Profiler            profiler.Config
	Projections         projection.Config
	// Notifications configures the workers that process notification request
	// and retry events.
	Notifications       handlers.WorkerConfig
	Auth                auth_es.Config
	Admin               admin_es.Config
	UserAgentCookie     *middleware.UserAgentCookieConfig
	OIDC                oidc.Config
	SAML                saml.Config
	Login               login.Config
	Console             console.Config
	AssetStorage        static_config.AssetStorageConfig
	InternalAuthZ       internal_authz.Config
	SystemDefaults      systemdefaults.SystemDefaults
	EncryptionKeys      *encryption.EncryptionKeyConfig
	DefaultInstance     command.InstanceSetup
	AuditLogRetention   time.Duration
	SystemAPIUsers      map[string]*internal_authz.SystemAPIUser
	CustomerPortal      string
	Machine             *id.Config
	Actions             *actions.Config
	Eventstore          *eventstore.Config
	LogStore            *logstore.Configs
	Quotas              *QuotasConfig
	Telemetry           *handlers.TelemetryPusherConfig
}
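
// QuotasConfig holds the quota related configuration. Access combines the
// log emitter settings with the access middleware settings (both squashed
// into the same configuration section); Execution configures the emitter
// for execution logs.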
type QuotasConfig struct {
	Access struct {
		logstore.EmitterConfig  `mapstructure:",squash"`
		middleware.AccessConfig `mapstructure:",squash"`
	}
	Execution *logstore.EmitterConfig
}
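
// MustNewConfig reads the ZITADEL runtime configuration from the given viper
// instance into a Config and exits the process if this fails. It registers
// decode hooks so that string values from the configuration are converted
// into the typed fields, sets up the logger, tracer, meter and profiler from
// the parsed configuration, and configures the ID generator and the actions
// HTTP client.
//
// A minimal usage sketch (assuming the viper instance has already been
// populated with the runtime configuration):
//
//	v := viper.GetViper()
//	config := MustNewConfig(v)
//	_ = config.Port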
func MustNewConfig(v *viper.Viper) *Config {
	config := new(Config)
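
	// The decode hooks below convert string values from the configuration
	// sources into the typed fields of Config, e.g. durations, RFC3339
	// timestamps, base64-encoded values, language tags, comma-separated
	// slices and enum values.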
	err := v.Unmarshal(config,
		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(
			hooks.SliceTypeStringDecode[*domain.CustomMessageText],
			hooks.SliceTypeStringDecode[internal_authz.RoleMapping],
			hooks.MapTypeStringDecode[string, *internal_authz.SystemAPIUser],
			hooks.MapHTTPHeaderStringDecode,
			database.DecodeHook,
			actions.HTTPConfigDecodeHook,
			hook.EnumHookFunc(internal_authz.MemberTypeString),
			hooks.MapTypeStringDecode[domain.Feature, any],
			hooks.SliceTypeStringDecode[*command.SetQuota],
			hook.Base64ToBytesHookFunc(),
			hook.TagToLanguageHookFunc(),
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToTimeHookFunc(time.RFC3339),
			mapstructure.StringToSliceHookFunc(","),
			mapstructure.TextUnmarshallerHookFunc(),
		)),
	)
	logging.OnError(err).Fatal("unable to read config")
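
	// Initialize logging and telemetry (tracing, metrics, profiling) from the
	// parsed configuration; any failure here is fatal at startup.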
	err = config.Log.SetLogger()
	logging.OnError(err).Fatal("unable to set logger")

	err = config.Tracing.NewTracer()
	logging.OnError(err).Fatal("unable to set tracer")

	err = config.Metrics.NewMeter()
	logging.OnError(err).Fatal("unable to set meter")

	err = config.Profiler.NewProfiler()
	logging.OnError(err).Fatal("unable to set profiler")
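
	// Apply process-wide settings from the parsed configuration: the machine
	// identification used by the ID generator and the HTTP configuration
	// used by actions.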
	id.Configure(config.Machine)
	actions.SetHTTPConfig(&config.Actions.HTTP)

	return config
}