// zitadel/cmd/start/start.go
package start

import (
"context"
"crypto/tls"
_ "embed"
"fmt"
"math"
"net/http"
"os"
"os/signal"
"slices"
"syscall"
"time"
clockpkg "github.com/benbjohnson/clock"
"github.com/common-nighthawk/go-figure"
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/zitadel/logging"
"github.com/zitadel/oidc/v3/pkg/op"
"github.com/zitadel/saml/pkg/provider"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"github.com/zitadel/zitadel/cmd/build"
"github.com/zitadel/zitadel/cmd/encryption"
"github.com/zitadel/zitadel/cmd/key"
cmd_tls "github.com/zitadel/zitadel/cmd/tls"
"github.com/zitadel/zitadel/internal/actions"
admin_es "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing"
"github.com/zitadel/zitadel/internal/api"
"github.com/zitadel/zitadel/internal/api/assets"
internal_authz "github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/api/grpc/admin"
"github.com/zitadel/zitadel/internal/api/grpc/auth"
feature_v2 "github.com/zitadel/zitadel/internal/api/grpc/feature/v2"
feature_v2beta "github.com/zitadel/zitadel/internal/api/grpc/feature/v2beta"
idp_v2 "github.com/zitadel/zitadel/internal/api/grpc/idp/v2"
"github.com/zitadel/zitadel/internal/api/grpc/management"
oidc_v2 "github.com/zitadel/zitadel/internal/api/grpc/oidc/v2"
oidc_v2beta "github.com/zitadel/zitadel/internal/api/grpc/oidc/v2beta"
org_v2 "github.com/zitadel/zitadel/internal/api/grpc/org/v2"
org_v2beta "github.com/zitadel/zitadel/internal/api/grpc/org/v2beta"
action_v3_alpha "github.com/zitadel/zitadel/internal/api/grpc/resources/action/v3alpha"
"github.com/zitadel/zitadel/internal/api/grpc/resources/debug_events/debug_events"
user_v3_alpha "github.com/zitadel/zitadel/internal/api/grpc/resources/user/v3alpha"
userschema_v3_alpha "github.com/zitadel/zitadel/internal/api/grpc/resources/userschema/v3alpha"
"github.com/zitadel/zitadel/internal/api/grpc/resources/webkey/v3"
session_v2 "github.com/zitadel/zitadel/internal/api/grpc/session/v2"
session_v2beta "github.com/zitadel/zitadel/internal/api/grpc/session/v2beta"
settings_v2 "github.com/zitadel/zitadel/internal/api/grpc/settings/v2"
settings_v2beta "github.com/zitadel/zitadel/internal/api/grpc/settings/v2beta"
"github.com/zitadel/zitadel/internal/api/grpc/system"
user_v2 "github.com/zitadel/zitadel/internal/api/grpc/user/v2"
user_v2beta "github.com/zitadel/zitadel/internal/api/grpc/user/v2beta"
http_util "github.com/zitadel/zitadel/internal/api/http"
"github.com/zitadel/zitadel/internal/api/http/middleware"
"github.com/zitadel/zitadel/internal/api/idp"
"github.com/zitadel/zitadel/internal/api/oidc"
"github.com/zitadel/zitadel/internal/api/robots_txt"
"github.com/zitadel/zitadel/internal/api/saml"
"github.com/zitadel/zitadel/internal/api/ui/console"
"github.com/zitadel/zitadel/internal/api/ui/console/path"
"github.com/zitadel/zitadel/internal/api/ui/login"
auth_es "github.com/zitadel/zitadel/internal/auth/repository/eventsourcing"
"github.com/zitadel/zitadel/internal/authz"
authz_repo "github.com/zitadel/zitadel/internal/authz/repository"
authz_es "github.com/zitadel/zitadel/internal/authz/repository/eventsourcing/eventstore"
"github.com/zitadel/zitadel/internal/cache/connector"
"github.com/zitadel/zitadel/internal/command"
"github.com/zitadel/zitadel/internal/crypto"
cryptoDB "github.com/zitadel/zitadel/internal/crypto/database"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/database/dialect"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore"
old_es "github.com/zitadel/zitadel/internal/eventstore/repository/sql"
new_es "github.com/zitadel/zitadel/internal/eventstore/v3"
"github.com/zitadel/zitadel/internal/i18n"
"github.com/zitadel/zitadel/internal/id"
"github.com/zitadel/zitadel/internal/integration/sink"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/logstore/emitters/access"
"github.com/zitadel/zitadel/internal/logstore/emitters/execution"
"github.com/zitadel/zitadel/internal/logstore/emitters/stdout"
"github.com/zitadel/zitadel/internal/logstore/record"
"github.com/zitadel/zitadel/internal/net"
"github.com/zitadel/zitadel/internal/notification"
"github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/static"
es_v4 "github.com/zitadel/zitadel/internal/v2/eventstore"
es_v4_pg "github.com/zitadel/zitadel/internal/v2/eventstore/postgres"
"github.com/zitadel/zitadel/internal/webauthn"
"github.com/zitadel/zitadel/openapi"
)
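
// New builds the cobra command that runs a ZITADEL instance. The optional
// server channel receives the wired-up *Server once startup completes, so an
// embedding caller (for example a test harness) can reuse the running
// components; passing nil skips that hand-off.
// Minimal usage sketch (assuming a parent cobra root command): rootCmd.AddCommand(New(nil))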
func New(server chan<- *Server) *cobra.Command {
start := &cobra.Command{
Use: "start",
Short: "starts ZITADEL instance",
Long: `starts ZITADEL.
Requirements:
- cockroachdb`,
RunE: func(cmd *cobra.Command, args []string) error {
err := cmd_tls.ModeFromFlag(cmd)
if err != nil {
return err
}
config := MustNewConfig(viper.GetViper())
masterKey, err := key.MasterKey(cmd)
if err != nil {
return err
}
return startZitadel(cmd.Context(), config, masterKey, server)
},
}
startFlags(start)
return start
}
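
// Server bundles the components wired up by startZitadel (database client,
// keys, eventstore, queries, commands, router, TLS config) together with the
// shutdown channel, so a caller receiving it over the channel passed to New
// can interact with the running instance.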
type Server struct {
Config *Config
DB *database.DB
KeyStorage crypto.KeyStorage
Keys *encryption.EncryptionKeys
Eventstore *eventstore.Eventstore
Queries *query.Queries
AuthzRepo authz_repo.Repository
Storage static.Storage
Commands *command.Commands
Router *mux.Router
TLSConfig *tls.Config
Shutdown chan<- os.Signal
}
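
// startZitadel wires up the database clients, eventstore, query and command
// sides, log/quota emitters, notification handlers and the APIs, then blocks
// in listen until a shutdown signal is received.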
func startZitadel(ctx context.Context, config *Config, masterKey string, server chan<- *Server) error {
showBasicInformation(config)
// sink Server is stubbed out in production builds, see function's godoc.
closeSink := sink.StartServer()
defer closeSink()
i18n.MustLoadSupportedLanguagesFromDir()
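// Distinct database clients are opened for queries, event pushing and
// projection spooling (see dialect.DBPurpose*).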
queryDBClient, err := database.Connect(config.Database, false, dialect.DBPurposeQuery)
if err != nil {
return fmt.Errorf("cannot start DB client for queries: %w", err)
}
esPusherDBClient, err := database.Connect(config.Database, false, dialect.DBPurposeEventPusher)
if err != nil {
return fmt.Errorf("cannot start client for event store pusher: %w", err)
}
projectionDBClient, err := database.Connect(config.Database, false, dialect.DBPurposeProjectionSpooler)
if err != nil {
return fmt.Errorf("cannot start client for projection spooler: %w", err)
}
keyStorage, err := cryptoDB.NewKeyStorage(queryDBClient, masterKey)
if err != nil {
return fmt.Errorf("cannot start key storage: %w", err)
}
keys, err := encryption.EnsureEncryptionKeys(ctx, config.EncryptionKeys, keyStorage)
if err != nil {
return err
}
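// The eventstore pushes events over its dedicated client; searching uses the
// v3 implementation on the query client, while the legacy SQL querier is kept
// alongside it. An additional v4 eventstore is created and its querier is
// handed to the query side below.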
config.Eventstore.Pusher = new_es.NewEventstore(esPusherDBClient)
config.Eventstore.Searcher = new_es.NewEventstore(queryDBClient)
config.Eventstore.Querier = old_es.NewCRDB(queryDBClient)
eventstoreClient := eventstore.NewEventstore(config.Eventstore)
eventstoreV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(queryDBClient, &es_v4_pg.Config{
MaxRetries: config.Eventstore.MaxRetries,
}))
sessionTokenVerifier := internal_authz.SessionTokenVerifier(keys.OIDC)
cacheConnectors, err := connector.StartConnectors(config.Caches, queryDBClient)
if err != nil {
return fmt.Errorf("unable to start caches: %w", err)
}
queries, err := query.StartQueries(
ctx,
eventstoreClient,
eventstoreV4.Querier,
queryDBClient,
projectionDBClient,
cacheConnectors,
config.Projections,
config.SystemDefaults,
keys.IDPConfig,
keys.OTP,
keys.OIDC,
keys.SAML,
keys.Target,
config.InternalAuthZ.RolePermissionMappings,
sessionTokenVerifier,
func(q *query.Queries) domain.PermissionCheck {
return func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, &authz_es.UserMembershipRepo{Queries: q}, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
},
config.AuditLogRetention,
config.SystemAPIUsers,
true,
)
if err != nil {
return fmt.Errorf("cannot start queries: %w", err)
}
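// The permission check used across the APIs resolves the configured
// role/permission mappings against the authz repository started here.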
authZRepo, err := authz.Start(queries, eventstoreClient, queryDBClient, keys.OIDC, config.ExternalSecure)
if err != nil {
return fmt.Errorf("error starting authz repo: %w", err)
}
permissionCheck := func(ctx context.Context, permission, orgID, resourceID string) (err error) {
return internal_authz.CheckPermission(ctx, authZRepo, config.InternalAuthZ.RolePermissionMappings, permission, orgID, resourceID)
}
storage, err := config.AssetStorage.NewStorage(queryDBClient.DB)
if err != nil {
return fmt.Errorf("cannot start asset storage client: %w", err)
}
webAuthNConfig := &webauthn.Config{
DisplayName: config.WebAuthNName,
ExternalSecure: config.ExternalSecure,
}
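// The command side (write model) shares the eventstore, caches, encryption
// keys and permission check with the query side configured above.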
commands, err := command.StartCommands(ctx,
eventstoreClient,
cacheConnectors,
config.SystemDefaults,
config.InternalAuthZ.RolePermissionMappings,
storage,
webAuthNConfig,
config.ExternalDomain,
config.ExternalSecure,
config.ExternalPort,
keys.IDPConfig,
keys.OTP,
keys.SMTP,
keys.SMS,
keys.User,
keys.DomainVerification,
keys.OIDC,
keys.SAML,
keys.Target,
&http.Client{},
permissionCheck,
sessionTokenVerifier,
config.OIDC.DefaultAccessTokenLifetime,
config.OIDC.DefaultRefreshTokenExpiration,
config.OIDC.DefaultRefreshTokenIdleExpiration,
config.DefaultInstance.SecretGenerators,
)
if err != nil {
return fmt.Errorf("cannot start commands: %w", err)
}
defer commands.Close(ctx) // wait for background jobs
clock := clockpkg.New()
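// Execution (actions) logs are emitted to stdout and, for quota accounting,
// into the database; both emitters feed the logstore service used by actions.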
actionsExecutionStdoutEmitter, err := logstore.NewEmitter[*record.ExecutionLog](ctx, clock, &logstore.EmitterConfig{Enabled: config.LogStore.Execution.Stdout.Enabled}, stdout.NewStdoutEmitter[*record.ExecutionLog]())
if err != nil {
return err
}
actionsExecutionDBEmitter, err := logstore.NewEmitter[*record.ExecutionLog](ctx, clock, config.Quotas.Execution, execution.NewDatabaseLogStorage(queryDBClient, commands, queries))
if err != nil {
return err
}
actionsLogstoreSvc := logstore.New(queries, actionsExecutionDBEmitter, actionsExecutionStdoutEmitter)
actions.SetLogstoreService(actionsLogstoreSvc)
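// Register the notification handlers (user notifications, quotas,
// back-channel logout, telemetry) and start their workers below.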
notification.Register(
ctx,
config.Projections.Customizations["notifications"],
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
config.Notifications,
*config.Telemetry,
config.ExternalDomain,
config.ExternalPort,
config.ExternalSecure,
commands,
queries,
eventstoreClient,
config.Login.DefaultOTPEmailURLV2,
config.SystemDefaults.Notifications.FileSystemPath,
keys.User,
keys.SMTP,
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
queryDBClient,
)
notification.Start(ctx)
router := mux.NewRouter()
tlsConfig, err := config.TLS.Config()
if err != nil {
return err
}
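// startAPIs registers the gRPC and HTTP handlers on the shared router.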
api, err := startAPIs(
ctx,
clock,
router,
commands,
queries,
eventstoreClient,
queryDBClient,
config,
storage,
authZRepo,
keys,
permissionCheck,
)
if err != nil {
return err
}
commands.GrpcMethodExisting = checkExisting(api.ListGrpcMethods())
commands.GrpcServiceExisting = checkExisting(api.ListGrpcServices())
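// Shut down gracefully on SIGINT or SIGTERM; listen blocks until one of
// these signals arrives.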
shutdown := make(chan os.Signal, 1)
signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM)
if server != nil {
server <- &Server{
Config: config,
DB: queryDBClient,
KeyStorage: keyStorage,
Keys: keys,
Eventstore: eventstoreClient,
Queries: queries,
AuthzRepo: authZRepo,
Storage: storage,
Commands: commands,
Router: router,
TLSConfig: tlsConfig,
Shutdown: shutdown,
}
close(server)
}
return listen(ctx, router, config.Port, tlsConfig, shutdown)
}
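
// startAPIs creates the token verifiers, access-log emitters and the
// gRPC/HTTP services, registering them on the provided router.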
func startAPIs(
ctx context.Context,
clock clockpkg.Clock,
router *mux.Router,
commands *command.Commands,
queries *query.Queries,
eventstore *eventstore.Eventstore,
dbClient *database.DB,
config *Config,
store static.Storage,
authZRepo authz_repo.Repository,
keys *encryption.EncryptionKeys,
permissionCheck domain.PermissionCheck,
) (*api.API, error) {
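// repo combines the authz repository with the query side; it backs the token
// verifiers created below.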
repo := struct {
authz_repo.Repository
*query.Queries
}{
authZRepo,
queries,
}
oidcPrefixes := []string{"/.well-known/openid-configuration", "/oidc/v1", "/oauth/v2"}
// always set the origin in the context if available in the http headers, no matter for what protocol
router.Use(middleware.WithOrigin(config.ExternalSecure, config.HTTP1HostHeader, config.HTTP2HostHeader, config.InstanceHostHeaders, config.PublicHostHeaders))
systemTokenVerifier, err := internal_authz.StartSystemTokenVerifierFromConfig(http_util.BuildHTTP(config.ExternalDomain, config.ExternalPort, config.ExternalSecure), config.SystemAPIUsers)
if err != nil {
return nil, err
}
accessTokenVerifier := internal_authz.StartAccessTokenVerifierFromRepo(repo)
verifier := internal_authz.StartAPITokenVerifier(repo, accessTokenVerifier, systemTokenVerifier)
tlsConfig, err := config.TLS.Config()
if err != nil {
return nil, err
}
accessStdoutEmitter, err := logstore.NewEmitter[*record.AccessLog](ctx, clock, &logstore.EmitterConfig{Enabled: config.LogStore.Access.Stdout.Enabled}, stdout.NewStdoutEmitter[*record.AccessLog]())
if err != nil {
return nil, err
}
accessDBEmitter, err := logstore.NewEmitter[*record.AccessLog](ctx, clock, &config.Quotas.Access.EmitterConfig, access.NewDatabaseLogStorage(dbClient, commands, queries))
if err != nil {
return nil, err
}
accessSvc := logstore.New[*record.AccessLog](queries, accessDBEmitter, accessStdoutEmitter)
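// The exhausted cookie is set once the access quota is used up; it is
// intentionally non-HttpOnly and not marked secure, presumably so browser
// applications such as Console can read and react to it.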
exhaustedCookieHandler := http_util.NewCookieHandler(
http_util.WithUnsecure(),
http_util.WithNonHttpOnly(),
http_util.WithMaxAge(int(math.Floor(config.Quotas.Access.ExhaustedCookieMaxAge.Seconds()))),
)
limitingAccessInterceptor := middleware.NewAccessInterceptor(accessSvc, exhaustedCookieHandler, &config.Quotas.Access.AccessConfig)
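// Both instance and public host headers are accepted by the API, hence the
// combined list is passed to the server setup.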
apis, err := api.New(ctx, config.Port, router, queries, verifier, config.InternalAuthZ, tlsConfig, config.ExternalDomain, append(config.InstanceHostHeaders, config.PublicHostHeaders...), limitingAccessInterceptor)
if err != nil {
return nil, fmt.Errorf("error creating api %w", err)
}
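// Wire the auth and admin eventsourcing repositories to the shared database
// client, eventstore and query side.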
config.Auth.Spooler.Client = dbClient
config.Auth.Spooler.Eventstore = eventstore
config.Auth.Spooler.ActiveInstancer = queries
authRepo, err := auth_es.Start(ctx, config.Auth, config.SystemDefaults, commands, queries, dbClient, eventstore, keys.OIDC, keys.User)
if err != nil {
return nil, fmt.Errorf("error starting auth repo: %w", err)
}
config.Admin.Spooler.Client = dbClient
config.Admin.Spooler.Eventstore = eventstore
config.Admin.Spooler.ActiveInstancer = queries
err = admin_es.Start(ctx, config.Admin, store, dbClient, queries)
if err != nil {
return nil, fmt.Errorf("error starting admin repo: %w", err)
}
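// Register the core gRPC servers (system, admin, management, auth) followed by
// the v2 and v3 (beta/alpha) resource services.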
if err := apis.RegisterServer(ctx, system.CreateServer(commands, queries, config.Database.DatabaseName(), config.DefaultInstance, config.ExternalDomain), tlsConfig); err != nil {
return nil, err
}
if err := apis.RegisterServer(ctx, admin.CreateServer(config.Database.DatabaseName(), commands, queries, keys.User, config.AuditLogRetention), tlsConfig); err != nil {
return nil, err
}
if err := apis.RegisterServer(ctx, management.CreateServer(commands, queries, config.SystemDefaults, keys.User), tlsConfig); err != nil {
return nil, err
}
if err := apis.RegisterServer(ctx, auth.CreateServer(commands, queries, authRepo, config.SystemDefaults, keys.User), tlsConfig); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, user_v2beta.CreateServer(commands, queries, keys.User, keys.IDPConfig, idp.CallbackURL(), idp.SAMLRootURL(), assets.AssetAPI(), permissionCheck)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, user_v2.CreateServer(commands, queries, keys.User, keys.IDPConfig, idp.CallbackURL(), idp.SAMLRootURL(), assets.AssetAPI(), permissionCheck)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, session_v2beta.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, settings_v2beta.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, org_v2beta.CreateServer(commands, queries, permissionCheck)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, feature_v2beta.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, session_v2.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, settings_v2.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, org_v2.CreateServer(commands, queries, permissionCheck)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, feature_v2.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, idp_v2.CreateServer(commands, queries, permissionCheck)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, action_v3_alpha.CreateServer(config.SystemDefaults, commands, queries, domain.AllFunctions, apis.ListGrpcMethods, apis.ListGrpcServices)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, userschema_v3_alpha.CreateServer(config.SystemDefaults, commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, user_v3_alpha.CreateServer(commands)); err != nil {
return nil, err
}
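// v3alpha web key service: management of the OIDC signing web keys (#8262).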
if err := apis.RegisterService(ctx, webkey.CreateServer(commands, queries)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, debug_events.CreateServer(commands, queries)); err != nil {
return nil, err
}
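// Plain HTTP handlers (assets, IDP callbacks, robots.txt, OpenAPI, OIDC, SAML,
// Console and Login) are mounted on their prefixes below; the instance
// interceptor resolves the instance for them, skipping the endpoints listed in
// login.IgnoreInstanceEndpoints.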
instanceInterceptor := middleware.InstanceInterceptor(queries, config.ExternalDomain, login.IgnoreInstanceEndpoints...)
assetsCache := middleware.AssetsCacheInterceptor(config.AssetStorage.Cache.MaxAge, config.AssetStorage.Cache.SharedMaxAge)
apis.RegisterHandlerOnPrefix(assets.HandlerPrefix, assets.NewHandler(commands, verifier, config.InternalAuthZ, id.SonyFlakeGenerator(), store, queries, middleware.CallDurationHandler, instanceInterceptor.Handler, assetsCache.Handler, limitingAccessInterceptor.Handle))
apis.RegisterHandlerOnPrefix(idp.HandlerPrefix, idp.NewHandler(commands, queries, keys.IDPConfig, instanceInterceptor.Handler))
userAgentInterceptor, err := middleware.NewUserAgentHandler(config.UserAgentCookie, keys.UserAgentCookieKey, id.SonyFlakeGenerator(), config.ExternalSecure, login.EndpointResources, login.EndpointExternalLoginCallbackFormPost, login.EndpointSAMLACS)
if err != nil {
return nil, err
}
// robots.txt handler
robotsTxtHandler, err := robots_txt.Start()
if err != nil {
return nil, fmt.Errorf("unable to start robots txt handler: %w", err)
}
apis.RegisterHandlerOnPrefix(robots_txt.HandlerPrefix, robotsTxtHandler)
// TODO: Record openapi access logs?
openAPIHandler, err := openapi.Start()
if err != nil {
return nil, fmt.Errorf("unable to start openapi handler: %w", err)
}
apis.RegisterHandlerOnPrefix(openapi.HandlerPrefix, openAPIHandler)
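// The OIDC provider serves endpoints on several prefixes and is therefore
// registered with a list of prefixes instead of a single one.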
oidcServer, err := oidc.NewServer(ctx, config.OIDC, login.DefaultLoggedOutPath, config.ExternalSecure, commands, queries, authRepo, keys.OIDC, keys.OIDCKey, eventstore, dbClient, userAgentInterceptor, instanceInterceptor.Handler, limitingAccessInterceptor, config.Log.Slog(), config.SystemDefaults.SecretHasher)
if err != nil {
return nil, fmt.Errorf("unable to start oidc provider: %w", err)
}
apis.RegisterHandlerPrefixes(oidcServer, oidcPrefixes...)
samlProvider, err := saml.NewProvider(config.SAML, config.ExternalSecure, commands, queries, authRepo, keys.OIDC, keys.SAML, eventstore, dbClient, instanceInterceptor.Handler, userAgentInterceptor, limitingAccessInterceptor)
if err != nil {
return nil, fmt.Errorf("unable to start saml provider: %w", err)
}
apis.RegisterHandlerOnPrefix(saml.HandlerPrefix, samlProvider.HttpHandler())
c, err := console.Start(config.Console, config.ExternalSecure, oidcServer.IssuerFromRequest, middleware.CallDurationHandler, instanceInterceptor.Handler, limitingAccessInterceptor, config.CustomerPortal)
if err != nil {
return nil, fmt.Errorf("unable to start console: %w", err)
}
apis.RegisterHandlerOnPrefix(path.HandlerPrefix, c)
consolePath := path.HandlerPrefix + "/"
l, err := login.CreateLogin(
config.Login,
commands,
queries,
authRepo,
store,
consolePath,
oidcServer.AuthCallbackURL(),
provider.AuthCallbackURL(samlProvider),
config.ExternalSecure,
userAgentInterceptor,
op.NewIssuerInterceptor(oidcServer.IssuerFromRequest).Handler,
provider.NewIssuerInterceptor(samlProvider.IssuerFromRequest).Handler,
instanceInterceptor.Handler,
assetsCache.Handler,
limitingAccessInterceptor.WithRedirect(consolePath).Handle,
keys.User,
keys.IDPConfig,
keys.CSRFCookieKey,
)
if err != nil {
return nil, fmt.Errorf("unable to start login: %w", err)
}
apis.RegisterHandlerOnPrefix(login.HandlerPrefix, l.Handler())
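// Device authorization requests hitting the unprefixed endpoint are redirected
// to the login prefix.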
apis.HandleFunc(login.EndpointDeviceAuth, login.RedirectDeviceAuthToPrefix)
// Registered after the OIDC provider so that its callback endpoint can be used
if err := apis.RegisterService(ctx, oidc_v2beta.CreateServer(commands, queries, oidcServer, config.ExternalSecure)); err != nil {
return nil, err
}
if err := apis.RegisterService(ctx, oidc_v2.CreateServer(commands, queries, oidcServer, config.ExternalSecure)); err != nil {
return nil, err
}
// handle gRPC last so that it can serve the root path, because gRPC and the gateway require a lot of different prefixes
apis.RouteGRPC()
return apis, nil
}
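// listen serves the router via a combined HTTP/1.1 and h2c (HTTP/2 cleartext)
// server on the given port and blocks until the server fails, a shutdown
// signal is received or the context is cancelled.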
func listen(ctx context.Context, router *mux.Router, port uint16, tlsConfig *tls.Config, shutdown <-chan os.Signal) error {
http2Server := &http2.Server{}
http1Server := &http.Server{Handler: h2c.NewHandler(router, http2Server), TLSConfig: tlsConfig}
lc := net.ListenConfig{}
lis, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
if err != nil {
return fmt.Errorf("tcp listener on %d failed: %w", port, err)
}
errCh := make(chan error)
go func() {
logging.Infof("server is listening on %s", lis.Addr().String())
if tlsConfig != nil {
// we don't need to pass the files here, because we already initialized the TLS config on the server
errCh <- http1Server.ServeTLS(lis, "", "")
} else {
errCh <- http1Server.Serve(lis)
}
}()
select {
case err := <-errCh:
return fmt.Errorf("error starting server: %w", err)
case <-shutdown:
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
return shutdownServer(ctx, http1Server)
case <-ctx.Done():
return shutdownServer(ctx, http1Server)
}
}
func shutdownServer(ctx context.Context, server *http.Server) error {
err := server.Shutdown(ctx)
if err != nil {
return fmt.Errorf("could not shutdown gracefully: %w", err)
}
logging.New().Info("server shutdown gracefully")
return nil
}
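// showBasicInformation prints the startup banner with version, TLS mode,
// machine id method and the Console / health check URLs.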
func showBasicInformation(startConfig *Config) {
fmt.Println(color.MagentaString(figure.NewFigure("ZITADEL", "", true).String()))
http := "http"
if startConfig.TLS.Enabled || startConfig.ExternalSecure {
http = "https"
}
consoleURL := fmt.Sprintf("%s://%s:%v/ui/console\n", http, startConfig.ExternalDomain, startConfig.ExternalPort)
healthCheckURL := fmt.Sprintf("%s://%s:%v/debug/healthz\n", http, startConfig.ExternalDomain, startConfig.ExternalPort)
machineIdMethod := id.MachineIdentificationMethod()
insecure := !startConfig.TLS.Enabled && !startConfig.ExternalSecure
fmt.Printf(" ===============================================================\n\n")
fmt.Printf(" Version : %s\n", build.Version())
fmt.Printf(" TLS enabled : %v\n", startConfig.TLS.Enabled)
fmt.Printf(" External Secure : %v\n", startConfig.ExternalSecure)
fmt.Printf(" Machine Id Method : %v\n", machineIdMethod)
fmt.Printf(" Console URL : %s", color.BlueString(consoleURL))
fmt.Printf(" Health Check URL : %s", color.BlueString(healthCheckURL))
if insecure {
fmt.Printf("\n %s: you're using plain http without TLS. Be aware this is \n", color.RedString("Warning"))
fmt.Printf(" not a secure setup and should only be used for test systems. \n")
fmt.Printf(" Visit: %s \n", color.CyanString("https://zitadel.com/docs/self-hosting/manage/tls_modes"))
}
fmt.Printf("\n ===============================================================\n\n")
}
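// checkExisting returns a predicate reporting whether a value is contained in
// the given list.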
func checkExisting(values []string) func(string) bool {
return func(value string) bool {
return slices.Contains(values, value)
}
}