mirror of
https://github.com/zitadel/zitadel.git
synced 2024-12-15 04:18:01 +00:00
8537805ea5
# Which Problems Are Solved The current handling of notification follows the same pattern as all other projections: Created events are handled sequentially (based on "position") by a handler. During the process, a lot of information is aggregated (user, texts, templates, ...). This leads to back pressure on the projection since the handling of events might take longer than the time before a new event (to be handled) is created. # How the Problems Are Solved - The current user notification handler creates separate notification events based on the user / session events. - These events contain all the present and required information including the userID. - These notification events get processed by notification workers, which gather the necessary information (recipient address, texts, templates) to send out these notifications. - If a notification fails, a retry event is created based on the current notification request including the current state of the user (this prevents race conditions, where a user is changed in the meantime and the notification already gets the new state). - The retry event will be handled after a backoff delay. This delay increases with every attempt. - If the configured amount of attempts is reached or the message expired (based on config), a cancel event is created, letting the workers know, the notification must no longer be handled. - In case of successful send, a sent event is created for the notification aggregate and the existing "sent" events for the user / session object is stored. - The following is added to the defaults.yaml to allow configuration of the notification workers: ```yaml Notifications: # The amount of workers processing the notification request events. # If set to 0, no notification request events will be handled. This can be useful when running in # multi binary / pod setup and allowing only certain executables to process the events. 
Workers: 1 # ZITADEL_NOTIFIACATIONS_WORKERS # The amount of events a single worker will process in a run. BulkLimit: 10 # ZITADEL_NOTIFIACATIONS_BULKLIMIT # Time interval between scheduled notifications for request events RequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_REQUEUEEVERY # The amount of workers processing the notification retry events. # If set to 0, no notification retry events will be handled. This can be useful when running in # multi binary / pod setup and allowing only certain executables to process the events. RetryWorkers: 1 # ZITADEL_NOTIFIACATIONS_RETRYWORKERS # Time interval between scheduled notifications for retry events RetryRequeueEvery: 2s # ZITADEL_NOTIFIACATIONS_RETRYREQUEUEEVERY # Only instances are projected, for which at least a projection-relevant event exists within the timeframe # from HandleActiveInstances duration in the past until the projection's current time # If set to 0 (default), every instance is always considered active HandleActiveInstances: 0s # ZITADEL_NOTIFIACATIONS_HANDLEACTIVEINSTANCES # The maximum duration a transaction remains open # before it stops left folding additional events # and updates the table. TransactionDuration: 1m # ZITADEL_NOTIFIACATIONS_TRANSACTIONDURATION # Automatically cancel the notification after the amount of failed attempts MaxAttempts: 3 # ZITADEL_NOTIFIACATIONS_MAXATTEMPTS # Automatically cancel the notification if it cannot be handled within a specific time MaxTtl: 5m # ZITADEL_NOTIFIACATIONS_MAXTTL # Failed attempts are retried after a configured delay (with exponential backoff). # Set a minimum and maximum delay and a factor for the backoff MinRetryDelay: 1s # ZITADEL_NOTIFIACATIONS_MINRETRYDELAY MaxRetryDelay: 20s # ZITADEL_NOTIFIACATIONS_MAXRETRYDELAY # Any factor below 1 will be set to 1 RetryDelayFactor: 1.5 # ZITADEL_NOTIFIACATIONS_RETRYDELAYFACTOR ``` # Additional Changes None # Additional Context - closes #8931
117 lines
3.0 KiB
YAML
117 lines
3.0 KiB
YAML
---
# ZITADEL CI/CD pipeline.
# Fans out into reusable workflows (build, test, lint), then gates
# container build and release on a manual workflow_dispatch trigger.
name: ZITADEL CI/CD

on:
  push:
    # Tag pushes are handled by the release job, not by CI on push.
    tags-ignore:
      - "*"
    branches:
      - "main"
  pull_request:
  workflow_dispatch:

# Cancel superseded runs of the same PR/branch to save runner time.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

permissions:
  contents: write
  packages: write
  issues: write
  pull-requests: write

jobs:
  # Build the Go core; exports a cache key/path consumed by downstream jobs.
  core:
    uses: ./.github/workflows/core.yml
    with:
      node_version: "20"
      buf_version: "latest"
      go_version: "1.22"

  # Build the Angular console; exports a cache key/path for compile.
  console:
    uses: ./.github/workflows/console.yml
    with:
      node_version: "20"
      buf_version: "latest"

  # Compute the next semantic version (dry run — nothing is published here).
  version:
    uses: ./.github/workflows/version.yml
    with:
      semantic_version: "23.0.7"
      dry_run: true

  # Compile the release binaries from the cached core and console builds.
  compile:
    needs: [core, console, version]
    uses: ./.github/workflows/compile.yml
    with:
      go_version: "1.22"
      core_cache_key: ${{ needs.core.outputs.cache_key }}
      console_cache_key: ${{ needs.console.outputs.cache_key }}
      core_cache_path: ${{ needs.core.outputs.cache_path }}
      console_cache_path: ${{ needs.console.outputs.cache_path }}
      version: ${{ needs.version.outputs.version }}

  core-unit-test:
    needs: core
    uses: ./.github/workflows/core-unit-test.yml
    with:
      go_version: "1.22"
      core_cache_key: ${{ needs.core.outputs.cache_key }}
      core_cache_path: ${{ needs.core.outputs.cache_path }}
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  core-integration-test:
    needs: core
    uses: ./.github/workflows/core-integration-test.yml
    with:
      go_version: "1.22"
      core_cache_key: ${{ needs.core.outputs.cache_key }}
      core_cache_path: ${{ needs.core.outputs.cache_path }}
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  lint:
    needs: [core, console]
    uses: ./.github/workflows/lint.yml
    with:
      go_version: "1.22"
      # NOTE(review): lint pins node 18 while core/console use 20 — presumably
      # intentional for the lint toolchain; confirm before unifying.
      node_version: "18"
      buf_version: "latest"
      go_lint_version: "v1.62.2"
      core_cache_key: ${{ needs.core.outputs.cache_key }}
      core_cache_path: ${{ needs.core.outputs.cache_path }}

  # Build the container image; only on manual dispatch.
  container:
    needs: [compile]
    uses: ./.github/workflows/container.yml
    secrets: inherit
    permissions:
      packages: write
    if: ${{ github.event_name == 'workflow_dispatch' }}
    with:
      build_image_name: "ghcr.io/zitadel/zitadel-build"

  e2e:
    uses: ./.github/workflows/e2e.yml
    needs: [compile]

  # Publish images and release artifacts; gated on manual dispatch and on
  # every test/lint/build job succeeding.
  release:
    uses: ./.github/workflows/release.yml
    permissions:
      packages: write
      contents: write
      issues: write
      pull-requests: write
    needs:
      [version, core-unit-test, core-integration-test, lint, container, e2e]
    if: ${{ github.event_name == 'workflow_dispatch' }}
    secrets:
      GCR_JSON_KEY_BASE64: ${{ secrets.GCR_JSON_KEY_BASE64 }}
      APP_ID: ${{ secrets.APP_ID }}
      APP_PRIVATE_KEY: ${{ secrets.APP_PRIVATE_KEY }}
    with:
      build_image_name: ${{ needs.container.outputs.build_image }}
      semantic_version: "23.0.7"
      image_name: "ghcr.io/zitadel/zitadel"
      google_image_name: "europe-docker.pkg.dev/zitadel-common/zitadel-repo/zitadel"