Merge branch 'main' into next-rc

Livio Spring 2024-11-27 16:59:41 +01:00
commit e4ebac13aa
No known key found for this signature in database
GPG Key ID: 26BB1C2FA5952CF0
152 changed files with 7674 additions and 2648 deletions

View File

@ -77,7 +77,7 @@ jobs:
go_version: "1.22"
node_version: "18"
buf_version: "latest"
go_lint_version: "v1.55.2"
go_lint_version: "v1.62.2"
core_cache_key: ${{ needs.core.outputs.cache_key }}
core_cache_path: ${{ needs.core.outputs.cache_path }}

ADOPTERS.md (new file, 15 lines)
View File

@ -0,0 +1,15 @@
## Adopters
Sharing experiences and learning from other users is essential. We are frequently asked who is using a particular feature of Zitadel so people can get in touch with other users to share experiences and best practices. People also often want to know if a specific product or platform has integrated Zitadel. While the Zitadel Discord Community allows users to get in touch, it can be challenging to find this information quickly.
The following is a directory of adopters to help identify users of individual features. The users themselves directly maintain the list.
### Adding yourself as a user
If you are using Zitadel, please consider adding yourself as a user with a quick description of your use case by opening a pull request to this file and adding a section describing your usage of Zitadel.
| Organization/Individual | Contact Information | Description of Usage |
| ----------------------- | -------------------------------------------------------- | ----------------------------------------------- |
| Zitadel | [@fforootd](https://github.com/fforootd) (and many more) | Zitadel Cloud makes heavy use of Zitadel ;-) |
| Organization Name | contact@example.com | Description of how they use Zitadel |
| Individual Name | contact@example.com | Description of how they use Zitadel |

View File

@ -3,6 +3,35 @@
Dear community!
We're excited to announce bi-weekly office hours.
## #6 Q&A
Hey folks!
We're inviting you to our next open office hours session! C: From leveraging ZITADEL actions to exploring your use cases, join our hosts Silvan & Stefan on Wednesday, November 20, 2024 at 11:00 AM (EST) as they answer your questions about ZITADEL!
🦒 **What to expect**
- An open Q&A session - Share your questions and support others with their inquiries.
- A space to share your thoughts / feedback on the ZITADEL platform
🗒️ **Details**
Target audience: All ZITADEL platform users & community members
Topic: Q&A Session
Date & time: Wednesday, November 20, 2024 at 11:00 AM (EST)
Duration: ~1 hour
Platform: ZITADEL's Discord stage channel
Register for this event here ➡️ https://discord.gg/bnuAe2RX?event=1307010383713927230
🗓️ **Add this to your calendar** ➡️ [Google Calendar](https://calendar.google.com/calendar/u/0/r/eventedit?dates=20241120T110000/20241120T110000&details=We%E2%80%99re+inviting+you+to+our+next+open+office+hours+session!+C:+From+leveraging+ZITADEL+actions+to+exploring+your+use+cases,+join+our+hosts+Silvan+%26+Stefan+as+they+answer+your+questions+about+ZITADEL!+%0A%0A**What+to+expect**%0A%0A-+An+open+Q%26A+session+-+Share+your+questions+and+support+others+with+their+inquiries.+%0A-+A+space+to+share+your+thoughts+/+feedback+on+the+ZITADEL+platform+++%0A%0A**Details**+%0A%0A**Target+audience:**+All+ZITADEL+platform+users+%26+community+members%0A**Topic**:+Q%26A+Session+%0A**Date+%26+time**:+Wednesday,+November+20,+2024+11:00+AM%0A**Duration**:+~1+hour+%0A**Platform**:+ZITADEL%E2%80%99s+Discord+stage+channel&location=Discord:+ZITADEL+server,+office+hours&text=Open+Office+Hours)
If you have any questions prior to the live session, be sure to share them in the office hours stage chat.
Looking forward to seeing you there! Share this with other ZITADEL users & people who might be interested in ZITADEL! It's appreciated 🫶
## #5 Q&A
Dear community,

View File

@ -89,6 +89,10 @@ Available data regions are:
ZITADEL Cloud comes with a free tier, providing you with all the same features as the open-source version.
Learn more about the [pay-as-you-go pricing](https://zitadel.com/pricing).
## Adopters
We are grateful to the organizations and individuals who are using ZITADEL. If you are using ZITADEL, please consider adding your name to our [Adopters list](./ADOPTERS.md) by submitting a pull request.
### Example applications
Clone one of our [example applications](https://zitadel.com/docs/sdk-examples/introduction) or deploy them directly to Vercel.

View File

@ -68,7 +68,7 @@ Port: 8080 # ZITADEL_PORT
# It can differ from Port e.g. if a reverse proxy forwards the traffic to ZITADEL
# Read more about external access: https://zitadel.com/docs/self-hosting/manage/custom-domain
ExternalPort: 8080 # ZITADEL_EXTERNALPORT
# ExternalPort is the domain on which end users access ZITADEL.
# ExternalDomain is the domain on which end users access ZITADEL.
# Read more about external access: https://zitadel.com/docs/self-hosting/manage/custom-domain
ExternalDomain: localhost # ZITADEL_EXTERNALDOMAIN
# ExternalSecure specifies if ZITADEL is exposed externally using HTTPS or HTTP.
@ -291,6 +291,19 @@ Caches:
DisableIndentity: false
# Add suffix to client name. Default is empty.
IdentitySuffix: ""
# Implementation of [Circuit Breaker Pattern](https://learn.microsoft.com/en-us/previous-versions/msp-n-p/dn589784(v=pandp.10)?redirectedfrom=MSDN)
CircuitBreaker:
# Interval when the counters are reset to 0.
# 0 interval never resets the counters until the CB is opened.
Interval: 0
# Amount of consecutive failures permitted
MaxConsecutiveFailures: 5
# The ratio of failed requests out of total requests
MaxFailureRatio: 0.1
# Timeout after opening of the CB, until the state is set to half-open.
Timeout: 60s
# The amount of requests that are allowed to pass when the CB is half-open.
MaxRetryRequests: 1
# Instance caches auth middleware instances, gettable by domain or ID.
Instance:
@ -315,6 +328,16 @@ Caches:
AddSource: true
Formatter:
Format: text
# Organization cache, gettable by primary domain or ID.
Organization:
Connector: ""
MaxAge: 1h
LastUsage: 10m
Log:
Level: error
AddSource: true
Formatter:
Format: text
Machine:
# Cloud-hosted VMs need to specify their metadata endpoint so that the machine can be uniquely identified.
@ -425,6 +448,40 @@ Projections:
# Telemetry data synchronization is not time critical. Setting RequeueEvery to 55 minutes doesn't annoy the database too much.
RequeueEvery: 3300s # ZITADEL_PROJECTIONS_CUSTOMIZATIONS_TELEMETRY_REQUEUEEVERY
Notifications:
# The amount of workers processing the notification request events.
# If set to 0, no notification request events will be handled. This can be useful when running in
# a multi-binary / pod setup and allowing only certain executables to process the events.
Workers: 1 # ZITADEL_NOTIFICATIONS_WORKERS
# The amount of events a single worker will process in a run.
BulkLimit: 10 # ZITADEL_NOTIFICATIONS_BULKLIMIT
# Time interval between scheduled notifications for request events
RequeueEvery: 2s # ZITADEL_NOTIFICATIONS_REQUEUEEVERY
# The amount of workers processing the notification retry events.
# If set to 0, no notification retry events will be handled. This can be useful when running in
# a multi-binary / pod setup and allowing only certain executables to process the events.
RetryWorkers: 1 # ZITADEL_NOTIFICATIONS_RETRYWORKERS
# Time interval between scheduled notifications for retry events
RetryRequeueEvery: 2s # ZITADEL_NOTIFICATIONS_RETRYREQUEUEEVERY
# Only instances are projected, for which at least a projection-relevant event exists within the timeframe
# from HandleActiveInstances duration in the past until the projection's current time
# If set to 0 (default), every instance is always considered active
HandleActiveInstances: 0s # ZITADEL_NOTIFICATIONS_HANDLEACTIVEINSTANCES
# The maximum duration a transaction remains open
# before it stops left-folding additional events
# and updates the table.
TransactionDuration: 1m # ZITADEL_NOTIFICATIONS_TRANSACTIONDURATION
# Automatically cancel the notification after the amount of failed attempts
MaxAttempts: 3 # ZITADEL_NOTIFICATIONS_MAXATTEMPTS
# Automatically cancel the notification if it cannot be handled within a specific time
MaxTtl: 5m # ZITADEL_NOTIFICATIONS_MAXTTL
# Failed attempts are retried after a configured delay (with exponential backoff).
# Set a minimum and maximum delay and a factor for the backoff
MinRetryDelay: 1s # ZITADEL_NOTIFICATIONS_MINRETRYDELAY
MaxRetryDelay: 20s # ZITADEL_NOTIFICATIONS_MAXRETRYDELAY
# Any factor below 1 will be set to 1
RetryDelayFactor: 1.5 # ZITADEL_NOTIFICATIONS_RETRYDELAYFACTOR
Auth:
# See Projections.BulkLimit
SearchLimit: 1000 # ZITADEL_AUTH_SEARCHLIMIT

View File

@ -69,6 +69,7 @@ func projectionsCmd() *cobra.Command {
type ProjectionsConfig struct {
Destination database.Config
Projections projection.Config
Notifications handlers.WorkerConfig
EncryptionKeys *encryption.EncryptionKeyConfig
SystemAPIUsers map[string]*internal_authz.SystemAPIUser
Eventstore *eventstore.Config
@ -205,6 +206,7 @@ func projections(
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
config.Notifications,
*config.Telemetry,
config.ExternalDomain,
config.ExternalPort,
@ -219,6 +221,7 @@ func projections(
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
client,
)
config.Auth.Spooler.Client = client

cmd/setup/39.go (new file, 27 lines)
View File

@ -0,0 +1,27 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 39.sql
deleteStaleOrgFields string
)
type DeleteStaleOrgFields struct {
dbClient *database.DB
}
func (mig *DeleteStaleOrgFields) Execute(ctx context.Context, _ eventstore.Event) error {
_, err := mig.dbClient.ExecContext(ctx, deleteStaleOrgFields)
return err
}
func (mig *DeleteStaleOrgFields) String() string {
return "39_delete_stale_org_fields"
}

cmd/setup/39.sql (new file, 6 lines)
View File

@ -0,0 +1,6 @@
DELETE FROM eventstore.fields
WHERE aggregate_type = 'org'
AND aggregate_id NOT IN (
SELECT id
FROM projections.orgs1
);

View File

@ -42,6 +42,7 @@ type Config struct {
DefaultInstance command.InstanceSetup
Machine *id.Config
Projections projection.Config
Notifications handlers.WorkerConfig
Eventstore *eventstore.Config
InitProjections InitProjections
@ -125,6 +126,7 @@ type Steps struct {
s36FillV2Milestones *FillV3Milestones
s37Apps7OIDConfigsBackChannelLogoutURI *Apps7OIDConfigsBackChannelLogoutURI
s38BackChannelLogoutNotificationStart *BackChannelLogoutNotificationStart
s39DeleteStaleOrgFields *DeleteStaleOrgFields
}
func MustNewSteps(v *viper.Viper) *Steps {

View File

@ -169,6 +169,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s36FillV2Milestones = &FillV3Milestones{dbClient: queryDBClient, eventstore: eventstoreClient}
steps.s37Apps7OIDConfigsBackChannelLogoutURI = &Apps7OIDConfigsBackChannelLogoutURI{dbClient: esPusherDBClient}
steps.s38BackChannelLogoutNotificationStart = &BackChannelLogoutNotificationStart{dbClient: esPusherDBClient, esClient: eventstoreClient}
steps.s39DeleteStaleOrgFields = &DeleteStaleOrgFields{dbClient: esPusherDBClient}
err = projection.Create(ctx, projectionDBClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
@ -232,6 +233,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s32AddAuthSessionID,
steps.s33SMSConfigs3TwilioAddVerifyServiceSid,
steps.s37Apps7OIDConfigsBackChannelLogoutURI,
steps.s39DeleteStaleOrgFields,
} {
mustExecuteMigration(ctx, eventstoreClient, step, "migration failed")
}
@ -435,6 +437,7 @@ func initProjections(
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
config.Notifications,
*config.Telemetry,
config.ExternalDomain,
config.ExternalPort,
@ -449,6 +452,7 @@ func initProjections(
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
queryDBClient,
)
for _, p := range notify_handler.Projections() {
err := migration.Migrate(ctx, eventstoreClient, p)

View File

@ -54,6 +54,7 @@ type Config struct {
Metrics metrics.Config
Profiler profiler.Config
Projections projection.Config
Notifications handlers.WorkerConfig
Auth auth_es.Config
Admin admin_es.Config
UserAgentCookie *middleware.UserAgentCookieConfig

View File

@ -277,6 +277,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
config.Projections.Customizations["notificationsquotas"],
config.Projections.Customizations["backchannel"],
config.Projections.Customizations["telemetry"],
config.Notifications,
*config.Telemetry,
config.ExternalDomain,
config.ExternalPort,
@ -291,6 +292,7 @@ func startZitadel(ctx context.Context, config *Config, masterKey string, server
keys.SMS,
keys.OIDC,
config.OIDC.DefaultBackChannelLogoutLifetime,
queryDBClient,
)
notification.Start(ctx)

View File

@ -8,12 +8,10 @@
</p>
</div>
<span class="fill-space"></span>
<span class="pos cnsl-secondary-text" *ngIf="!hidePagination"
>{{ pageIndex * pageSize }} - {{ pageIndex * pageSize + pageSize }}
</span>
<span class="pos cnsl-secondary-text" *ngIf="!hidePagination">{{ startIndex }} - {{ endIndex }} </span>
<div class="row" *ngIf="!hidePagination">
<cnsl-form-field class="size">
<mat-select class="paginator-select" [(ngModel)]="pageSize" (selectionChange)="emitChange()">
<mat-select class="paginator-select" [value]="pageSize" (selectionChange)="updatePageSize($event.value)">
<mat-option *ngFor="let sizeOption of pageSizeOptions" [value]="sizeOption">
{{ sizeOption }}
</mat-option>

View File

@ -50,6 +50,15 @@ export class PaginatorComponent {
return temp <= this.length / this.pageSize;
}
get startIndex(): number {
return this.pageIndex * this.pageSize;
}
get endIndex(): number {
const max = this.startIndex + this.pageSize;
return this.length < max ? this.length : max;
}
public emitChange(): void {
this.page.emit({
length: this.length,
@ -58,4 +67,10 @@ export class PaginatorComponent {
pageSizeOptions: this.pageSizeOptions,
});
}
public updatePageSize(newSize: number): void {
this.pageSize = newSize;
this.pageIndex = 0;
this.emitChange();
}
}

View File

@ -31,9 +31,9 @@ export class ProjectRoleDetailDialogComponent {
}
submitForm(): void {
if (this.formGroup.valid && this.key?.value && this.group?.value && this.displayName?.value) {
if (this.formGroup.valid && this.key?.value && this.displayName?.value) {
this.mgmtService
.updateProjectRole(this.projectId, this.key.value, this.displayName.value, this.group.value)
.updateProjectRole(this.projectId, this.key.value, this.displayName.value, this.group?.value)
.then(() => {
this.toast.showInfo('PROJECT.TOAST.ROLECHANGED', true);
this.dialogRef.close(true);

View File

@ -35,8 +35,8 @@
[disabled]="disabled"
color="primary"
(change)="$event ? masterToggle() : null"
[checked]="selection.hasValue() && isAllSelected()"
[indeterminate]="selection.hasValue() && !isAllSelected()"
[checked]="isAnySelected() && isAllSelected()"
[indeterminate]="isAnySelected() && !isAllSelected()"
>
</mat-checkbox>
</div>
@ -76,7 +76,7 @@
class="role state"
[ngClass]="{ 'no-selection': !selectionAllowed }"
*ngIf="role.group"
(click)="selectionAllowed ? selectAllOfGroup(role.group) : openDetailDialog(role)"
(click)="selectionAllowed ? groupMasterToggle(role.group) : openDetailDialog(role)"
[matTooltip]="selectionAllowed ? ('PROJECT.ROLE.SELECTGROUPTOOLTIP' | translate: role) : null"
>{{ role.group }}</span
>
@ -135,7 +135,7 @@
#paginator
[timestamp]="dataSource.viewTimestamp"
[length]="dataSource.totalResult"
[pageSize]="50"
[pageSize]="INITIAL_PAGE_SIZE"
(page)="changePage()"
[pageSizeOptions]="[25, 50, 100, 250]"
>

View File

@ -18,6 +18,7 @@ import { ProjectRolesDataSource } from './project-roles-table-datasource';
styleUrls: ['./project-roles-table.component.scss'],
})
export class ProjectRolesTableComponent implements OnInit {
public INITIAL_PAGE_SIZE: number = 50;
@Input() public projectId: string = '';
@Input() public grantId: string = '';
@Input() public disabled: boolean = false;
@ -43,41 +44,58 @@ export class ProjectRolesTableComponent implements OnInit {
}
public ngOnInit(): void {
this.dataSource.loadRoles(this.projectId, this.grantId, 0, 25, 'asc');
this.dataSource.rolesSubject.subscribe((roles) => {
const selectedRoles: Role.AsObject[] = roles.filter((role) => this.selectedKeys.includes(role.key));
this.selection.select(...selectedRoles.map((r) => r.key));
});
this.loadRolesPage();
this.selection.select(...this.selectedKeys);
this.selection.changed.subscribe(() => {
this.changedSelection.emit(this.selection.selected);
});
}
public selectAllOfGroup(group: string): void {
const groupRoles: Role.AsObject[] = this.dataSource.rolesSubject.getValue().filter((role) => role.group === group);
this.selection.select(...groupRoles.map((r) => r.key));
}
private loadRolesPage(): void {
this.dataSource.loadRoles(this.projectId, this.grantId, this.paginator?.pageIndex ?? 0, this.paginator?.pageSize ?? 25);
this.dataSource.loadRoles(
this.projectId,
this.grantId,
this.paginator?.pageIndex ?? 0,
this.paginator?.pageSize ?? this.INITIAL_PAGE_SIZE,
);
}
public changePage(): void {
this.loadRolesPage();
}
public isAllSelected(): boolean {
const numSelected = this.selection.selected.length;
const numRows = this.dataSource.totalResult;
return numSelected === numRows;
private listIsAllSelected(list: string[]): boolean {
return list.findIndex((key) => !this.selection.isSelected(key)) == -1;
}
private listIsAnySelected(list: string[]): boolean {
return list.findIndex((key) => this.selection.isSelected(key)) != -1;
}
private listMasterToggle(list: string[]): void {
if (this.listIsAllSelected(list)) this.selection.deselect(...list);
else this.selection.select(...list);
}
private compilePageKeys(): string[] {
return this.dataSource.rolesSubject.value.map((role) => role.key);
}
public masterToggle(): void {
this.isAllSelected()
? this.selection.clear()
: this.dataSource.rolesSubject.value.forEach((row: Role.AsObject) => this.selection.select(row.key));
this.listMasterToggle(this.compilePageKeys());
}
public isAllSelected(): boolean {
return this.listIsAllSelected(this.compilePageKeys());
}
public isAnySelected(): boolean {
return this.listIsAnySelected(this.compilePageKeys());
}
public groupMasterToggle(group: string): void {
this.listMasterToggle(this.dataSource.rolesSubject.value.filter((role) => role.group == group).map((role) => role.key));
}
public deleteRole(role: Role.AsObject): void {
@ -93,45 +111,28 @@ export class ProjectRolesTableComponent implements OnInit {
dialogRef.afterClosed().subscribe((resp) => {
if (resp) {
const index = this.dataSource.rolesSubject.value.findIndex((iter) => iter.key === role.key);
this.mgmtService.removeProjectRole(this.projectId, role.key).then(() => {
this.toast.showInfo('PROJECT.TOAST.ROLEREMOVED', true);
if (index > -1) {
this.dataSource.rolesSubject.value.splice(index, 1);
this.dataSource.rolesSubject.next(this.dataSource.rolesSubject.value);
}
this.loadRolesPage();
});
}
});
}
public removeRole(role: Role.AsObject, index: number): void {
this.mgmtService
.removeProjectRole(this.projectId, role.key)
.then(() => {
this.toast.showInfo('PROJECT.TOAST.ROLEREMOVED', true);
this.dataSource.rolesSubject.value.splice(index, 1);
this.dataSource.rolesSubject.next(this.dataSource.rolesSubject.value);
})
.catch((error) => {
this.toast.showError(error);
});
}
public openDetailDialog(role: Role.AsObject): void {
this.dialog.open(ProjectRoleDetailDialogComponent, {
const dialogRef = this.dialog.open(ProjectRoleDetailDialogComponent, {
data: {
role,
projectId: this.projectId,
},
width: '400px',
});
dialogRef.afterClosed().subscribe(() => this.loadRolesPage());
}
public refreshPage(): void {
this.dataSource.loadRoles(this.projectId, this.grantId, this.paginator?.pageIndex ?? 0, this.paginator?.pageSize ?? 25);
this.loadRolesPage();
}
public get selectionAllowed(): boolean {

View File

@ -4537,9 +4537,9 @@ critters@0.0.20:
pretty-bytes "^5.3.0"
cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.3"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
version "7.0.6"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
dependencies:
path-key "^3.1.0"
shebang-command "^2.0.0"
@ -6042,9 +6042,9 @@ http-proxy-agent@^5.0.0:
debug "4"
http-proxy-middleware@^2.0.3:
version "2.0.6"
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f"
integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==
version "2.0.7"
resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6"
integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==
dependencies:
"@types/http-proxy" "^1.17.8"
http-proxy "^1.18.1"

View File

@ -0,0 +1,77 @@
<!--
query data from output.csv:
Note: you might need to adjust the WHERE clause to only filter the required trends and the current placeholders
Warning: it's currently only possible to show data of one endpoint
```
copy (SELECT
metric_name
, to_timestamp(timestamp::DOUBLE) as timestamp
, approx_quantile(metric_value, 0.50) AS p50
, approx_quantile(metric_value, 0.95) AS p95
, approx_quantile(metric_value, 0.99) AS p99
FROM
read_csv('/path/to/k6-output.csv', auto_detect=false, delim=',', quote='"', escape='"', new_line='\n', skip=0, comment='', header=true, columns={'metric_name': 'VARCHAR', 'timestamp': 'BIGINT', 'metric_value': 'DOUBLE', 'check': 'VARCHAR', 'error': 'VARCHAR', 'error_code': 'VARCHAR', 'expected_response': 'BOOLEAN', 'group': 'VARCHAR', 'method': 'VARCHAR', 'name': 'VARCHAR', 'proto': 'VARCHAR', 'scenario': 'VARCHAR', 'service': 'VARCHAR', 'status': 'BIGINT', 'subproto': 'VARCHAR', 'tls_version': 'VARCHAR', 'url': 'VARCHAR', 'extra_tags': 'VARCHAR', 'metadata': 'VARCHAR'})
WHERE
metric_name LIKE '%_duration'
GROUP BY
metric_name
, timestamp
ORDER BY
metric_name
, timestamp
) to 'output.json' (ARRAY);
```
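One way to run the query above is with the DuckDB CLI; a minimal sketch, assuming the CLI is installed and the query is saved as `query.sql` (the file name and CSV path are placeholders):
```bash
# Sketch only: adjust the CSV path and WHERE clause in query.sql first.
# The DuckDB CLI reads SQL from stdin; the COPY statement writes output.json
# into the current working directory, which is then consumed by the chart below.
duckdb < query.sql
```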
-->
## Summary
TODO: describe the outcome of the test?
## Performance test results
| Metric | Value |
| :------------------------------------ | :---- |
| Baseline | none |
| Purpose | |
| Test start | UTC |
| Test duration | 30min |
| Executed test | |
| k6 version | |
| VUs | |
| Client location | |
| Client machine specification | vCPU: <br/> memory: GB |
| ZITADEL location | |
| ZITADEL container specification | vCPU: <br/> Memory: GB <br/>Container count: |
| ZITADEL Version | |
| ZITADEL Configuration | |
| ZITADEL feature flags | |
| Database | type: crdb / psql<br />version: |
| Database location | |
| Database specification | vCPU: <br/> memory: GB |
| ZITADEL metrics during test | |
| Observed errors | |
| Top 3 most expensive database queries | |
| Database metrics during test | |
| k6 Iterations per second | |
| k6 overview | |
| k6 output | |
| flowchart outcome | |
## Endpoint latencies
import OutputSource from "!!raw-loader!./output.json";
import { BenchmarkChart } from '/src/components/benchmark_chart';
<BenchmarkChart testResults={OutputSource} />
## k6 output {#k6-output}
```bash
TODO: add summary of k6
```

View File

@ -0,0 +1,111 @@
---
title: Benchmarks
sidebar_label: Benchmarks
---
import DocCardList from '@theme/DocCardList';
Benchmarks are crucial to understand if ZITADEL fulfills your expected workload and what resources it needs to do so.
This document explains the process and goals of load-testing zitadel in a cloud environment.
The results can be found on sub pages.
## Goals
The primary goal is to assess if ZITADEL can scale to the required proportions. The goals might change over time and with the maturity of ZITADEL. At the moment the goal is to assess how the application's performance scales. There are some concrete goals we have to meet:
1. [https://github.com/zitadel/zitadel/issues/8352](https://github.com/zitadel/zitadel/issues/8352) defines 1000 JWT profile auth/sec
2. [https://github.com/zitadel/zitadel/issues/4424](https://github.com/zitadel/zitadel/issues/4424) defines 1200 logins / sec.
## Procedure
First we determine the “target” of our load-test. The target is expressed as a make recipe in the load-test [Makefile](https://github.com/zitadel/zitadel/blob/main/load-test/Makefile). See also the load-test [readme](https://github.com/zitadel/zitadel/blob/main/load-test/README.md) on how to configure and run load-tests.
A target should be tested for longer periods of time, as it might take time for certain metrics to show up. For example, cloud SQL samples query insights. A runtime of at least **30 minutes** is advised at the moment.
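For illustration, invoking a single target could look like the following sketch; the recipe name is taken from the result pages below, and any further configuration (host, credentials, VUs) is assumed to be set up as described in the load-test readme:
```bash
# Sketch only: recipe names and the required environment variables are documented
# in load-test/README.md; machine_jwt_profile_grant is one example target.
cd load-test
make machine_jwt_profile_grant
```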
After each iteration of load-test, we should consult the [After test procedure](#after-test-procedure) to conclude an outcome:
1. Scale
2. Log potential issue and scale
3. Terminate testing and resolve issues
## Methodology
### Benchmark definition
Tests are implemented in the ecosystem of [k6](https://k6.io). The tests are publicly available in the [zitadel repository](https://github.com/zitadel/zitadel/tree/main/load-test). Custom extensions of k6 are implemented in the [xk6-modules repository](https://github.com/zitadel/xk6-modules).
The tests must at least measure the request duration for each API call. This gives an indication of how ZITADEL behaves over the duration of the load test.
### Metrics
The following metrics must be collected for each test iteration. The metrics are used to follow the decision path of the [After test procedure](https://drive.google.com/open?id=1WVr7aA8dGgV1zd2jUg1y1h_o37mkZF2O6M5Mhafn_NM):
| Metric | Type | Description | Unit |
| :---- | :---- | :---- | :---- |
| Baseline | Comparison | Defines the baseline the test is compared against. If not specified, the baseline defined in this document is used. | Link to test result |
| Purpose | Description | Description of what should be proven with this test run | text |
| Test start | Setup | Timestamp when the test started. This is useful for gathering additional data like metrics or logs later | Date |
| Test duration | Setup | Duration of the test | Duration |
| Executed test | Setup | Name of the make recipe executed. Further information about specific test cases can be found in the [load-test readme](https://github.com/zitadel/zitadel/blob/main/load-test/README.md#test). | Name of the make recipe |
| k6 version | Setup | Version of the test client (k6) used | semantic version |
| VUs | Setup | Virtual Users which execute the test scenario in parallel | Number |
| Client location | Setup | Region or location of the machine which executed the test client. If not further specified, the hoster is Google Cloud | Location / Region |
| Client machine specification | Setup | Definition of the client machine the test client ran on. The resources of the machine could be maxed out during tests, therefore we collect this metric as well. The description must at least clarify the following metrics: vCPU, memory, egress bandwidth | **vCPU**: Amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) **memory**: GB **egress bandwidth**: Gbps |
| ZITADEL location | Setup | Region or location of the deployment of zitadel. If not further specified, the hoster is Google Cloud | Location / Region |
| ZITADEL container specification | Setup | As ZITADEL is mainly run in cloud environments, it should also be run as a container during the load tests. The description must at least clarify the following metrics: vCPU, memory, egress bandwidth, scale | **vCPU**: Amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) **memory**: GB **egress bandwidth**: Gbps **scale**: The amount of containers running during the test. The amount must not vary during the tests |
| ZITADEL Version | Setup | The version of zitadel deployed | Semantic version or commit |
| ZITADEL Configuration | Setup | Configuration of zitadel which deviates from the defaults and is not secret | yaml |
| ZITADEL feature flags | Setup | Changed feature flags | yaml |
| Database | Setup | Database type and version | **type**: crdb / psql **version**: semantic version |
| Database location | Setup | Region or location of the deployment of the database. If not further specified, the hoster is Google Cloud SQL | Location / Region |
| Database specification | Setup | The description must at least clarify the following metrics: vCPU, memory and egress bandwidth (scale) | **vCPU**: Amount of threads ([additional info](https://cloud.google.com/compute/docs/cpu-platforms)) **memory**: GB **egress bandwidth**: Gbps **scale**: Amount of crdb nodes if crdb is used |
| ZITADEL metrics during test | Result | This metric helps in understanding the bottlenecks of the executed test. At least the following metrics must be provided: CPU usage, memory usage | **CPU usage** in percent **Memory usage** in percent |
| Observed errors | Result | Errors worth mentioning, mostly unexpected errors | description |
| Top 3 most expensive database queries | Result | The execution plan of the top 3 most expensive database queries during the test execution | database execution plan |
| Database metrics during test | Result | This metric helps in understanding the bottlenecks of the executed test. At least the following metrics must be provided: CPU usage, memory usage | **CPU usage** in percent **Memory usage** in percent |
| k6 Iterations per second | Result | How many test iterations were done per second | Number |
| k6 overview | Result | Shows some basic metrics aggregated over the test run. At least the following metrics must be included: duration per request (min, max, avg, p50, p95, p99), VUs. For simplicity, just add the whole test result printed to the terminal | terminal output |
| k6 output | Result | Trends and metrics generated during the test; this contains detailed information for each step executed during each iteration | csv |
### Test setup
#### Make recipes
Details about the tests implemented can be found in [this readme](https://github.com/zitadel/zitadel/blob/main/load-test/README.md#test).
### Test conclusion
After each iteration of load-test, we should consult the [Flowchart](#after-test-procedure) to conclude an outcome:
1. [Scale](#scale)
2. [Log potential issue and scale](#potential-issues)
3. [Terminate testing](#termination) and resolve issues
#### Scale {#scale}
An outcome of scale means that the service hit some kind of resource limit, like CPU or RAM, which can be increased. In such cases we increase the suggested parameter and rerun the load-test for the same target. On the next test we should analyse if the increase in scale resulted in a performance improvement proportional to the scale parameter. For example, if we scale from 1 to 2 containers, it might be reasonable to expect a doubling of iterations / sec. If such an increase is not noticed, there might be another bottleneck or underlying issue, such as locking.
#### Potential issues {#potential-issues}
A potential issue has an impact on performance, but does not prevent us from scaling. Such issues must be logged in GitHub issues and load-testing can continue. The issue can be resolved at a later time and the load-tests repeated when it is. This is primarily for issues which require big changes to ZITADEL.
#### Termination {#termination}
Scaling no longer improves iterations / second, or some kind of critical error or bug is experienced. The root cause of the issue must be resolved before we can continue with increasing scale.
### After test procedure
This flowchart shows the procedure after running a test.
![Flowchart](/img/benchmark/Flowchart.svg)
## Baseline
Will be established as soon as the goal described above is reached.
## Test results
This chapter provides a table linking to the detailed test results.
<DocCardList />

View File

@ -0,0 +1,75 @@
---
title: machine jwt profile grant benchmark of zitadel v2.65.0
sidebar_label: machine jwt profile grant
---
## Summary
Tests are halted after this test run because of too many [client read events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/wait-event.clientread.html) on the database.
## Performance test results
| Metric | Value |
| :---- | :---- |
| Baseline | none |
| Test start | 22-10-2024 16:20 UTC |
| Test duration | 30min |
| Executed test | machine\_jwt\_profile\_grant |
| k6 version | v0.54.0 |
| VUs | 50 |
| Client location | US1 |
| Client machine specification | e2-highcpu-4 |
| Zitadel location | US1 |
| Zitadel container specification | vCPUs: 2<br/>Memory: 512 MiB<br/>Container count: 2 |
| Zitadel feature flags | none |
| Database | postgres v15 |
| Database location | US1 |
| Database specification | vCPUs: 4<br/>Memory: 16 GiB |
| Zitadel metrics during test | |
| Observed errors | Many client read events during push |
| Top 3 most expensive database queries | 1: Query events `instance_id = $1 AND aggregate_type = $2 AND aggregate_id = $3 AND event_type = ANY($4)`<br/>2: latest sequence query during push events<br/>3: writing events during push (caused lock wait events) |
| k6 iterations per second | 193 |
| k6 overview | [output](#k6-output) |
| flowchart outcome | Halt tests, must resolve an issue |
## /token endpoint latencies
import OutputSource from "!!raw-loader!./output.json";
import { BenchmarkChart } from '/src/components/benchmark_chart';
<BenchmarkChart testResults={OutputSource} />
## k6 output {#k6-output}
```bash
checks...............................: 100.00% ✓ 695739 ✗ 0
data_received........................: 479 MB 265 kB/s
data_sent............................: 276 MB 153 kB/s
http_req_blocked.....................: min=178ns avg=5µs max=119.8ms p(50)=460ns p(95)=702ns p(99)=921ns
http_req_connecting..................: min=0s avg=1.24µs max=43.45ms p(50)=0s p(95)=0s p(99)=0s
http_req_duration....................: min=18ms avg=255.3ms max=1.22s p(50)=241.56ms p(95)=479.19ms p(99)=600.92ms
{ expected_response:true }.........: min=18ms avg=255.3ms max=1.22s p(50)=241.56ms p(95)=479.19ms p(99)=600.92ms
http_req_failed......................: 0.00% ✓ 0 ✗ 347998
http_req_receiving...................: min=25.92µs avg=536.96µs max=401.94ms p(50)=89.44µs p(95)=2.39ms p(99)=11.12ms
http_req_sending.....................: min=24.01µs avg=63.86µs max=4.48ms p(50)=60.97µs p(95)=88.69µs p(99)=141.74µs
http_req_tls_handshaking.............: min=0s avg=2.8µs max=51.05ms p(50)=0s p(95)=0s p(99)=0s
http_req_waiting.....................: min=17.65ms avg=254.7ms max=1.22s p(50)=240.88ms p(95)=478.6ms p(99)=600.6ms
http_reqs............................: 347998 192.80552/s
iteration_duration...................: min=33.86ms avg=258.77ms max=1.22s p(50)=245ms p(95)=482.61ms p(99)=604.32ms
iterations...........................: 347788 192.689171/s
login_ui_enter_login_name_duration...: min=218.61ms avg=218.61ms max=218.61ms p(50)=218.61ms p(95)=218.61ms p(99)=218.61ms
login_ui_enter_password_duration.....: min=18ms avg=18ms max=18ms p(50)=18ms p(95)=18ms p(99)=18ms
login_ui_init_login_duration.........: min=90.96ms avg=90.96ms max=90.96ms p(50)=90.96ms p(95)=90.96ms p(99)=90.96ms
login_ui_token_duration..............: min=140.02ms avg=140.02ms max=140.02ms p(50)=140.02ms p(95)=140.02ms p(99)=140.02ms
oidc_token_duration..................: min=29.85ms avg=255.38ms max=1.22s p(50)=241.61ms p(95)=479.23ms p(99)=600.95ms
org_create_org_duration..............: min=64.51ms avg=64.51ms max=64.51ms p(50)=64.51ms p(95)=64.51ms p(99)=64.51ms
user_add_machine_key_duration........: min=44.93ms avg=87.89ms max=159.52ms p(50)=84.43ms p(95)=144.59ms p(99)=155.54ms
user_create_machine_duration.........: min=65.75ms avg=266.53ms max=421.58ms p(50)=276.59ms p(95)=380.84ms p(99)=414.43ms
vus..................................: 0 min=0 max=50
vus_max..............................: 50 min=50 max=50
running (30m04.9s), 00/50 VUs, 347788 complete and 0 interrupted iterations
default ✓ [======================================] 50 VUs 30m0s
```

File diff suppressed because it is too large

View File

@ -75,8 +75,8 @@ To install run:
```bash
flutter pub add http
flutter pub add flutter_web_auth_2
flutter pub add flutter_secure_storage
flutter pub add oidc
flutter pub add oidc_default_store
```
#### Setup for Android

View File

@ -10,7 +10,7 @@ import TokenExchangeResponse from "../../apis/openidoauth/_token_exchange_respon
The Token Exchange grant implements [RFC 8693, OAuth 2.0 Token Exchange](https://www.rfc-editor.org/rfc/rfc8693) and can be used to exchange tokens to a different scope, audience or subject. Changing the subject of an authenticated token is called impersonation or delegation. This guide will explain how token exchange is implemented inside ZITADEL and gives some usage examples.
:::info
Token Exchange is currently an experimental beta](/docs/support/software-release-cycles-support#beta) feature. Be sure to enable it on the [feature API](#feature-api) before using it.
Token Exchange is currently an [experimental beta](/docs/support/software-release-cycles-support#beta) feature. Be sure to enable it on the [feature API](#feature-api) before using it.
:::
In this guide we assume that the application performing the token exchange is already in possession of tokens. You should already have a good understanding on the following topics before starting with this guide:

View File

@ -6,7 +6,7 @@ The ZITADEL API has different possibilities to create users.
This can be used, if you are building your own registration page.
Use the following API call to create your users:
[Create User (Human)](/apis/resources/mgmt/management-service-import-human-user.api.mdx)
[Create User (Human)](apis/resources/user_service_v2/user-service-add-human-user.api.mdx)
## With Username and Password

View File

@ -0,0 +1,253 @@
---
title: Caches
sidebar_label: Caches
---
ZITADEL supports the use of caches to speed up the lookup of frequently needed objects. As opposed to HTTP caches, which might reside between ZITADEL and end-user applications, the cache built into ZITADEL uses active invalidation when an object gets updated. Another difference is that HTTP caches only cache the result of a complete request, while the built-in cache stores objects needed for the internal business logic. For example, each request made to ZITADEL needs to retrieve and set [instance](/docs/concepts/structure/instance) information in middleware.
:::info
Caches is currently an [experimental beta](/docs/support/software-release-cycles-support#beta) feature.
:::
## Configuration
The `Caches` configuration entry defines *connectors* which can be used by several objects. It is possible to mix *connectors* with different objects based on operational needs.
```yaml
Caches:
Connectors:
SomeConnector:
Enabled: true
SomeOption: foo
SomeObject:
# Connector must be enabled above.
# When connector is empty, this cache will be disabled.
Connector: "SomeConnector"
MaxAge: 1h
LastUsage: 10m
# Log enables cache-specific logging. Default to error log to stderr when omitted.
Log:
Level: error
```
For a full configuration reference, please see the [runtime configuration file](/docs/self-hosting/manage/configure#runtime-configuration-file) section's `defaults.yaml`.
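As with other entries in the runtime configuration, cache settings can also be supplied through environment variables. The variable names below are assumptions derived from the usual `ZITADEL_` plus uppercased configuration path convention; verify them against the comments in `defaults.yaml`:
```bash
# Assumed names following the ZITADEL_<SECTION>_<KEY> scheme (not authoritative).
export ZITADEL_CACHES_CONNECTORS_MEMORY_ENABLED=true
export ZITADEL_CACHES_INSTANCE_CONNECTOR=memory
export ZITADEL_CACHES_INSTANCE_MAXAGE=1h
export ZITADEL_CACHES_INSTANCE_LASTUSAGE=10m
```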
## Connectors
ZITADEL supports a number of *connectors*. Connectors integrate a cache with a storage backend. Users can combine connectors with the type of object cache depending on their operational and performance requirements.
When no connector is specified for an object cache, then no caching is performed. This is the current default.
### Auto prune
Some connectors take an `AutoPrune` option. This is provided for caches which don't have built-in expiry and cleanup routines. The auto pruner is a routine launched by ZITADEL that scans and removes outdated objects from the cache. Pruning comes at a cost, as it typically involves some kind of scan. However, using a long interval can cause higher storage utilization.
```yaml
Caches:
Connectors:
Memory:
Enabled: true
# AutoPrune removes invalidated or expired object from the cache.
AutoPrune:
Interval: 1m
TimeOut: 5s
```
### Redis cache
Redis is supported in simple mode. Cluster and Sentinel are not yet supported. A circuit breaker is also provided, which prevents the single Redis instance from becoming a single point of failure should it become unavailable.
Benefits:
- Centralized cache with single source of truth
- Consistent invalidation
- Very fast when network latency is kept to a minimum
- Built-in object expiry, no pruner required
Drawbacks:
- Increased operational overhead: need to run a Redis instance as part of your infrastructure.
- When running multiple servers of ZITADEL in different regions, network roundtrip time might impact performance, neutralizing the benefit of a cache.
#### Circuit breaker
A [circuit breaker](https://learn.microsoft.com/en-us/previous-versions/msp-n-p/dn589784(v=pandp.10)?redirectedfrom=MSDN) is provided for the Redis connector, to prevent a single point of failure in the case of persistent errors. When the circuit breaker opens, the cache is temporarily disabled and ignored. ZITADEL will continue to operate using queries to the database.
```yaml
Caches:
Connectors:
Redis:
Enabled: true
Addr: localhost:6379
# Many other options...
CircuitBreaker:
# Interval when the counters are reset to 0.
# 0 interval never resets the counters until the CB is opened.
Interval: 0
# Amount of consecutive failures permitted
MaxConsecutiveFailures: 5
# The ratio of failed requests out of total requests
MaxFailureRatio: 0.1
# Timeout after opening of the CB, until the state is set to half-open.
Timeout: 60s
# The amount of requests that are allowed to pass when the CB is half-open.
MaxRetryRequests: 1
```
### PostgreSQL cache
PostgreSQL can be used to store objects in unlogged tables. [Unlogged tables](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-UNLOGGED) do not write to the WAL log and are therefore faster than regular tables. If the PostgreSQL server crashes, the data from those tables are lost. ZITADEL always creates the cache schema in the `zitadel` database during [setup](./updating_scaling#the-setup-phase). This connector requires a [pruner](#auto-prune) routine.
Benefits:
- Centralized cache with single source of truth
- No operational overhead. Reuses the query connection pool and the existing `zitadel` database.
- Consistent invalidation
- Faster than regular queries which often contain `JOIN` clauses.
Drawbacks:
- Slowest of the available caching options
- Might put additional strain on the database server, limiting horizontal scalability
- CockroachDB does not support unlogged tables. When this connector is enabled against CockroachDB, it does work but little to no performance benefit is to be expected.
### Local memory cache
ZITADEL is capable of caching objects in local application memory, using hash-maps. Each ZITADEL server manages its own copy of the cache. This connector requires a [pruner](#auto-prune) routine.
Benefits:
- Fastest of the available caching options
- No operational overhead
Drawbacks:
- Inconsistent invalidation. An object invalidated in one ZITADEL server will not get invalidated in other servers.
- There's no single source of truth. Different servers may operate on a different version of an object
- Data is duplicated in each server, consuming more total memory inside a deployment.
The drawbacks restrict its usefulness in distributed deployments. However, simple installations running a single server can benefit greatly from this type of cache, for example test, development or home deployments.
If inconsistency is acceptable for short periods of time, one can choose to use this type of cache in distributed deployments with a short max age configuration.
**For example**: A ZITADEL deployment with 2 servers is serving 1000 req/sec total. The installation only has one instance[^1]. There is only a small amount of data cached (a few kB), so duplication is not a problem in this case. It is acceptable for [instance level settings](/docs/guides/manage/console/default-settings) to be outdated for a short amount of time. When the memory cache is enabled for the instance objects, with a max age of 1 second, the instance only needs to be obtained from the database 2 times per second (once for each server), saving 998 redundant queries per second. Once an instance level setting is changed, it takes up to 1 second for all the servers to get the new state.
## Objects
The following section describes the type of objects ZITADEL can currently cache. Objects are actively invalidated at the cache backend when one of their properties is changed. Each object cache defines:
- `Connector`: Selects the used [connector](#connectors) back-end. Must be activated first.
- `MaxAge`: the amount of time that an object is considered valid. When this age is passed the object is ignored (cache miss) and possibly cleaned up by the [pruner](#auto-prune) or other built-in garbage collection.
- `LastUsage`: defines usage based lifetime. Each time an object is used, its usage timestamp is updated. Popular objects remain cached, while unused objects are cleaned up. This option can be used to indirectly limit the size of the cache.
- `Log`: allows specific log settings for the cache. This can be used to debug a certain cache without having to change the global log level.
```yaml
Caches:
SomeObject:
# Connector must be enabled above.
# When connector is empty, this cache will be disabled.
Connector: ""
MaxAge: 1h
LastUsage: 10m
# Log enables cache-specific logging. Default to error log to stderr when omitted.
Log:
Level: error
AddSource: true
Formatter:
Format: text
```
### Instance
All HTTP and gRPC requests sent to ZITADEL receive an instance context. The instance is usually resolved by the domain from the request. In some cases, like the [system service](/docs/apis/resources/system/system-service), the instance can be resolved by its ID. An instance object contains many of the [default settings](/docs/guides/manage/console/default-settings):
- Instance [features](/docs/guides/manage/console/default-settings#features)
- Instance domains: generated and [custom](/docs/guides/manage/cloud/instances#add-custom-domain)
- [Trusted domains](/docs/apis/resources/admin/admin-service-add-instance-trusted-domain)
- Security settings ([IFrame policy](/docs/guides/solution-scenarios/configurations#embedding-zitadel-in-an-iframe))
- Limits[^2]
- [Allowed languages](/docs/guides/manage/console/default-settings#languages)
These settings typically change infrequently in production. ***Every*** request made to ZITADEL needs to query for the instance. This is a typical case of set once, get many times where a cache can provide a significant optimization.
### Milestones
Milestones are used to track the administrator's progress in setting up their instance. Milestones are used to render *your next steps* in the [console](/docs/guides/manage/console/overview) landing page.
Milestones are reached upon the first time a certain action is performed, for example the first application created or the first human login. In order to push a "reached" event only once, ZITADEL must keep track of the current state of milestones with an eventstore query every time an eligible action is performed. This can cause an unwanted overhead on production servers, therefore they are cached.
As an extra optimization, once all milestones are reached by the instance, an in-memory flag is set and the milestone state is never queried again from the database nor cache.
For single instance setups which have fulfilled all milestones (*your next steps* in console), enabling this cache is not needed. We mainly use it for ZITADEL cloud, where there are many instances with *incomplete* milestones.
### Organization
Most resources, like users, projects and applications, are part of an [organization](/docs/concepts/structure/organizations). Therefore many parts of the ZITADEL logic search for an organization by ID or by its primary domain.
Organization objects are quite small and receive infrequent updates after they are created:
- Change of organization name
- Deactivation / Reactivation
- Change of primary domain
- Removal
## Examples
Currently, caches are in beta and disabled by default. However, if you want to give caching a try, the following sections contain some suggested configurations for different setups.
The following configuration is recommended for single instance setups with a single ZITADEL server:
```yaml
Caches:
Connectors:
Memory:
Enabled: true
Instance:
Connector: "memory"
MaxAge: 1h
Organization:
Connector: "memory"
MaxAge: 1h
```
The following configuration is recommended for single instance setups with high traffic on multiple servers, where Redis is not available:
```yaml
Caches:
Connectors:
Memory:
Enabled: true
Postgres:
Enabled: true
Instance:
Connector: "memory"
MaxAge: 1s
Milestones:
Connector: "postgres"
MaxAge: 1h
LastUsage: 10m
Organization:
Connector: "memory"
MaxAge: 1s
```
When running many instances on multiple servers:
```yaml
Caches:
Connectors:
Redis:
Enabled: true
# Other connection options
Instance:
Connector: "redis"
MaxAge: 1h
LastUsage: 10m
Milestones:
Connector: "redis"
MaxAge: 1h
LastUsage: 10m
Organization:
Connector: "redis"
MaxAge: 1h
LastUsage: 10m
```
----
[^1]: Many deployments of ZITADEL have only one or few [instances](/docs/concepts/structure/instance). Multiple instances are mostly used for ZITADEL cloud, where each customer gets at least one instance.
[^2]: Limits are imposed by the system API, usually when customers exceed their subscription in ZITADEL cloud.

View File

@ -289,7 +289,7 @@ module.exports = {
outputDir: "docs/apis/resources/user_service_v2",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
categoryLinkSource: "auto",
},
},
session_v2: {
@ -297,7 +297,7 @@ module.exports = {
outputDir: "docs/apis/resources/session_service_v2",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
categoryLinkSource: "auto",
},
},
oidc_v2: {
@ -305,7 +305,7 @@ module.exports = {
outputDir: "docs/apis/resources/oidc_service_v2",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
categoryLinkSource: "auto",
},
},
settings_v2: {
@ -313,7 +313,7 @@ module.exports = {
outputDir: "docs/apis/resources/settings_service_v2",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
categoryLinkSource: "auto",
},
},
user_schema_v3: {

View File

@ -44,6 +44,7 @@
"react": "^18.2.0",
"react-copy-to-clipboard": "^5.1.0",
"react-dom": "^18.2.0",
"react-google-charts": "^5.2.1",
"react-player": "^2.15.1",
"sitemap": "7.1.1",
"swc-loader": "^0.2.3",

View File

@ -841,6 +841,30 @@ module.exports = {
label: "Rate Limits (Cloud)", // The link label
href: "/legal/policies/rate-limit-policy", // The internal path
},
{
type: "category",
label: "Benchmarks",
collapsed: false,
link: {
type: "doc",
id: "apis/benchmarks/index",
},
items: [
{
type: "category",
label: "v2.65.0",
link: {
title: "v2.65.0",
slug: "/apis/benchmarks/v2.65.0",
description:
"Benchmark results of Zitadel v2.65.0\n"
},
items: [
"apis/benchmarks/v2.65.0/machine_jwt_profile_grant/index",
],
},
],
},
],
selfHosting: [
{
@ -889,6 +913,7 @@ module.exports = {
"self-hosting/manage/http2",
"self-hosting/manage/tls_modes",
"self-hosting/manage/database/database",
"self-hosting/manage/cache",
"self-hosting/manage/updating_scaling",
"self-hosting/manage/usage_control",
{

View File

@ -0,0 +1,45 @@
import React from "react";
import Chart from "react-google-charts";
export function BenchmarkChart(testResults=[], height='500px') {
const options = {
legend: { position: 'bottom' },
focusTarget: 'category',
hAxis: {
title: 'timestamp',
},
vAxis: {
title: 'latency (ms)',
},
};
const data = [
[
{type:"datetime", label: "timestamp"},
{type:"number", label: "p50"},
{type:"number", label: "p95"},
{type:"number", label: "p99"},
],
]
JSON.parse(testResults.testResults).forEach((result) => {
data.push([
new Date(result.timestamp),
result.p50,
result.p95,
result.p99,
])
});
return (
<Chart
chartType="LineChart"
width="100%"
height="500px"
options={options}
data={data}
legendToggle
/>
);
}

File diff suppressed because one or more lines are too long

(binary image, 439 KiB)

View File

@ -3909,9 +3909,9 @@ cross-fetch@3.1.5:
node-fetch "2.6.7"
cross-spawn@^7.0.0, cross-spawn@^7.0.3:
version "7.0.3"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
version "7.0.6"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
dependencies:
path-key "^3.1.0"
shebang-command "^2.0.0"
@ -9479,6 +9479,11 @@ react-fast-compare@^3.0.1, react-fast-compare@^3.2.0, react-fast-compare@^3.2.2:
resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.2.tgz#929a97a532304ce9fee4bcae44234f1ce2c21d49"
integrity sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==
react-google-charts@^5.2.1:
version "5.2.1"
resolved "https://registry.yarnpkg.com/react-google-charts/-/react-google-charts-5.2.1.tgz#d9cbe8ed45d7c0fafefea5c7c3361bee76648454"
integrity sha512-mCbPiObP8yWM5A9ogej7Qp3/HX4EzOwuEzUYvcfHtL98Xt4V/brD14KgfDzSNNtyD48MNXCpq5oVaYKt0ykQUQ==
react-helmet-async@*:
version "2.0.5"
resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-2.0.5.tgz#cfc70cd7bb32df7883a8ed55502a1513747223ec"

go.mod (1 changed line)
View File

@ -56,6 +56,7 @@ require (
github.com/redis/go-redis/v9 v9.7.0
github.com/rs/cors v1.11.1
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
github.com/sony/gobreaker/v2 v2.0.0
github.com/sony/sonyflake v1.2.0
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.19.0

go.sum (2 changed lines)
View File

@ -670,6 +670,8 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sony/gobreaker/v2 v2.0.0 h1:23AaR4JQ65y4rz8JWMzgXw2gKOykZ/qfqYunll4OwJ4=
github.com/sony/gobreaker/v2 v2.0.0/go.mod h1:8JnRUz80DJ1/ne8M8v7nmTs2713i58nIt4s7XcGe/DI=
github.com/sony/sonyflake v1.2.0 h1:Pfr3A+ejSg+0SPqpoAmQgEtNDAhc2G1SUYk205qVMLQ=
github.com/sony/sonyflake v1.2.0/go.mod h1:LORtCywH/cq10ZbyfhKrHYgAUGH7mOBa76enV9txy/Y=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_GetSecurityPolicy(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
adminCtx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -72,8 +70,6 @@ func TestServer_GetSecurityPolicy(t *testing.T) {
}
func TestServer_SetSecurityPolicy(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
adminCtx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -21,8 +21,6 @@ import (
)
func TestServer_Restrictions_DisallowPublicOrgRegistration(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
regOrgUrl, err := url.Parse("http://" + instance.Domain + ":8080/ui/login/register/org")
require.NoError(t, err)

View File

@ -24,8 +24,6 @@ import (
)
func TestServer_Restrictions_AllowedLanguages(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
defer cancel()

View File

@ -42,8 +42,6 @@ func TestMain(m *testing.M) {
}
func TestServer_AddOrganization(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(CTX, Instance.DefaultOrg.Id)
tests := []struct {

View File

@ -27,8 +27,6 @@ type orgAttr struct {
}
func TestServer_ListOrganizations(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *org.ListOrganizationsRequest

View File

@ -26,7 +26,6 @@ import (
)
func TestServer_ExecutionTarget(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -25,7 +25,6 @@ func executionTargetsSingleInclude(include *action.Condition) []*action.Executio
}
func TestServer_SetExecution_Request(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -207,7 +206,6 @@ func TestServer_SetExecution_Request(t *testing.T) {
}
func TestServer_SetExecution_Request_Include(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -346,7 +344,6 @@ func TestServer_SetExecution_Request_Include(t *testing.T) {
}
func TestServer_SetExecution_Response(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -528,7 +525,6 @@ func TestServer_SetExecution_Response(t *testing.T) {
}
func TestServer_SetExecution_Event(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -716,7 +712,6 @@ func TestServer_SetExecution_Event(t *testing.T) {
}
func TestServer_SetExecution_Function(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -22,7 +22,6 @@ import (
)
func TestServer_GetTarget(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -237,7 +236,6 @@ func TestServer_GetTarget(t *testing.T) {
}
func TestServer_ListTargets(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -503,7 +501,6 @@ func TestServer_ListTargets(t *testing.T) {
}
func TestServer_SearchExecutions(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -21,7 +21,6 @@ import (
)
func TestServer_CreateTarget(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -19,7 +19,6 @@ import (
)
func TestServer_SetContactEmail(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -365,7 +364,6 @@ func TestServer_SetContactEmail(t *testing.T) {
}
func TestServer_VerifyContactEmail(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -555,7 +553,6 @@ func TestServer_VerifyContactEmail(t *testing.T) {
}
func TestServer_ResendContactEmailCode(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -18,7 +18,6 @@ import (
)
func TestServer_SetContactPhone(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -292,7 +291,6 @@ func TestServer_SetContactPhone(t *testing.T) {
}
func TestServer_VerifyContactPhone(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -484,7 +482,6 @@ func TestServer_VerifyContactPhone(t *testing.T) {
}
func TestServer_ResendContactPhoneCode(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -21,7 +21,6 @@ import (
)
func TestServer_CreateUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -230,7 +229,6 @@ func TestServer_CreateUser(t *testing.T) {
}
func TestServer_PatchUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -650,7 +648,6 @@ func TestServer_PatchUser(t *testing.T) {
}
func TestServer_DeleteUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -868,7 +865,6 @@ func unmarshalJSON(data string) *structpb.Struct {
}
func TestServer_LockUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -1070,7 +1066,6 @@ func TestServer_LockUser(t *testing.T) {
}
func TestServer_UnlockUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -1253,7 +1248,6 @@ func TestServer_UnlockUser(t *testing.T) {
}
func TestServer_DeactivateUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -1455,7 +1449,6 @@ func TestServer_DeactivateUser(t *testing.T) {
}
func TestServer_ActivateUser(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -20,7 +20,6 @@ import (
)
func TestServer_ListUserSchemas(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -214,7 +213,6 @@ func TestServer_ListUserSchemas(t *testing.T) {
}
func TestServer_GetUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -19,7 +19,6 @@ import (
)
func TestServer_CreateUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -304,7 +303,6 @@ func TestServer_CreateUserSchema(t *testing.T) {
}
func TestServer_UpdateUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -596,7 +594,6 @@ func TestServer_UpdateUserSchema(t *testing.T) {
}
func TestServer_DeactivateUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -678,7 +675,6 @@ func TestServer_DeactivateUserSchema(t *testing.T) {
}
func TestServer_ReactivateUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
@ -760,7 +756,6 @@ func TestServer_ReactivateUserSchema(t *testing.T) {
}
func TestServer_DeleteUserSchema(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ensureFeatureEnabled(t, instance)
isolatedIAMOwnerCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -15,8 +15,6 @@ import (
)
func TestServer_ListInstances(t *testing.T) {
t.Parallel()
isoInstance := integration.NewInstance(CTX)
tests := []struct {

View File

@ -22,8 +22,6 @@ import (
)
func TestServer_Limits_AuditLogRetention(t *testing.T) {
t.Parallel()
isoInstance := integration.NewInstance(CTX)
iamOwnerCtx := isoInstance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
userID, projectID, appID, projectGrantID := seedObjects(iamOwnerCtx, t, isoInstance.Client)

View File

@ -26,8 +26,6 @@ import (
)
func TestServer_Limits_Block(t *testing.T) {
t.Parallel()
isoInstance := integration.NewInstance(CTX)
iamOwnerCtx := isoInstance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
tests := []*test{

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_SetEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -148,8 +146,6 @@ func TestServer_SetEmail(t *testing.T) {
}
func TestServer_ResendEmailCode(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId()
@ -254,8 +250,6 @@ func TestServer_ResendEmailCode(t *testing.T) {
}
func TestServer_VerifyEmail(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
tests := []struct {
name string

View File

@ -20,8 +20,6 @@ import (
)
func TestServer_AddIDPLink(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
type args struct {
ctx context.Context
@ -101,8 +99,6 @@ func TestServer_AddIDPLink(t *testing.T) {
}
func TestServer_ListIDPLinks(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks-%s", gofakeit.AppName()), gofakeit.Email())
instanceIdpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
@ -257,8 +253,6 @@ func TestServer_ListIDPLinks(t *testing.T) {
}
func TestServer_RemoveIDPLink(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListIDPLinks-%s", gofakeit.AppName()), gofakeit.Email())
instanceIdpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)

View File

@ -15,8 +15,6 @@ import (
)
func TestServer_AddOTPSMS(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -123,8 +121,6 @@ func TestServer_AddOTPSMS(t *testing.T) {
}
func TestServer_RemoveOTPSMS(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -191,8 +187,6 @@ func TestServer_RemoveOTPSMS(t *testing.T) {
}
func TestServer_AddOTPEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -301,8 +295,6 @@ func TestServer_AddOTPEmail(t *testing.T) {
}
func TestServer_RemoveOTPEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)

View File

@ -19,8 +19,6 @@ import (
)
func TestServer_RegisterPasskey(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
reg, err := Client.CreatePasskeyRegistrationLink(CTX, &user.CreatePasskeyRegistrationLinkRequest{
UserId: userID,
@ -141,8 +139,6 @@ func TestServer_RegisterPasskey(t *testing.T) {
}
func TestServer_VerifyPasskeyRegistration(t *testing.T) {
t.Parallel()
userID, pkr := userWithPasskeyRegistered(t)
attestationResponse, err := Instance.WebAuthN.CreateAttestationResponse(pkr.GetPublicKeyCredentialCreationOptions())
@ -219,8 +215,6 @@ func TestServer_VerifyPasskeyRegistration(t *testing.T) {
}
func TestServer_CreatePasskeyRegistrationLink(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
type args struct {
@ -354,8 +348,6 @@ func passkeyVerify(t *testing.T, userID string, pkr *user.RegisterPasskeyRespons
}
func TestServer_RemovePasskey(t *testing.T) {
t.Parallel()
userIDWithout := Instance.CreateHumanUser(CTX).GetUserId()
userIDRegistered, pkrRegistered := userWithPasskeyRegistered(t)
userIDVerified, passkeyIDVerified := userWithPasskeyVerified(t)
@ -461,8 +453,6 @@ func TestServer_RemovePasskey(t *testing.T) {
}
func TestServer_ListPasskeys(t *testing.T) {
t.Parallel()
userIDWithout := Instance.CreateHumanUser(CTX).GetUserId()
userIDRegistered, _ := userWithPasskeyRegistered(t)
userIDVerified, passkeyIDVerified := userWithPasskeyVerified(t)

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_RequestPasswordReset(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -107,8 +105,6 @@ func TestServer_RequestPasswordReset(t *testing.T) {
}
func TestServer_SetPassword(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.SetPasswordRequest

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_SetPhone(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -124,8 +122,6 @@ func TestServer_SetPhone(t *testing.T) {
}
func TestServer_ResendPhoneCode(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId()
@ -201,8 +197,6 @@ func TestServer_ResendPhoneCode(t *testing.T) {
}
func TestServer_VerifyPhone(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
tests := []struct {
name string
@ -256,8 +250,6 @@ func TestServer_VerifyPhone(t *testing.T) {
}
func TestServer_RemovePhone(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
failResp := Instance.CreateHumanUserNoPhone(CTX)
otherUser := Instance.CreateHumanUser(CTX).GetUserId()

View File

@ -20,8 +20,6 @@ import (
)
func TestServer_GetUserByID(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg-%s", gofakeit.AppName()), gofakeit.Email())
type args struct {
ctx context.Context
@ -188,8 +186,6 @@ func TestServer_GetUserByID(t *testing.T) {
}
func TestServer_GetUserByID_Permission(t *testing.T) {
t.Parallel()
newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
newUserID := newOrg.CreatedAdmins[0].GetUserId()
@ -337,8 +333,6 @@ type userAttr struct {
}
func TestServer_ListUsers(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg-%s", gofakeit.AppName()), gofakeit.Email())
userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email())
type args struct {

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_RegisterTOTP(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -106,8 +104,6 @@ func TestServer_RegisterTOTP(t *testing.T) {
}
func TestServer_VerifyTOTPRegistration(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -211,8 +207,6 @@ func TestServer_VerifyTOTPRegistration(t *testing.T) {
}
func TestServer_RemoveTOTP(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_RegisterU2F(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
otherUser := Instance.CreateHumanUser(CTX).GetUserId()
@ -108,8 +106,6 @@ func TestServer_RegisterU2F(t *testing.T) {
}
func TestServer_VerifyU2FRegistration(t *testing.T) {
t.Parallel()
ctx, userID, pkr := ctxFromNewUserWithRegisteredU2F(t)
attestationResponse, err := Instance.WebAuthN.CreateAttestationResponse(pkr.GetPublicKeyCredentialCreationOptions())
@ -215,8 +211,6 @@ func ctxFromNewUserWithVerifiedU2F(t *testing.T) (context.Context, string, strin
}
func TestServer_RemoveU2F(t *testing.T) {
t.Parallel()
userIDWithout := Instance.CreateHumanUser(CTX).GetUserId()
ctxRegistered, userIDRegistered, pkrRegistered := ctxFromNewUserWithRegisteredU2F(t)
_, userIDVerified, u2fVerified := ctxFromNewUserWithVerifiedU2F(t)

View File

@ -53,8 +53,6 @@ func TestMain(m *testing.M) {
}
func TestServer_AddHumanUser(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
type args struct {
ctx context.Context
@ -681,8 +679,6 @@ func TestServer_AddHumanUser(t *testing.T) {
}
func TestServer_AddHumanUser_Permission(t *testing.T) {
t.Parallel()
newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
type args struct {
@ -876,8 +872,6 @@ func TestServer_AddHumanUser_Permission(t *testing.T) {
}
func TestServer_UpdateHumanUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.UpdateHumanUserRequest
@ -1239,8 +1233,6 @@ func TestServer_UpdateHumanUser(t *testing.T) {
}
func TestServer_UpdateHumanUser_Permission(t *testing.T) {
t.Parallel()
newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
newUserID := newOrg.CreatedAdmins[0].GetUserId()
@ -1324,8 +1316,6 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) {
}
func TestServer_LockUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.LockUserRequest
@ -1434,8 +1424,6 @@ func TestServer_LockUser(t *testing.T) {
}
func TestServer_UnLockUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.UnlockUserRequest
@ -1544,8 +1532,6 @@ func TestServer_UnLockUser(t *testing.T) {
}
func TestServer_DeactivateUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.DeactivateUserRequest
@ -1655,8 +1641,6 @@ func TestServer_DeactivateUser(t *testing.T) {
}
func TestServer_ReactivateUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.ReactivateUserRequest
@ -1765,8 +1749,6 @@ func TestServer_ReactivateUser(t *testing.T) {
}
func TestServer_DeleteUser(t *testing.T) {
t.Parallel()
projectResp, err := Instance.CreateProject(CTX)
require.NoError(t, err)
type args struct {
@ -1866,8 +1848,6 @@ func TestServer_DeleteUser(t *testing.T) {
}
func TestServer_StartIdentityProviderIntent(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
orgIdpResp := Instance.AddOrgGenericOAuthProvider(CTX, Instance.DefaultOrg.Id)
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg-%s", gofakeit.AppName()), gofakeit.Email())
@ -2131,9 +2111,7 @@ func TestServer_StartIdentityProviderIntent(t *testing.T) {
/*
func TestServer_RetrieveIdentityProviderIntent(t *testing.T) {
t.Parallel()
idpID := Instance.AddGenericOAuthProvider(t, CTX)
intentID := Instance.CreateIntent(t, CTX, idpID)
successfulID, token, changeDate, sequence := Instance.CreateSuccessfulOAuthIntent(t, CTX, idpID, "", "id")
successfulWithUserID, withUsertoken, withUserchangeDate, withUsersequence := Instance.CreateSuccessfulOAuthIntent(t, CTX, idpID, "user", "id")
@ -2421,8 +2399,6 @@ func ctxFromNewUserWithVerifiedPasswordlessLegacy(t *testing.T) (context.Context
}
func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
t.Parallel()
userIDWithoutAuth := Instance.CreateHumanUser(CTX).GetUserId()
userIDWithPasskey := Instance.CreateHumanUser(CTX).GetUserId()
@ -2654,8 +2630,6 @@ func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
}
func TestServer_CreateInviteCode(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.CreateInviteCodeRequest
@ -2787,8 +2761,6 @@ func TestServer_CreateInviteCode(t *testing.T) {
}
func TestServer_ResendInviteCode(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.ResendInviteCodeRequest
@ -2878,8 +2850,6 @@ func TestServer_ResendInviteCode(t *testing.T) {
}
func TestServer_VerifyInviteCode(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.VerifyInviteCodeRequest

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_SetEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -148,8 +146,6 @@ func TestServer_SetEmail(t *testing.T) {
}
func TestServer_ResendEmailCode(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId()
@ -254,8 +250,6 @@ func TestServer_ResendEmailCode(t *testing.T) {
}
func TestServer_VerifyEmail(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
tests := []struct {
name string

View File

@ -15,8 +15,6 @@ import (
)
func TestServer_AddOTPSMS(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -123,8 +121,6 @@ func TestServer_AddOTPSMS(t *testing.T) {
}
func TestServer_RemoveOTPSMS(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -191,8 +187,6 @@ func TestServer_RemoveOTPSMS(t *testing.T) {
}
func TestServer_AddOTPEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -301,8 +295,6 @@ func TestServer_AddOTPEmail(t *testing.T) {
}
func TestServer_RemoveOTPEmail(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_RegisterPasskey(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
reg, err := Client.CreatePasskeyRegistrationLink(CTX, &user.CreatePasskeyRegistrationLinkRequest{
UserId: userID,
@ -140,8 +138,6 @@ func TestServer_RegisterPasskey(t *testing.T) {
}
func TestServer_VerifyPasskeyRegistration(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
reg, err := Client.CreatePasskeyRegistrationLink(CTX, &user.CreatePasskeyRegistrationLinkRequest{
UserId: userID,
@ -230,8 +226,6 @@ func TestServer_VerifyPasskeyRegistration(t *testing.T) {
}
func TestServer_CreatePasskeyRegistrationLink(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
type args struct {

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_RequestPasswordReset(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -109,8 +107,6 @@ func TestServer_RequestPasswordReset(t *testing.T) {
}
func TestServer_SetPassword(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.SetPasswordRequest

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_SetPhone(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
tests := []struct {
@ -126,8 +124,6 @@ func TestServer_SetPhone(t *testing.T) {
}
func TestServer_ResendPhoneCode(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
verifiedUserID := Instance.CreateHumanUserVerified(CTX, Instance.DefaultOrg.Id, gofakeit.Email()).GetUserId()
@ -204,8 +200,6 @@ func TestServer_ResendPhoneCode(t *testing.T) {
}
func TestServer_VerifyPhone(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
tests := []struct {
name string
@ -258,8 +252,6 @@ func TestServer_VerifyPhone(t *testing.T) {
}
func TestServer_RemovePhone(t *testing.T) {
t.Parallel()
userResp := Instance.CreateHumanUser(CTX)
failResp := Instance.CreateHumanUserNoPhone(CTX)
otherUser := Instance.CreateHumanUser(CTX).GetUserId()

View File

@ -29,8 +29,6 @@ func detailsV2ToV2beta(obj *object.Details) *object_v2beta.Details {
}
func TestServer_GetUserByID(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetUserByIDOrg-%s", gofakeit.AppName()), gofakeit.Email())
type args struct {
ctx context.Context
@ -197,8 +195,6 @@ func TestServer_GetUserByID(t *testing.T) {
}
func TestServer_GetUserByID_Permission(t *testing.T) {
t.Parallel()
timeNow := time.Now().UTC()
newOrgOwnerEmail := gofakeit.Email()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("GetHuman-%s", gofakeit.AppName()), newOrgOwnerEmail)
@ -347,8 +343,6 @@ type userAttr struct {
}
func TestServer_ListUsers(t *testing.T) {
t.Parallel()
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("ListUsersOrg-%s", gofakeit.AppName()), gofakeit.Email())
userResp := Instance.CreateHumanUserVerified(IamCTX, orgResp.OrganizationId, gofakeit.Email())
type args struct {

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_RegisterTOTP(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -106,8 +104,6 @@ func TestServer_RegisterTOTP(t *testing.T) {
}
func TestServer_VerifyTOTPRegistration(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)
@ -216,8 +212,6 @@ func TestServer_VerifyTOTPRegistration(t *testing.T) {
}
func TestServer_RemoveTOTP(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)

View File

@ -17,8 +17,6 @@ import (
)
func TestServer_RegisterU2F(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
otherUser := Instance.CreateHumanUser(CTX).GetUserId()
@ -108,8 +106,6 @@ func TestServer_RegisterU2F(t *testing.T) {
}
func TestServer_VerifyU2FRegistration(t *testing.T) {
t.Parallel()
userID := Instance.CreateHumanUser(CTX).GetUserId()
Instance.RegisterUserPasskey(CTX, userID)
_, sessionToken, _, _ := Instance.CreateVerifiedWebAuthNSession(t, CTX, userID)

View File

@ -51,8 +51,6 @@ func TestMain(m *testing.M) {
}
func TestServer_AddHumanUser(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
type args struct {
ctx context.Context
@ -638,8 +636,6 @@ func TestServer_AddHumanUser(t *testing.T) {
}
func TestServer_AddHumanUser_Permission(t *testing.T) {
t.Parallel()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("AddHuman-%s", gofakeit.AppName()), gofakeit.Email())
type args struct {
ctx context.Context
@ -832,8 +828,6 @@ func TestServer_AddHumanUser_Permission(t *testing.T) {
}
func TestServer_UpdateHumanUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.UpdateHumanUserRequest
@ -1195,8 +1189,6 @@ func TestServer_UpdateHumanUser(t *testing.T) {
}
func TestServer_UpdateHumanUser_Permission(t *testing.T) {
t.Parallel()
newOrg := Instance.CreateOrganization(IamCTX, fmt.Sprintf("UpdateHuman-%s", gofakeit.AppName()), gofakeit.Email())
newUserID := newOrg.CreatedAdmins[0].GetUserId()
type args struct {
@ -1279,8 +1271,6 @@ func TestServer_UpdateHumanUser_Permission(t *testing.T) {
}
func TestServer_LockUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.LockUserRequest
@ -1389,8 +1379,6 @@ func TestServer_LockUser(t *testing.T) {
}
func TestServer_UnLockUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.UnlockUserRequest
@ -1499,8 +1487,6 @@ func TestServer_UnLockUser(t *testing.T) {
}
func TestServer_DeactivateUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.DeactivateUserRequest
@ -1609,8 +1595,6 @@ func TestServer_DeactivateUser(t *testing.T) {
}
func TestServer_ReactivateUser(t *testing.T) {
t.Parallel()
type args struct {
ctx context.Context
req *user.ReactivateUserRequest
@ -1719,8 +1703,6 @@ func TestServer_ReactivateUser(t *testing.T) {
}
func TestServer_DeleteUser(t *testing.T) {
t.Parallel()
projectResp, err := Instance.CreateProject(CTX)
require.NoError(t, err)
type args struct {
@ -1820,8 +1802,6 @@ func TestServer_DeleteUser(t *testing.T) {
}
func TestServer_AddIDPLink(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
type args struct {
ctx context.Context
@ -1901,8 +1881,6 @@ func TestServer_AddIDPLink(t *testing.T) {
}
func TestServer_StartIdentityProviderIntent(t *testing.T) {
t.Parallel()
idpResp := Instance.AddGenericOAuthProvider(IamCTX, Instance.DefaultOrg.Id)
orgIdpID := Instance.AddOrgGenericOAuthProvider(CTX, Instance.DefaultOrg.Id)
orgResp := Instance.CreateOrganization(IamCTX, fmt.Sprintf("NotDefaultOrg-%s", gofakeit.AppName()), gofakeit.Email())
@ -2166,9 +2144,7 @@ func TestServer_StartIdentityProviderIntent(t *testing.T) {
/*
func TestServer_RetrieveIdentityProviderIntent(t *testing.T) {
t.Parallel()
idpID := Instance.AddGenericOAuthProvider(t, CTX)
intentID := Instance.CreateIntent(t, CTX, idpID)
successfulID, token, changeDate, sequence := Instance.CreateSuccessfulOAuthIntent(t, CTX, idpID.Id, "", "id")
successfulWithUserID, withUsertoken, withUserchangeDate, withUsersequence := Instance.CreateSuccessfulOAuthIntent(t, CTX, idpID.Id, "user", "id")
@ -2428,8 +2404,6 @@ func TestServer_RetrieveIdentityProviderIntent(t *testing.T) {
*/
func TestServer_ListAuthenticationMethodTypes(t *testing.T) {
t.Parallel()
userIDWithoutAuth := Instance.CreateHumanUser(CTX).GetUserId()
userIDWithPasskey := Instance.CreateHumanUser(CTX).GetUserId()

View File

@ -28,8 +28,6 @@ var (
)
func TestOPStorage_CreateAuthRequest(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
id := createAuthRequest(t, Instance, clientID, redirectURI)
@ -37,8 +35,6 @@ func TestOPStorage_CreateAuthRequest(t *testing.T) {
}
func TestOPStorage_CreateAccessToken_code(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
authRequestID := createAuthRequest(t, Instance, clientID, redirectURI)
sessionID, sessionToken, startTime, changeTime := Instance.CreateVerifiedWebAuthNSession(t, CTXLOGIN, User.GetUserId())
@ -78,8 +74,6 @@ func TestOPStorage_CreateAccessToken_code(t *testing.T) {
}
func TestOPStorage_CreateAccessToken_implicit(t *testing.T) {
t.Parallel()
clientID := createImplicitClient(t)
authRequestID := createAuthRequestImplicit(t, clientID, redirectURIImplicit)
sessionID, sessionToken, startTime, changeTime := Instance.CreateVerifiedWebAuthNSession(t, CTXLOGIN, User.GetUserId())
@ -130,8 +124,6 @@ func TestOPStorage_CreateAccessToken_implicit(t *testing.T) {
}
func TestOPStorage_CreateAccessAndRefreshTokens_code(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
authRequestID := createAuthRequest(t, Instance, clientID, redirectURI, oidc.ScopeOpenID, oidc.ScopeOfflineAccess)
sessionID, sessionToken, startTime, changeTime := Instance.CreateVerifiedWebAuthNSession(t, CTXLOGIN, User.GetUserId())
@ -155,8 +147,6 @@ func TestOPStorage_CreateAccessAndRefreshTokens_code(t *testing.T) {
}
func TestOPStorage_CreateAccessAndRefreshTokens_refresh(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -193,8 +183,6 @@ func TestOPStorage_CreateAccessAndRefreshTokens_refresh(t *testing.T) {
}
func TestOPStorage_RevokeToken_access_token(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -238,8 +226,6 @@ func TestOPStorage_RevokeToken_access_token(t *testing.T) {
}
func TestOPStorage_RevokeToken_access_token_invalid_token_hint_type(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -277,8 +263,6 @@ func TestOPStorage_RevokeToken_access_token_invalid_token_hint_type(t *testing.T
}
func TestOPStorage_RevokeToken_refresh_token(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -322,8 +306,6 @@ func TestOPStorage_RevokeToken_refresh_token(t *testing.T) {
}
func TestOPStorage_RevokeToken_refresh_token_invalid_token_type_hint(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -361,8 +343,6 @@ func TestOPStorage_RevokeToken_refresh_token_invalid_token_type_hint(t *testing.
}
func TestOPStorage_RevokeToken_invalid_client(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
authRequestID := createAuthRequest(t, Instance, clientID, redirectURI, oidc.ScopeOpenID, oidc.ScopeOfflineAccess)
sessionID, sessionToken, startTime, changeTime := Instance.CreateVerifiedWebAuthNSession(t, CTXLOGIN, User.GetUserId())
@ -393,8 +373,6 @@ func TestOPStorage_RevokeToken_invalid_client(t *testing.T) {
}
func TestOPStorage_TerminateSession(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -432,8 +410,6 @@ func TestOPStorage_TerminateSession(t *testing.T) {
}
func TestOPStorage_TerminateSession_refresh_grant(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)
@ -478,8 +454,6 @@ func TestOPStorage_TerminateSession_refresh_grant(t *testing.T) {
}
func TestOPStorage_TerminateSession_empty_id_token_hint(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)

View File

@ -24,8 +24,6 @@ import (
)
func TestServer_Introspect(t *testing.T) {
t.Parallel()
project, err := Instance.CreateProject(CTX)
require.NoError(t, err)
app, err := Instance.CreateOIDCNativeClient(CTX, redirectURI, logoutRedirectURI, project.GetId(), false)
@ -143,8 +141,6 @@ func TestServer_Introspect(t *testing.T) {
}
func TestServer_Introspect_invalid_auth_invalid_token(t *testing.T) {
t.Parallel()
// ensure that when an invalid authentication and an invalid token are sent, the authentication error is returned
// https://github.com/zitadel/zitadel/pull/8133
resourceServer, err := Instance.CreateResourceServerClientCredentials(CTX, "xxxxx", "xxxxx")
@ -191,8 +187,6 @@ func assertIntrospection(
// TestServer_VerifyClient tests verification by running code flow tests
// with clients that have different authentication methods.
func TestServer_VerifyClient(t *testing.T) {
t.Parallel()
sessionID, sessionToken, startTime, changeTime := Instance.CreateVerifiedWebAuthNSession(t, CTXLOGIN, User.GetUserId())
project, err := Instance.CreateProject(CTX)
require.NoError(t, err)

View File

@ -24,8 +24,6 @@ import (
)
func TestServer_Keys(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ctxLogin := instance.WithAuthorization(CTX, integration.UserTypeLogin)

View File

@ -18,8 +18,6 @@ import (
)
func TestServer_RefreshToken_Status(t *testing.T) {
t.Parallel()
clientID, _ := createClient(t, Instance)
provider, err := Instance.CreateRelyingParty(CTX, clientID, redirectURI)
require.NoError(t, err)

View File

@ -22,8 +22,6 @@ import (
)
func TestServer_ClientCredentialsExchange(t *testing.T) {
t.Parallel()
machine, name, clientID, clientSecret, err := Instance.CreateOIDCCredentialsClient(CTX)
require.NoError(t, err)

View File

@ -143,8 +143,6 @@ func refreshTokenVerifier(ctx context.Context, provider rp.RelyingParty, subject
}
func TestServer_TokenExchange(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
userResp := instance.CreateHumanUser(ctx)
@ -365,8 +363,6 @@ func TestServer_TokenExchange(t *testing.T) {
}
func TestServer_TokenExchangeImpersonation(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
userResp := instance.CreateHumanUser(ctx)
@ -581,8 +577,6 @@ func TestServer_TokenExchangeImpersonation(t *testing.T) {
// This test tries to call the zitadel API with an impersonated token,
// which should fail.
func TestImpersonation_API_Call(t *testing.T) {
t.Parallel()
instance := integration.NewInstance(CTX)
ctx := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)

View File

@ -21,8 +21,6 @@ import (
)
func TestServer_JWTProfile(t *testing.T) {
t.Parallel()
user, name, keyData, err := Instance.CreateOIDCJWTProfileClient(CTX)
require.NoError(t, err)

View File

@ -1,8 +1,8 @@
package login
import (
"fmt"
"net/http"
"net/url"
http_mw "github.com/zitadel/zitadel/internal/api/http/middleware"
"github.com/zitadel/zitadel/internal/domain"
@ -38,13 +38,13 @@ type initPasswordData struct {
HasSymbol string
}
func InitPasswordLink(origin, userID, code, orgID, authRequestID string) string {
v := url.Values{}
v.Set(queryInitPWUserID, userID)
v.Set(queryInitPWCode, code)
v.Set(queryOrgID, orgID)
v.Set(QueryAuthRequestID, authRequestID)
return externalLink(origin) + EndpointInitPassword + "?" + v.Encode()
func InitPasswordLinkTemplate(origin, userID, orgID, authRequestID string) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s",
externalLink(origin), EndpointInitPassword,
queryInitPWUserID, userID,
queryInitPWCode, "{{.Code}}",
queryOrgID, orgID,
QueryAuthRequestID, authRequestID)
}
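For orientation, a brief in-package sketch (made-up argument values, not part of the change) of what the new template variant produces; unlike the removed InitPasswordLink, the code is not part of the URL yet:

```go
// In-package sketch with made-up values.
link := InitPasswordLinkTemplate("https://acme.example.com", "user-id", "org-id", "auth-request-id")
// The returned URL carries the user, org and auth request IDs plus a literal
// "{{.Code}}" placeholder, which is only substituted when the notification
// is rendered (see the notification and URL-template changes further below).
_ = link
```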
func (l *Login) handleInitPassword(w http.ResponseWriter, r *http.Request) {

View File

@ -1,8 +1,8 @@
package login
import (
"fmt"
"net/http"
"net/url"
"strconv"
http_mw "github.com/zitadel/zitadel/internal/api/http/middleware"
@ -44,15 +44,15 @@ type initUserData struct {
HasSymbol string
}
func InitUserLink(origin, userID, loginName, code, orgID string, passwordSet bool, authRequestID string) string {
v := url.Values{}
v.Set(queryInitUserUserID, userID)
v.Set(queryInitUserLoginName, loginName)
v.Set(queryInitUserCode, code)
v.Set(queryOrgID, orgID)
v.Set(queryInitUserPassword, strconv.FormatBool(passwordSet))
v.Set(QueryAuthRequestID, authRequestID)
return externalLink(origin) + EndpointInitUser + "?" + v.Encode()
func InitUserLinkTemplate(origin, userID, orgID, authRequestID string) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s&%s=%s&%s=%s",
externalLink(origin), EndpointInitUser,
queryInitUserUserID, userID,
queryInitUserLoginName, "{{.LoginName}}",
queryInitUserCode, "{{.Code}}",
queryOrgID, orgID,
queryInitUserPassword, "{{.PasswordSet}}",
QueryAuthRequestID, authRequestID)
}
func (l *Login) handleInitUser(w http.ResponseWriter, r *http.Request) {

View File

@ -1,8 +1,8 @@
package login
import (
"fmt"
"net/http"
"net/url"
http_mw "github.com/zitadel/zitadel/internal/api/http/middleware"
"github.com/zitadel/zitadel/internal/domain"
@ -40,14 +40,14 @@ type inviteUserData struct {
HasSymbol string
}
func InviteUserLink(origin, userID, loginName, code, orgID string, authRequestID string) string {
v := url.Values{}
v.Set(queryInviteUserUserID, userID)
v.Set(queryInviteUserLoginName, loginName)
v.Set(queryInviteUserCode, code)
v.Set(queryOrgID, orgID)
v.Set(QueryAuthRequestID, authRequestID)
return externalLink(origin) + EndpointInviteUser + "?" + v.Encode()
func InviteUserLinkTemplate(origin, userID, orgID string, authRequestID string) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s&%s=%s",
externalLink(origin), EndpointInviteUser,
queryInviteUserUserID, userID,
queryInviteUserLoginName, "{{.LoginName}}",
queryInviteUserCode, "{{.Code}}",
queryOrgID, orgID,
QueryAuthRequestID, authRequestID)
}
func (l *Login) handleInviteUser(w http.ResponseWriter, r *http.Request) {

View File

@ -2,8 +2,8 @@ package login
import (
"context"
"fmt"
"net/http"
"net/url"
"slices"
"github.com/zitadel/logging"
@ -43,13 +43,13 @@ type mailVerificationData struct {
HasSymbol string
}
func MailVerificationLink(origin, userID, code, orgID, authRequestID string) string {
v := url.Values{}
v.Set(queryUserID, userID)
v.Set(queryCode, code)
v.Set(queryOrgID, orgID)
v.Set(QueryAuthRequestID, authRequestID)
return externalLink(origin) + EndpointMailVerification + "?" + v.Encode()
func MailVerificationLinkTemplate(origin, userID, orgID, authRequestID string) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%s&%s=%s",
externalLink(origin), EndpointMailVerification,
queryUserID, userID,
queryCode, "{{.Code}}",
queryOrgID, orgID,
QueryAuthRequestID, authRequestID)
}
func (l *Login) handleMailVerification(w http.ResponseWriter, r *http.Request) {

View File

@ -27,8 +27,8 @@ type mfaOTPFormData struct {
Provider domain.MFAType `schema:"provider"`
}
func OTPLink(origin, authRequestID, code string, provider domain.MFAType) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%d", externalLink(origin), EndpointMFAOTPVerify, QueryAuthRequestID, authRequestID, queryCode, code, querySelectedProvider, provider)
func OTPLinkTemplate(origin, authRequestID string, provider domain.MFAType) string {
return fmt.Sprintf("%s%s?%s=%s&%s=%s&%s=%d", externalLink(origin), EndpointMFAOTPVerify, QueryAuthRequestID, authRequestID, queryCode, "{{.Code}}", querySelectedProvider, provider)
}
// renderOTPVerification renders the OTP verification for SMS and Email based on the passed MFAType.

View File

@ -16,6 +16,7 @@ const (
PurposeUnspecified Purpose = iota
PurposeAuthzInstance
PurposeMilestones
PurposeOrganization
)
// Cache stores objects with a value of type `V`.

View File

@ -19,8 +19,9 @@ type CachesConfig struct {
Postgres pg.Config
Redis redis.Config
}
Instance *cache.Config
Milestones *cache.Config
Instance *cache.Config
Milestones *cache.Config
Organization *cache.Config
}
type Connectors struct {

View File

@ -0,0 +1,90 @@
package redis
import (
"context"
"errors"
"time"
"github.com/redis/go-redis/v9"
"github.com/sony/gobreaker/v2"
"github.com/zitadel/logging"
)
const defaultInflightSize = 100000
type CBConfig struct {
// Interval when the counters are reset to 0.
// An interval of 0 never resets the counters until the CB opens.
Interval time.Duration
// Number of consecutive failures permitted
MaxConsecutiveFailures uint32
// The ratio of failed requests out of total requests
MaxFailureRatio float64
// Timeout after opening of the CB, until the state is set to half-open.
Timeout time.Duration
// The number of requests allowed to pass while the CB is half-open.
MaxRetryRequests uint32
}
func (config *CBConfig) readyToTrip(counts gobreaker.Counts) bool {
if config.MaxConsecutiveFailures > 0 && counts.ConsecutiveFailures > config.MaxConsecutiveFailures {
return true
}
if config.MaxFailureRatio > 0 && counts.Requests > 0 {
failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
return failureRatio > config.MaxFailureRatio
}
return false
}
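A hedged sketch of how these thresholds interact, using illustrative values rather than project defaults (written as it would appear inside this package):

```go
// Illustrative thresholds only; not shipped defaults.
var exampleCB = &CBConfig{
	Interval:               time.Minute,      // reset failure counters every minute while closed
	MaxConsecutiveFailures: 5,                // readyToTrip fires on the 6th consecutive failure
	MaxFailureRatio:        0.1,              // ...or once more than 10% of requests have failed
	Timeout:                30 * time.Second, // stay open for 30s before probing again
	MaxRetryRequests:       1,                // allow a single probe request while half-open
}
```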
// limiter implements [redis.Limiter] as a circuit breaker.
type limiter struct {
inflight chan func(success bool)
cb *gobreaker.TwoStepCircuitBreaker[struct{}]
}
func newLimiter(config *CBConfig, maxActiveConns int) redis.Limiter {
if config == nil {
return nil
}
// The size of the inflight channel needs to be big enough for maxActiveConns to prevent blocking.
// When that is 0 (no limit), we must set a sane default.
if maxActiveConns <= 0 {
maxActiveConns = defaultInflightSize
}
return &limiter{
inflight: make(chan func(success bool), maxActiveConns),
cb: gobreaker.NewTwoStepCircuitBreaker[struct{}](gobreaker.Settings{
Name: "redis cache",
MaxRequests: config.MaxRetryRequests,
Interval: config.Interval,
Timeout: config.Timeout,
ReadyToTrip: config.readyToTrip,
OnStateChange: func(name string, from, to gobreaker.State) {
logging.WithFields("name", name, "from", from, "to", to).Warn("circuit breaker state change")
},
}),
}
}
// Allow implements [redis.Limiter].
func (l *limiter) Allow() error {
done, err := l.cb.Allow()
if err != nil {
return err
}
l.inflight <- done
return nil
}
// ReportResult implements [redis.Limiter].
//
// ReportResult checks the error returned by the Redis client.
// `nil`, [redis.Nil] and [context.Canceled] are not considered failures.
// Any other error, such as connection errors or [context.DeadlineExceeded], is counted as a failure.
func (l *limiter) ReportResult(err error) {
done := <-l.inflight
done(err == nil ||
errors.Is(err, redis.Nil) ||
errors.Is(err, context.Canceled))
}
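For context, go-redis invokes a configured Limiter around every command: Allow gates the call and ReportResult feeds the outcome back. A minimal sketch of that contract (the wrapper function and package name are illustrative, not part of go-redis or this change):

```go
package redisutil // illustrative only

import "github.com/redis/go-redis/v9"

// runWithLimiter mirrors how go-redis consults a Limiter around one command.
func runWithLimiter(l redis.Limiter, cmd func() error) error {
	if err := l.Allow(); err != nil {
		return err // breaker is open: fail fast without touching Redis
	}
	err := cmd()
	l.ReportResult(err) // lets the breaker count the failure or record success
	return err
}
```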

View File

@ -0,0 +1,168 @@
package redis
import (
"context"
"testing"
"time"
"github.com/sony/gobreaker/v2"
"github.com/stretchr/testify/require"
"github.com/zitadel/zitadel/internal/cache"
)
func TestCBConfig_readyToTrip(t *testing.T) {
type fields struct {
MaxConsecutiveFailures uint32
MaxFailureRatio float64
}
type args struct {
counts gobreaker.Counts
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "disabled",
fields: fields{},
args: args{
counts: gobreaker.Counts{
Requests: 100,
ConsecutiveFailures: 5,
TotalFailures: 10,
},
},
want: false,
},
{
name: "no failures",
fields: fields{
MaxConsecutiveFailures: 5,
MaxFailureRatio: 0.1,
},
args: args{
counts: gobreaker.Counts{
Requests: 100,
ConsecutiveFailures: 0,
TotalFailures: 0,
},
},
want: false,
},
{
name: "some failures",
fields: fields{
MaxConsecutiveFailures: 5,
MaxFailureRatio: 0.1,
},
args: args{
counts: gobreaker.Counts{
Requests: 100,
ConsecutiveFailures: 5,
TotalFailures: 10,
},
},
want: false,
},
{
name: "consecutive exceeded",
fields: fields{
MaxConsecutiveFailures: 5,
MaxFailureRatio: 0.1,
},
args: args{
counts: gobreaker.Counts{
Requests: 100,
ConsecutiveFailures: 6,
TotalFailures: 0,
},
},
want: true,
},
{
name: "ratio exceeded",
fields: fields{
MaxConsecutiveFailures: 5,
MaxFailureRatio: 0.1,
},
args: args{
counts: gobreaker.Counts{
Requests: 100,
ConsecutiveFailures: 1,
TotalFailures: 11,
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := &CBConfig{
MaxConsecutiveFailures: tt.fields.MaxConsecutiveFailures,
MaxFailureRatio: tt.fields.MaxFailureRatio,
}
if got := config.readyToTrip(tt.args.counts); got != tt.want {
t.Errorf("CBConfig.readyToTrip() = %v, want %v", got, tt.want)
}
})
}
}
func Test_redisCache_limiter(t *testing.T) {
c, _ := prepareCache(t, cache.Config{}, withCircuitBreakerOption(
&CBConfig{
MaxConsecutiveFailures: 2,
MaxFailureRatio: 0.4,
Timeout: 100 * time.Millisecond,
MaxRetryRequests: 1,
},
))
ctx := context.Background()
canceledCtx, cancel := context.WithCancel(ctx)
cancel()
timedOutCtx, cancel := context.WithTimeout(ctx, -1)
defer cancel()
// CB is and should remain closed
for i := 0; i < 10; i++ {
err := c.Truncate(ctx)
require.NoError(t, err)
}
for i := 0; i < 10; i++ {
err := c.Truncate(canceledCtx)
require.ErrorIs(t, err, context.Canceled)
}
// Timeout err should open the CB after more than 2 failures
for i := 0; i < 3; i++ {
err := c.Truncate(timedOutCtx)
if i > 2 {
require.ErrorIs(t, err, gobreaker.ErrOpenState)
} else {
require.ErrorIs(t, err, context.DeadlineExceeded)
}
}
time.Sleep(200 * time.Millisecond)
// CB should be half-open. If the first command fails, the CB will be Open again
err := c.Truncate(timedOutCtx)
require.ErrorIs(t, err, context.DeadlineExceeded)
err = c.Truncate(timedOutCtx)
require.ErrorIs(t, err, gobreaker.ErrOpenState)
// Reset the CB to closed
time.Sleep(200 * time.Millisecond)
err = c.Truncate(ctx)
require.NoError(t, err)
// Exceed the ratio
err = c.Truncate(timedOutCtx)
require.ErrorIs(t, err, context.DeadlineExceeded)
err = c.Truncate(ctx)
require.ErrorIs(t, err, gobreaker.ErrOpenState)
}

View File

@ -105,6 +105,8 @@ type Config struct {
// Add suffix to client name. Default is empty.
IdentitySuffix string
CircuitBreaker *CBConfig
}
type Connector struct {
@ -146,6 +148,7 @@ func optionsFromConfig(c Config) *redis.Options {
ConnMaxLifetime: c.ConnMaxLifetime,
DisableIndentity: c.DisableIndentity,
IdentitySuffix: c.IdentitySuffix,
Limiter: newLimiter(c.CircuitBreaker, c.MaxActiveConns),
}
if c.EnableTLS {
opts.TLSConfig = new(tls.Config)

View File

@ -6,7 +6,6 @@ import (
"time"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/logging"
@ -689,26 +688,34 @@ func Test_redisCache_Truncate(t *testing.T) {
}
}
func prepareCache(t *testing.T, conf cache.Config) (cache.Cache[testIndex, string, *testObject], *miniredis.Miniredis) {
func prepareCache(t *testing.T, conf cache.Config, options ...func(*Config)) (cache.Cache[testIndex, string, *testObject], *miniredis.Miniredis) {
conf.Log = &logging.Config{
Level: "debug",
AddSource: true,
}
server := miniredis.RunT(t)
server.Select(testDB)
client := redis.NewClient(&redis.Options{
Network: "tcp",
Addr: server.Addr(),
})
connConfig := Config{
Enabled: true,
Network: "tcp",
Addr: server.Addr(),
DisableIndentity: true,
}
for _, option := range options {
option(&connConfig)
}
connector := NewConnector(connConfig)
t.Cleanup(func() {
client.Close()
connector.Close()
server.Close()
})
connector := NewConnector(Config{
Enabled: true,
Network: "tcp",
Addr: server.Addr(),
})
c := NewCache[testIndex, string, *testObject](conf, connector, testDB, testIndices)
return c, server
}
func withCircuitBreakerOption(cb *CBConfig) func(*Config) {
return func(c *Config) {
c.CircuitBreaker = cb
}
}

View File

@ -7,11 +7,11 @@ import (
"strings"
)
const _PurposeName = "unspecifiedauthz_instancemilestones"
const _PurposeName = "unspecifiedauthz_instancemilestonesorganization"
var _PurposeIndex = [...]uint8{0, 11, 25, 35}
var _PurposeIndex = [...]uint8{0, 11, 25, 35, 47}
const _PurposeLowerName = "unspecifiedauthz_instancemilestones"
const _PurposeLowerName = "unspecifiedauthz_instancemilestonesorganization"
func (i Purpose) String() string {
if i < 0 || i >= Purpose(len(_PurposeIndex)-1) {
@ -27,9 +27,10 @@ func _PurposeNoOp() {
_ = x[PurposeUnspecified-(0)]
_ = x[PurposeAuthzInstance-(1)]
_ = x[PurposeMilestones-(2)]
_ = x[PurposeOrganization-(3)]
}
var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones}
var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones, PurposeOrganization}
var _PurposeNameToValueMap = map[string]Purpose{
_PurposeName[0:11]: PurposeUnspecified,
@ -38,12 +39,15 @@ var _PurposeNameToValueMap = map[string]Purpose{
_PurposeLowerName[11:25]: PurposeAuthzInstance,
_PurposeName[25:35]: PurposeMilestones,
_PurposeLowerName[25:35]: PurposeMilestones,
_PurposeName[35:47]: PurposeOrganization,
_PurposeLowerName[35:47]: PurposeOrganization,
}
var _PurposeNames = []string{
_PurposeName[0:11],
_PurposeName[11:25],
_PurposeName[25:35],
_PurposeName[35:47],
}
// PurposeString retrieves an enum value from the enum constants string name.

View File

@ -199,7 +199,7 @@ func TestCommandSide_ChangeDebugNotificationProviderLog(t *testing.T) {
},
},
{
name: "change, ok",
name: "change, ok 1",
fields: fields{
eventstore: eventstoreExpect(
t,
@ -232,7 +232,7 @@ func TestCommandSide_ChangeDebugNotificationProviderLog(t *testing.T) {
},
},
{
name: "change, ok",
name: "change, ok 2",
fields: fields{
eventstore: eventstoreExpect(
t,

View File

@ -0,0 +1,162 @@
package command
import (
"context"
"database/sql"
"time"
"github.com/zitadel/zitadel/internal/crypto"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/eventstore"
"github.com/zitadel/zitadel/internal/query"
"github.com/zitadel/zitadel/internal/repository/notification"
)
type NotificationRequest struct {
UserID string
UserResourceOwner string
TriggerOrigin string
URLTemplate string
Code *crypto.CryptoValue
CodeExpiry time.Duration
EventType eventstore.EventType
NotificationType domain.NotificationType
MessageType string
UnverifiedNotificationChannel bool
Args *domain.NotificationArguments
AggregateID string
AggregateResourceOwner string
IsOTP bool
RequiresPreviousDomain bool
}
type NotificationRetryRequest struct {
NotificationRequest
BackOff time.Duration
NotifyUser *query.NotifyUser
}
func NewNotificationRequest(
userID, resourceOwner, triggerOrigin string,
eventType eventstore.EventType,
notificationType domain.NotificationType,
messageType string,
) *NotificationRequest {
return &NotificationRequest{
UserID: userID,
UserResourceOwner: resourceOwner,
TriggerOrigin: triggerOrigin,
EventType: eventType,
NotificationType: notificationType,
MessageType: messageType,
}
}
func (r *NotificationRequest) WithCode(code *crypto.CryptoValue, expiry time.Duration) *NotificationRequest {
r.Code = code
r.CodeExpiry = expiry
return r
}
func (r *NotificationRequest) WithURLTemplate(urlTemplate string) *NotificationRequest {
r.URLTemplate = urlTemplate
return r
}
func (r *NotificationRequest) WithUnverifiedChannel() *NotificationRequest {
r.UnverifiedNotificationChannel = true
return r
}
func (r *NotificationRequest) WithArgs(args *domain.NotificationArguments) *NotificationRequest {
r.Args = args
return r
}
func (r *NotificationRequest) WithAggregate(id, resourceOwner string) *NotificationRequest {
r.AggregateID = id
r.AggregateResourceOwner = resourceOwner
return r
}
func (r *NotificationRequest) WithOTP() *NotificationRequest {
r.IsOTP = true
return r
}
func (r *NotificationRequest) WithPreviousDomain() *NotificationRequest {
r.RequiresPreviousDomain = true
return r
}
// RequestNotification writes a new notification.RequestEvent with the notification.Aggregate to the eventstore
func (c *Commands) RequestNotification(
ctx context.Context,
resourceOwner string,
request *NotificationRequest,
) error {
id, err := c.idGenerator.Next()
if err != nil {
return err
}
_, err = c.eventstore.Push(ctx, notification.NewRequestedEvent(ctx, &notification.NewAggregate(id, resourceOwner).Aggregate,
request.UserID,
request.UserResourceOwner,
request.AggregateID,
request.AggregateResourceOwner,
request.TriggerOrigin,
request.URLTemplate,
request.Code,
request.CodeExpiry,
request.EventType,
request.NotificationType,
request.MessageType,
request.UnverifiedNotificationChannel,
request.IsOTP,
request.RequiresPreviousDomain,
request.Args))
return err
}
// NotificationCanceled writes a new notification.CanceledEvent with the notification.Aggregate to the eventstore
func (c *Commands) NotificationCanceled(ctx context.Context, tx *sql.Tx, id, resourceOwner string, requestError error) error {
var errorMessage string
if requestError != nil {
errorMessage = requestError.Error()
}
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewCanceledEvent(ctx, &notification.NewAggregate(id, resourceOwner).Aggregate, errorMessage))
return err
}
// NotificationSent writes a new notification.SentEvent with the notification.Aggregate to the eventstore
func (c *Commands) NotificationSent(ctx context.Context, tx *sql.Tx, id, resourceOwner string) error {
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewSentEvent(ctx, &notification.NewAggregate(id, resourceOwner).Aggregate))
return err
}
// NotificationRetryRequested writes a new notification.RetryRequestEvent with the notification.Aggregate to the eventstore
func (c *Commands) NotificationRetryRequested(ctx context.Context, tx *sql.Tx, id, resourceOwner string, request *NotificationRetryRequest, requestError error) error {
var errorMessage string
if requestError != nil {
errorMessage = requestError.Error()
}
_, err := c.eventstore.PushWithClient(ctx, tx, notification.NewRetryRequestedEvent(ctx, &notification.NewAggregate(id, resourceOwner).Aggregate,
request.UserID,
request.UserResourceOwner,
request.AggregateID,
request.AggregateResourceOwner,
request.TriggerOrigin,
request.URLTemplate,
request.Code,
request.CodeExpiry,
request.EventType,
request.NotificationType,
request.MessageType,
request.UnverifiedNotificationChannel,
request.IsOTP,
request.Args,
request.NotifyUser,
request.BackOff,
errorMessage))
return err
}
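
Together these commands cover the notification lifecycle: a RequestedEvent starts it, and after a delivery attempt the worker records the outcome inside its own transaction. A hedged sketch of that outcome handling, in the same package; the wrapper, the retry decision and the 30-second back-off are assumptions, not part of this change:

```go
// Illustrative outcome handling inside a worker transaction: c is *Commands,
// tx is the worker's own *sql.Tx, sendErr is the result of an assumed
// delivery attempt.
func recordOutcome(ctx context.Context, c *Commands, tx *sql.Tx, id, resourceOwner string, request *NotificationRequest, sendErr error, retriesLeft bool) error {
	switch {
	case sendErr == nil:
		return c.NotificationSent(ctx, tx, id, resourceOwner)
	case retriesLeft:
		retry := &NotificationRetryRequest{
			NotificationRequest: *request,
			BackOff:             30 * time.Second, // placeholder back-off
		}
		return c.NotificationRetryRequested(ctx, tx, id, resourceOwner, retry, sendErr)
	default:
		return c.NotificationCanceled(ctx, tx, id, resourceOwner, sendErr)
	}
}
```
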

View File

@ -18,6 +18,42 @@ import (
"github.com/zitadel/zitadel/internal/zerrors"
)
type QueryExecuter interface {
Query(query string, args ...any) (*sql.Rows, error)
QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
Exec(query string, args ...any) (sql.Result, error)
ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}
type Client interface {
QueryExecuter
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
Begin() (*sql.Tx, error)
}
type Tx interface {
QueryExecuter
Commit() error
Rollback() error
}
var (
_ Client = (*sql.DB)(nil)
_ Tx = (*sql.Tx)(nil)
)
func CloseTransaction(tx Tx, err error) error {
if err != nil {
rollbackErr := tx.Rollback()
logging.OnError(rollbackErr).Error("failed to rollback transaction")
return err
}
commitErr := tx.Commit()
logging.OnError(commitErr).Error("failed to commit transaction")
return commitErr
}
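
CloseTransaction is written for the usual named-return pattern: roll back when the surrounding function failed, otherwise commit and surface any commit error. A minimal usage sketch, with an illustrative statement and table name:

```go
// Sketch of the intended defer pattern; client is any Client (e.g. *sql.DB)
// and the UPDATE statement is illustrative only.
func markDelivered(ctx context.Context, client Client, id string) (err error) {
	tx, err := client.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// err is the named return value, so the deferred call sees the final
	// outcome: rollback on error, otherwise commit (and report a failed commit).
	defer func() { err = CloseTransaction(tx, err) }()

	_, err = tx.ExecContext(ctx, "UPDATE notifications SET delivered = true WHERE id = $1", id)
	return err
}
```
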
type Config struct {
Dialects map[string]interface{} `mapstructure:",remain"`
EventPushConnRatio float64

View File

@ -96,3 +96,7 @@ func (p *PasswordlessInitCode) Link(baseURL string) string {
func PasswordlessInitCodeLink(baseURL, userID, resourceOwner, codeID, code string) string {
return fmt.Sprintf("%s?userID=%s&orgID=%s&codeID=%s&code=%s", baseURL, userID, resourceOwner, codeID, code)
}
func PasswordlessInitCodeLinkTemplate(baseURL, userID, resourceOwner, codeID string) string {
return PasswordlessInitCodeLink(baseURL, userID, resourceOwner, codeID, "{{.Code}}")
}
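
The template variant substitutes the literal `{{.Code}}` placeholder for the code parameter, so the resulting string can later be rendered as a text template once the actual code is known. For example, with placeholder values:

```go
// Illustrative values only; the result still contains the {{.Code}} placeholder.
link := PasswordlessInitCodeLinkTemplate("https://accounts.example.com/passwordless/init", "user-1", "org-1", "code-1")
fmt.Println(link)
// https://accounts.example.com/passwordless/init?userID=user-1&orgID=org-1&codeID=code-1&code={{.Code}}
```
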

View File

@ -1,5 +1,9 @@
package domain
import (
"time"
)
type NotificationType int32
const (
@ -31,3 +35,32 @@ const (
notificationProviderTypeCount
)
type NotificationArguments struct {
Origin string `json:"origin,omitempty"`
Domain string `json:"domain,omitempty"`
Expiry time.Duration `json:"expiry,omitempty"`
TempUsername string `json:"tempUsername,omitempty"`
ApplicationName string `json:"applicationName,omitempty"`
CodeID string `json:"codeID,omitempty"`
SessionID string `json:"sessionID,omitempty"`
AuthRequestID string `json:"authRequestID,omitempty"`
}
// ToMap creates a type-safe map of the notification arguments.
// Since these arguments are used in text templates, all keys must be PascalCase and the types must remain the same (e.g. time.Duration).
func (n *NotificationArguments) ToMap() map[string]interface{} {
m := make(map[string]interface{})
if n == nil {
return m
}
m["Origin"] = n.Origin
m["Domain"] = n.Domain
m["Expiry"] = n.Expiry
m["TempUsername"] = n.TempUsername
m["ApplicationName"] = n.ApplicationName
m["CodeID"] = n.CodeID
m["SessionID"] = n.SessionID
m["AuthRequestID"] = n.AuthRequestID
return m
}
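
Because ToMap keeps PascalCase keys and the original Go types, the map can be passed straight to text/template, where a time.Duration such as Expiry renders via its String form. A small self-contained sketch with an illustrative template and values:

```go
package main

import (
	"os"
	"text/template"
	"time"
)

func main() {
	// The shape NotificationArguments.ToMap would return (illustrative values).
	args := map[string]interface{}{
		"Domain": "example.com",
		"Expiry": 5 * time.Minute,
	}
	tmpl := template.Must(template.New("msg").Parse(
		"Your code for {{.Domain}} expires in {{.Expiry}}.\n"))
	_ = tmpl.Execute(os.Stdout, args) // prints: Your code for example.com expires in 5m0s.
}
```
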

View File

@ -7,6 +7,10 @@ import (
"github.com/zitadel/zitadel/internal/zerrors"
)
func RenderURLTemplate(w io.Writer, tmpl string, data any) error {
return renderURLTemplate(w, tmpl, data)
}
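
Exporting RenderURLTemplate lets code outside this package render user-facing links. A hedged fragment of a call site, with a placeholder template and data map:

```go
// Illustrative call site; the template string and data keys are placeholders.
var buf strings.Builder
data := map[string]string{"Origin": "https://accounts.example.com", "CodeID": "code-1"}
if err := RenderURLTemplate(&buf, "{{.Origin}}/verify?codeID={{.CodeID}}", data); err != nil {
	// an unparsable template or a failed execution surfaces here
}
fmt.Println(buf.String()) // https://accounts.example.com/verify?codeID=code-1
```
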
func renderURLTemplate(w io.Writer, tmpl string, data any) error {
parsed, err := template.New("").Parse(tmpl)
if err != nil {

View File

@ -85,6 +85,12 @@ func (es *Eventstore) Health(ctx context.Context) error {
// Push pushes the events in a single transaction
// an event needs at least an aggregate
func (es *Eventstore) Push(ctx context.Context, cmds ...Command) ([]Event, error) {
return es.PushWithClient(ctx, nil, cmds...)
}
// PushWithClient pushes the events in a single transaction using the provided database client
// an event needs at least an aggregate
func (es *Eventstore) PushWithClient(ctx context.Context, client database.QueryExecuter, cmds ...Command) ([]Event, error) {
if es.PushTimeout > 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, es.PushTimeout)
@ -100,12 +106,24 @@ func (es *Eventstore) Push(ctx context.Context, cmds ...Command) ([]Event, error
// https://github.com/zitadel/zitadel/issues/7202
retry:
for i := 0; i <= es.maxRetries; i++ {
events, err = es.pusher.Push(ctx, cmds...)
var pgErr *pgconn.PgError
if !errors.As(err, &pgErr) || pgErr.ConstraintName != "events2_pkey" || pgErr.SQLState() != "23505" {
events, err = es.pusher.Push(ctx, client, cmds...)
// if a transaction was passed in, the calling function needs to handle retries itself
if _, ok := client.(database.Tx); ok {
break retry
}
logging.WithError(err).Info("eventstore push retry")
var pgErr *pgconn.PgError
if !errors.As(err, &pgErr) {
break retry
}
if pgErr.ConstraintName == "events2_pkey" && pgErr.SQLState() == "23505" {
logging.WithError(err).Info("eventstore push retry")
continue
}
if pgErr.SQLState() == "CR000" || pgErr.SQLState() == "40001" {
logging.WithError(err).Info("eventstore push retry")
continue
}
break retry
}
if err != nil {
return nil, err
@ -283,7 +301,9 @@ type Pusher interface {
// Health checks if the connection to the storage is available
Health(ctx context.Context) error
// Push stores the actions
Push(ctx context.Context, commands ...Command) (_ []Event, err error)
Push(ctx context.Context, client database.QueryExecuter, commands ...Command) (_ []Event, err error)
// Client returns the underlying database connection
Client() *database.DB
}
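
Since the retry loop above breaks out as soon as the supplied client is a database.Tx, a caller that pushes inside its own transaction owns both commit handling and any retries. A hedged sketch combining PushWithClient with the database helpers from this change; the wrapper function itself is hypothetical, and it assumes database.DB exposes BeginTx via its embedded *sql.DB:

```go
// Hypothetical helper: push commands as part of a caller-owned transaction so
// the events become visible atomically with the caller's other writes.
func pushInTx(ctx context.Context, es *eventstore.Eventstore, db *database.DB, cmds ...eventstore.Command) (err error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() { err = database.CloseTransaction(tx, err) }()

	// No automatic retry here: with a Tx the caller decides whether to retry.
	_, err = es.PushWithClient(ctx, tx, cmds...)
	return err
}
```
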
type FillFieldsEvent interface {

View File

@ -69,7 +69,7 @@ func Benchmark_Push_SameAggregate(b *testing.B) {
b.StartTimer()
for n := 0; n < b.N; n++ {
_, err := store.Push(ctx, cmds...)
_, err := store.Push(ctx, store.Client().DB, cmds...)
if err != nil {
b.Error(err)
}
@ -149,7 +149,7 @@ func Benchmark_Push_MultipleAggregate_Parallel(b *testing.B) {
b.RunParallel(func(p *testing.PB) {
for p.Next() {
i++
_, err := store.Push(ctx, commandCreator(strconv.Itoa(i))...)
_, err := store.Push(ctx, store.Client().DB, commandCreator(strconv.Itoa(i))...)
if err != nil {
b.Error(err)
}

View File

@ -607,7 +607,7 @@ func TestCRDB_Push_ResourceOwner(t *testing.T) {
}
}
func pushAggregates(pusher eventstore.Pusher, aggregateCommands [][]eventstore.Command) []error {
func pushAggregates(es *eventstore.Eventstore, aggregateCommands [][]eventstore.Command) []error {
wg := sync.WaitGroup{}
errs := make([]error, 0)
errsMu := sync.Mutex{}
@ -619,7 +619,7 @@ func pushAggregates(pusher eventstore.Pusher, aggregateCommands [][]eventstore.C
go func(events []eventstore.Command) {
<-ctx.Done()
_, err := pusher.Push(context.Background(), events...) //nolint:contextcheck
_, err := es.Push(context.Background(), events...) //nolint:contextcheck
if err != nil {
errsMu.Lock()
errs = append(errs, err)

View File

@ -66,6 +66,39 @@ func TestCRDB_Filter(t *testing.T) {
},
wantErr: false,
},
{
name: "exclude aggregate type and event type",
args: args{
searchQuery: eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
AddQuery().
AggregateTypes(eventstore.AggregateType(t.Name())).
Builder().
ExcludeAggregateIDs().
EventTypes("test.updated").
AggregateTypes(eventstore.AggregateType(t.Name())).
Builder(),
},
fields: fields{
existingEvents: []eventstore.Command{
generateCommand(eventstore.AggregateType(t.Name()), "306"),
generateCommand(
eventstore.AggregateType(t.Name()),
"306",
func(te *testEvent) {
te.EventType = "test.updated"
},
),
generateCommand(
eventstore.AggregateType(t.Name()),
"308",
),
},
},
res: res{
eventCount: 1,
},
wantErr: false,
},
}
for _, tt := range tests {
for querierName, querier := range queriers {

View File

@ -330,6 +330,12 @@ func Test_eventData(t *testing.T) {
}
}
var _ Pusher = (*testPusher)(nil)
func (repo *testPusher) Client() *database.DB {
return nil
}
type testPusher struct {
events []Event
errs []error
@ -341,7 +347,7 @@ func (repo *testPusher) Health(ctx context.Context) error {
return nil
}
func (repo *testPusher) Push(ctx context.Context, commands ...Command) (events []Event, err error) {
func (repo *testPusher) Push(_ context.Context, _ database.QueryExecuter, commands ...Command) (events []Event, err error) {
if len(repo.errs) != 0 {
err, repo.errs = repo.errs[0], repo.errs[1:]
return nil, err

Some files were not shown because too many files have changed in this diff.