chore(tests): use a coverage server binary (#8407)

# Which Problems Are Solved

Use a single server instance for API integration tests. This optimizes
the time taken for the integration test pipeline,
because it allows running tests on multiple packages in parallel. Also,
it saves time by not starting and stopping a zitadel server for every
package.

# How the Problems Are Solved

- Build a binary with `go build -race -cover ....`
- Integration tests only construct clients. The server remains running
in the background.
- The integration package and tested packages now fully utilize the API.
No more direct database access through `query` and `command` packages.
- Use Makefile recipes to setup, start and stop the server in the
background.
- The binary has the race detector enabled
- Init and setup jobs are configured to halt immediately on race
condition
- Because the server runs in the background, races are only logged. When
the server is stopped and race logs exist, the Makefile recipe will
throw an error and print the logs.
- Makefile recipes include logic to print logs and convert coverage
reports after the server is stopped.
- Some tests need a downstream HTTP server to make requests, like quota
and milestones. A new `integration/sink` package creates an HTTP server
and uses websockets to forward HTTP requests back to the test packages.
The package API uses Go channels for abstraction and easy usage.

# Additional Changes

- Integration test files already used the `//go:build integration`
directive. In order to properly split integration from unit tests,
integration test files need to be in an `integration_test` subdirectory
of their package.
- `UseIsolatedInstance` used to overwrite the `Tester.Client` for each
instance. Now a `Instance` object is returned with a gRPC client that is
connected to the isolated instance's hostname.
- The `Tester` type is now `Instance`. The object is created for the
first instance, used by default in any test. Isolated instances are also
`Instance` objects and therefore benefit from the same methods and
values. The first instance and any other are capable of creating an
isolated instance over the system API.
- All test packages run in an Isolated instance by calling
`NewInstance()`
- Individual tests that use an isolated instance use `t.Parallel()`

# Additional Context

- Closes #6684
- https://go.dev/doc/articles/race_detector
- https://go.dev/doc/build-cover

---------

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
This commit is contained in:
Tim Möhlmann
2024-09-06 15:47:57 +03:00
committed by GitHub
parent b522588d98
commit d2e0ac07f1
115 changed files with 3632 additions and 3348 deletions

View File

@@ -0,0 +1,113 @@
//go:build integration
package system_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/instance"
"github.com/zitadel/zitadel/pkg/grpc/object"
system_pb "github.com/zitadel/zitadel/pkg/grpc/system"
)
// TestServer_ListInstances checks the system API's instance listing against a
// freshly created isolated instance, querying by ID and by domain, including
// negative cases for unknown IDs/domains and an invalid empty query.
func TestServer_ListInstances(t *testing.T) {
	t.Parallel()

	// A dedicated isolated instance gives the ID and domain queries a known target.
	isoInstance := integration.NewInstance(CTX)
	tests := []struct {
		name    string
		req     *system_pb.ListInstancesRequest
		want    []*instance.Instance
		wantErr bool
	}{
		{
			name: "empty query error",
			req: &system_pb.ListInstancesRequest{
				Queries: []*instance.Query{{}},
			},
			wantErr: true,
		},
		{
			name: "non-existing id",
			req: &system_pb.ListInstancesRequest{
				Queries: []*instance.Query{{
					Query: &instance.Query_IdQuery{
						IdQuery: &instance.IdsQuery{
							Ids: []string{"foo"},
						},
					},
				}},
			},
			want: []*instance.Instance{},
		},
		{
			name: "get 1 by id",
			req: &system_pb.ListInstancesRequest{
				Query: &object.ListQuery{
					Limit: 1,
				},
				Queries: []*instance.Query{{
					Query: &instance.Query_IdQuery{
						IdQuery: &instance.IdsQuery{
							Ids: []string{isoInstance.ID()},
						},
					},
				}},
			},
			want: []*instance.Instance{{
				Id: isoInstance.ID(),
			}},
		},
		{
			name: "non-existing domain",
			req: &system_pb.ListInstancesRequest{
				Queries: []*instance.Query{{
					Query: &instance.Query_DomainQuery{
						DomainQuery: &instance.DomainsQuery{
							Domains: []string{"foo"},
						},
					},
				}},
			},
			want: []*instance.Instance{},
		},
		{
			name: "get 1 by domain",
			req: &system_pb.ListInstancesRequest{
				Query: &object.ListQuery{
					Limit: 1,
				},
				Queries: []*instance.Query{{
					Query: &instance.Query_DomainQuery{
						DomainQuery: &instance.DomainsQuery{
							Domains: []string{isoInstance.Domain},
						},
					},
				}},
			},
			want: []*instance.Instance{{
				Id: isoInstance.ID(),
			}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp, err := integration.SystemClient().ListInstances(CTX, tt.req)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			got := resp.GetResult()
			// Stop on a length mismatch: assert.Len only marks the test as
			// failed and continues, so indexing got[i] below would otherwise
			// panic with index out of range when got is shorter than want.
			if !assert.Len(t, got, len(tt.want)) {
				return
			}
			for i := 0; i < len(tt.want); i++ {
				assert.Equalf(t, tt.want[i].GetId(), got[i].GetId(), "instance[%d] id", i)
			}
		})
	}
}

View File

@@ -0,0 +1,231 @@
//go:build integration
package system_test
import (
"context"
"math/rand"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/admin"
"github.com/zitadel/zitadel/pkg/grpc/auth"
"github.com/zitadel/zitadel/pkg/grpc/management"
"github.com/zitadel/zitadel/pkg/grpc/system"
)
// TestServer_Limits_AuditLogRetention verifies that setting an audit log
// retention limit hides events older than the retention window, and that
// resetting the limit makes the hidden events visible again.
func TestServer_Limits_AuditLogRetention(t *testing.T) {
	t.Parallel()
	isoInstance := integration.NewInstance(CTX)
	iamOwnerCtx := isoInstance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
	userID, projectID, appID, projectGrantID := seedObjects(iamOwnerCtx, t, isoInstance.Client)
	// beforeTime marks the retention boundary: events produced after it fall
	// inside the retention window configured further below.
	beforeTime := time.Now()
	farPast := timestamppb.New(beforeTime.Add(-10 * time.Hour).UTC())
	zeroCounts := &eventCounts{}
	seededCount := requireEventually(t, iamOwnerCtx, isoInstance.Client, userID, projectID, appID, projectGrantID, func(c assert.TestingT, counts *eventCounts) {
		counts.assertAll(t, c, "seeded events are > 0", assert.Greater, zeroCounts)
	}, "wait for seeded event assertions to pass")
	produceEvents(iamOwnerCtx, t, isoInstance.Client, userID, appID, projectID, projectGrantID)
	addedCount := requireEventually(t, iamOwnerCtx, isoInstance.Client, userID, projectID, appID, projectGrantID, func(c assert.TestingT, counts *eventCounts) {
		counts.assertAll(t, c, "added events are > seeded events", assert.Greater, seededCount)
	}, "wait for added event assertions to pass")
	// Retain only events that happened after beforeTime.
	// time.Since is the idiomatic equivalent of time.Now().Sub.
	_, err := integration.SystemClient().SetLimits(CTX, &system.SetLimitsRequest{
		InstanceId:        isoInstance.ID(),
		AuditLogRetention: durationpb.New(time.Since(beforeTime)),
	})
	require.NoError(t, err)
	var limitedCounts *eventCounts
	requireEventually(t, iamOwnerCtx, isoInstance.Client, userID, projectID, appID, projectGrantID, func(c assert.TestingT, counts *eventCounts) {
		counts.assertAll(t, c, "limited events < added events", assert.Less, addedCount)
		counts.assertAll(t, c, "limited events > 0", assert.Greater, zeroCounts)
		limitedCounts = counts
	}, "wait for limited event assertions to pass")
	// Queries reaching further back than the retention must not return more
	// events than the retention allows.
	listedEvents, err := isoInstance.Client.Admin.ListEvents(iamOwnerCtx, &admin.ListEventsRequest{CreationDateFilter: &admin.ListEventsRequest_From{
		From: farPast,
	}})
	require.NoError(t, err)
	assert.LessOrEqual(t, len(listedEvents.GetEvents()), limitedCounts.all, "ListEvents with from query older than retention doesn't return more events")
	listedEvents, err = isoInstance.Client.Admin.ListEvents(iamOwnerCtx, &admin.ListEventsRequest{CreationDateFilter: &admin.ListEventsRequest_Range{Range: &admin.ListEventsRequestCreationDateRange{
		Since: farPast,
	}}})
	require.NoError(t, err)
	assert.LessOrEqual(t, len(listedEvents.GetEvents()), limitedCounts.all, "ListEvents with since query older than retention doesn't return more events")
	// Resetting the limits must make all events visible again.
	_, err = integration.SystemClient().ResetLimits(CTX, &system.ResetLimitsRequest{
		InstanceId: isoInstance.ID(),
	})
	require.NoError(t, err)
	requireEventually(t, iamOwnerCtx, isoInstance.Client, userID, projectID, appID, projectGrantID, func(c assert.TestingT, counts *eventCounts) {
		counts.assertAll(t, c, "with reset limit, added events are > seeded events", assert.Greater, seededCount)
	}, "wait for reset event assertions to pass")
}
// requireEventually repeatedly counts events for the given resources until
// assertCounts passes, failing the test if it doesn't happen within the
// timeout. The final counts are returned so callers can use them as a
// baseline for later comparisons.
func requireEventually(
	t *testing.T,
	ctx context.Context,
	cc *integration.Client,
	userID, projectID, appID, projectGrantID string,
	assertCounts func(assert.TestingT, *eventCounts),
	msg string,
) (counts *eventCounts) {
	// The counting context expires one second before the assertion loop, so
	// a hanging count call is what fails first.
	const countTimeout = 30 * time.Second
	countCtx, cancel := context.WithTimeout(ctx, countTimeout)
	defer cancel()
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		counts = countEvents(countCtx, c, cc, userID, projectID, appID, projectGrantID)
		assertCounts(c, counts)
	}, countTimeout+time.Second, time.Second, msg)
	return counts
}
// runes is the alphabet used for random test resource names.
var runes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// randomString builds a pseudo-random resource name of the form
// "test<resourceType>-<n random letters>".
func randomString(resourceType string, n int) string {
	suffix := make([]rune, n)
	for i := 0; i < len(suffix); i++ {
		suffix[i] = runes[rand.Intn(len(runes))]
	}
	return "test" + resourceType + "-" + string(suffix)
}
// seedObjects creates a project, OIDC app, org, project role, project grant
// and a user event on the calling user, returning the IDs needed by the
// retention assertions: (userID, projectID, appID, projectGrantID).
func seedObjects(ctx context.Context, t *testing.T, cc *integration.Client) (string, string, string, string) {
	t.Helper()
	project, err := cc.Mgmt.AddProject(ctx, &management.AddProjectRequest{
		Name: randomString("project", 5),
	})
	require.NoError(t, err)
	app, err := cc.Mgmt.AddOIDCApp(ctx, &management.AddOIDCAppRequest{
		Name:      randomString("app", 5),
		ProjectId: project.GetId(),
	})
	// This error was previously unchecked; a failed app creation would only
	// surface later as a nil dereference on app.
	require.NoError(t, err)
	org, err := cc.Mgmt.AddOrg(ctx, &management.AddOrgRequest{
		Name: randomString("org", 5),
	})
	require.NoError(t, err)
	role := randomString("role", 5)
	_, err = cc.Mgmt.AddProjectRole(ctx, &management.AddProjectRoleRequest{
		ProjectId:   project.GetId(),
		RoleKey:     role,
		DisplayName: role,
	})
	require.NoError(t, err)
	projectGrant, err := cc.Mgmt.AddProjectGrant(ctx, &management.AddProjectGrantRequest{
		ProjectId:    project.GetId(),
		GrantedOrgId: org.GetId(),
		RoleKeys:     []string{role},
	})
	require.NoError(t, err)
	user, err := cc.Auth.GetMyUser(ctx, &auth.GetMyUserRequest{})
	require.NoError(t, err)
	userID := user.GetUser().GetId()
	// Rename the machine user once so it has at least one event.
	requireUserEvent(ctx, t, cc, userID)
	return userID, project.GetId(), app.GetAppId(), projectGrant.GetGrantId()
}
// produceEvents performs one update on each seeded resource (org, project,
// app, machine user and project grant) to generate a fresh batch of audit
// log events for every change-listing API checked by countEvents.
func produceEvents(ctx context.Context, t *testing.T, cc *integration.Client, machineID, appID, projectID, grantID string) {
	t.Helper()
	_, err := cc.Mgmt.UpdateOrg(ctx, &management.UpdateOrgRequest{
		Name: randomString("org", 5),
	})
	require.NoError(t, err)
	_, err = cc.Mgmt.UpdateProject(ctx, &management.UpdateProjectRequest{
		Id:   projectID,
		Name: randomString("project", 5),
	})
	require.NoError(t, err)
	_, err = cc.Mgmt.UpdateApp(ctx, &management.UpdateAppRequest{
		AppId:     appID,
		ProjectId: projectID,
		Name:      randomString("app", 5),
	})
	require.NoError(t, err)
	// Renaming the machine user produces a user event.
	requireUserEvent(ctx, t, cc, machineID)
	_, err = cc.Mgmt.UpdateProjectGrant(ctx, &management.UpdateProjectGrantRequest{
		ProjectId: projectID,
		GrantId:   grantID,
	})
	require.NoError(t, err)
}
// requireUserEvent renames the given machine user to force a new user event.
func requireUserEvent(ctx context.Context, t *testing.T, cc *integration.Client, machineID string) {
	// Mark as helper so failures are reported at the caller's line,
	// consistent with seedObjects and produceEvents.
	t.Helper()
	_, err := cc.Mgmt.UpdateMachine(ctx, &management.UpdateMachineRequest{
		UserId: machineID,
		Name:   randomString("machine", 5),
	})
	require.NoError(t, err)
}
// eventCounts holds the number of results returned by each of the event
// listing APIs for a fixed set of resources.
type eventCounts struct {
	all, myUser, aUser, grant, project, app, org int
}

// assertAll runs compare against every counter in e and its counterpart in
// than, inside a named subtest. The message of each comparison names the API
// whose count is being compared.
func (e *eventCounts) assertAll(t *testing.T, c assert.TestingT, name string, compare assert.ComparisonAssertionFunc, than *eventCounts) {
	t.Run(name, func(t *testing.T) {
		checks := []struct {
			api        string
			got, other int
		}{
			{"ListEvents", e.all, than.all},
			{"ListMyUserChanges", e.myUser, than.myUser},
			{"ListUserChanges", e.aUser, than.aUser},
			{"ListProjectGrantChanges", e.grant, than.grant},
			{"ListProjectChanges", e.project, than.project},
			{"ListAppChanges", e.app, than.app},
			{"ListOrgChanges", e.org, than.org},
		}
		for _, check := range checks {
			compare(c, check.got, check.other, check.api)
		}
	})
}
// countEvents queries all seven event listing APIs concurrently and returns
// the number of results from each. Each goroutine writes to a distinct field
// of counts, so no synchronization beyond the WaitGroup is needed.
func countEvents(ctx context.Context, t assert.TestingT, cc *integration.Client, userID, projectID, appID, grantID string) *eventCounts {
	counts := new(eventCounts)
	var wg sync.WaitGroup
	// One goroutine per listing API below.
	wg.Add(7)
	go func() {
		defer wg.Done()
		result, err := cc.Admin.ListEvents(ctx, &admin.ListEventsRequest{})
		assert.NoError(t, err)
		counts.all = len(result.GetEvents())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Auth.ListMyUserChanges(ctx, &auth.ListMyUserChangesRequest{})
		assert.NoError(t, err)
		counts.myUser = len(result.GetResult())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Mgmt.ListUserChanges(ctx, &management.ListUserChangesRequest{UserId: userID})
		assert.NoError(t, err)
		counts.aUser = len(result.GetResult())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Mgmt.ListAppChanges(ctx, &management.ListAppChangesRequest{ProjectId: projectID, AppId: appID})
		assert.NoError(t, err)
		counts.app = len(result.GetResult())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Mgmt.ListOrgChanges(ctx, &management.ListOrgChangesRequest{})
		assert.NoError(t, err)
		counts.org = len(result.GetResult())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Mgmt.ListProjectChanges(ctx, &management.ListProjectChangesRequest{ProjectId: projectID})
		assert.NoError(t, err)
		counts.project = len(result.GetResult())
	}()
	go func() {
		defer wg.Done()
		result, err := cc.Mgmt.ListProjectGrantChanges(ctx, &management.ListProjectGrantChangesRequest{ProjectId: projectID, GrantId: grantID})
		assert.NoError(t, err)
		counts.grant = len(result.GetResult())
	}()
	wg.Wait()
	return counts
}

View File

@@ -0,0 +1,264 @@
//go:build integration
package system_test
import (
"fmt"
"io"
"net"
"net/http"
"strings"
"testing"
"time"
"github.com/muhlemmer/gu"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/admin"
"github.com/zitadel/zitadel/pkg/grpc/system"
)
// TestServer_Limits_Block exercises instance blocking: it defines a set of
// HTTP and gRPC probes against different API surfaces, enables blocking via
// the system API and expects the probes to report the blocked behavior, then
// disables blocking and expects them to succeed again.
func TestServer_Limits_Block(t *testing.T) {
	t.Parallel()
	isoInstance := integration.NewInstance(CTX)
	iamOwnerCtx := isoInstance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
	tests := []*test{
		// Unauthenticated healthz endpoints over gRPC and HTTP.
		publicAPIBlockingTest(isoInstance.Domain),
		{
			name: "mutating API",
			testGrpc: func(tt assert.TestingT, expectBlocked bool) {
				randomGrpcIdpName := randomString("idp-grpc", 5)
				_, err := isoInstance.Client.Admin.AddGitHubProvider(iamOwnerCtx, &admin.AddGitHubProviderRequest{
					Name:         randomGrpcIdpName,
					ClientId:     "client-id",
					ClientSecret: "client-secret",
				})
				assertGrpcError(tt, err, expectBlocked)
			},
			testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
				randomHttpIdpName := randomString("idp-http", 5)
				req, err := http.NewRequestWithContext(
					CTX,
					"POST",
					fmt.Sprintf("http://%s/admin/v1/idps/github", net.JoinHostPort(isoInstance.Domain, "8080")),
					strings.NewReader(`{
"name": "`+randomHttpIdpName+`",
"clientId": "client-id",
"clientSecret": "client-secret"
}`),
				)
				if err != nil {
					return nil, err, nil
				}
				req.Header.Set("Authorization", isoInstance.BearerToken(iamOwnerCtx))
				return req, nil, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
					assertLimitResponse(ttt, response, expectBlocked)
					assertSetLimitingCookie(ttt, response, expectBlocked)
				}
			},
		}, {
			name: "discovery",
			testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
				req, err := http.NewRequestWithContext(
					CTX,
					"GET",
					fmt.Sprintf("http://%s/.well-known/openid-configuration", net.JoinHostPort(isoInstance.Domain, "8080")),
					nil,
				)
				return req, err, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
					assertLimitResponse(ttt, response, expectBlocked)
					assertSetLimitingCookie(ttt, response, expectBlocked)
				}
			},
		}, {
			name: "login",
			testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
				req, err := http.NewRequestWithContext(
					CTX,
					"GET",
					fmt.Sprintf("http://%s/ui/login/login/externalidp/callback", net.JoinHostPort(isoInstance.Domain, "8080")),
					nil,
				)
				return req, err, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
					// the login paths should return a redirect if the instance is blocked
					if expectBlocked {
						assert.Equal(ttt, http.StatusFound, response.StatusCode)
					} else {
						assertLimitResponse(ttt, response, false)
					}
					assertSetLimitingCookie(ttt, response, expectBlocked)
				}
			},
		}, {
			name: "console",
			testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
				req, err := http.NewRequestWithContext(
					CTX,
					"GET",
					fmt.Sprintf("http://%s/ui/console/", net.JoinHostPort(isoInstance.Domain, "8080")),
					nil,
				)
				return req, err, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
					// the console is not blocked so we can render a link to an instance management portal.
					// A CDN can cache these assets easily
					// We also don't care about a cookie because the environment.json already takes care of that.
					assertLimitResponse(ttt, response, false)
				}
			},
		}, {
			name: "environment.json",
			testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
				req, err := http.NewRequestWithContext(
					CTX,
					"GET",
					fmt.Sprintf("http://%s/ui/console/assets/environment.json", net.JoinHostPort(isoInstance.Domain, "8080")),
					nil,
				)
				return req, err, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
					// the environment.json should always return successfully
					assertLimitResponse(ttt, response, false)
					assertSetLimitingCookie(ttt, response, expectBlocked)
					body, err := io.ReadAll(response.Body)
					assert.NoError(ttt, err)
					// When blocked, the body must advertise the exhausted state.
					var compFunc assert.ComparisonAssertionFunc = assert.NotContains
					if expectBlocked {
						compFunc = assert.Contains
					}
					compFunc(ttt, string(body), `"exhausted":true`)
				}
			},
		}}
	// Enable blocking on the isolated instance.
	_, err := integration.SystemClient().SetLimits(CTX, &system.SetLimitsRequest{
		InstanceId: isoInstance.ID(),
		Block:      gu.Ptr(true),
	})
	require.NoError(t, err)
	// The following call ensures that an undefined bool is not deserialized to false
	_, err = integration.SystemClient().SetLimits(CTX, &system.SetLimitsRequest{
		InstanceId:        isoInstance.ID(),
		AuditLogRetention: durationpb.New(time.Hour),
	})
	require.NoError(t, err)
	for _, tt := range tests {
		// NOTE(review): isFirst is redeclared each iteration, so every
		// non-skipped subtest sees isFirst == true and performs the eventual
		// wait in testBlockingAPI; confirm whether only the first subtest was
		// intended to wait.
		var isFirst bool
		t.Run(tt.name+" with blocking", func(t *testing.T) {
			isFirst = isFirst || !t.Skipped()
			testBlockingAPI(t, tt, true, isFirst)
		})
	}
	// Disable blocking again and expect the probes to succeed.
	_, err = integration.SystemClient().SetLimits(CTX, &system.SetLimitsRequest{
		InstanceId: isoInstance.ID(),
		Block:      gu.Ptr(false),
	})
	require.NoError(t, err)
	for _, tt := range tests {
		var isFirst bool
		t.Run(tt.name+" without blocking", func(t *testing.T) {
			isFirst = isFirst || !t.Skipped()
			testBlockingAPI(t, tt, false, isFirst)
		})
	}
}
// test describes one API probe used by the blocking tests.
type test struct {
	name string
	// testHttp builds the request to perform and returns an assertion that
	// checks the response for the expected blocked/unblocked behavior.
	testHttp func(t assert.TestingT) (req *http.Request, err error, assertResponse func(t assert.TestingT, response *http.Response, expectBlocked bool))
	// testGrpc, when set, performs a gRPC call and asserts its outcome.
	testGrpc func(t assert.TestingT, expectBlocked bool)
}
// testBlockingAPI performs tt's HTTP probe (and gRPC probe, when defined) and
// asserts blocked or unblocked behavior. When isFirst is true, the HTTP probe
// is retried until it passes, because limit changes are eventually consistent.
func testBlockingAPI(t *testing.T, tt *test, expectBlocked bool, isFirst bool) {
	req, err, assertResponse := tt.testHttp(t)
	require.NoError(t, err)
	testHTTP := func(t require.TestingT) {
		resp, err := (&http.Client{
			// Don't follow redirects
			CheckRedirect: func(req *http.Request, via []*http.Request) error {
				return http.ErrUseLastResponse
			},
		}).Do(req)
		// Check the request error before touching resp: on a failed request
		// resp is nil and the previously deferred Body.Close would panic.
		require.NoError(t, err)
		defer func() {
			require.NoError(t, resp.Body.Close())
		}()
		assertResponse(t, resp, expectBlocked)
	}
	if isFirst {
		// limits are eventually consistent, so we need to wait for the blocking to be set on the first test
		assert.EventuallyWithT(t, func(c *assert.CollectT) {
			testHTTP(c)
		}, time.Minute, time.Second, "wait for blocking to be set")
	} else {
		testHTTP(t)
	}
	if tt.testGrpc != nil {
		tt.testGrpc(t, expectBlocked)
	}
}
// publicAPIBlockingTest returns a probe against the unauthenticated Healthz
// endpoint of the given domain, over both gRPC and HTTP.
func publicAPIBlockingTest(domain string) *test {
	return &test{
		name: "public API",
		testGrpc: func(tt assert.TestingT, expectBlocked bool) {
			// Dial without TLS and block until the connection is established.
			conn, err := grpc.DialContext(CTX, net.JoinHostPort(domain, "8080"),
				grpc.WithBlock(),
				grpc.WithTransportCredentials(insecure.NewCredentials()),
			)
			assert.NoError(tt, err)
			_, err = admin.NewAdminServiceClient(conn).Healthz(CTX, &admin.HealthzRequest{})
			assertGrpcError(tt, err, expectBlocked)
		},
		testHttp: func(tt assert.TestingT) (*http.Request, error, func(assert.TestingT, *http.Response, bool)) {
			req, err := http.NewRequestWithContext(
				CTX,
				"GET",
				fmt.Sprintf("http://%s/admin/v1/healthz", net.JoinHostPort(domain, "8080")),
				nil,
			)
			return req, err, func(ttt assert.TestingT, response *http.Response, expectBlocked bool) {
				assertLimitResponse(ttt, response, expectBlocked)
				assertSetLimitingCookie(ttt, response, expectBlocked)
			}
		},
	}
}
// assertSetLimitingCookie inspects the quota-exhausted cookie on response.
// If expectSet is true, the cookie must be set (positive MaxAge); otherwise
// it must be deleted (MaxAge <= 0). A missing cookie fails either way.
func assertSetLimitingCookie(t assert.TestingT, response *http.Response, expectSet bool) {
	const cookieName = "zitadel.quota.exhausted"
	for _, c := range response.Cookies() {
		if c.Name != cookieName {
			continue
		}
		if expectSet {
			assert.Greater(t, c.MaxAge, 0)
		} else {
			assert.LessOrEqual(t, c.MaxAge, 0)
		}
		return
	}
	assert.Fail(t, "cookie not found")
}
// assertGrpcError asserts that err carries a ResourceExhausted status when
// blocking is expected, and that there is no error otherwise.
func assertGrpcError(t assert.TestingT, err error, expectBlocked bool) {
	if !expectBlocked {
		assert.NoError(t, err)
		return
	}
	assert.Equal(t, codes.ResourceExhausted, status.Convert(err).Code())
}
// assertLimitResponse asserts a 429 status when blocking is expected and any
// 2xx status otherwise.
func assertLimitResponse(t assert.TestingT, response *http.Response, expectBlocked bool) {
	switch {
	case expectBlocked:
		assert.Equal(t, http.StatusTooManyRequests, response.StatusCode)
	default:
		assert.GreaterOrEqual(t, response.StatusCode, 200)
		assert.Less(t, response.StatusCode, 300)
	}
}

View File

@@ -0,0 +1,75 @@
//go:build integration
package system_test
import (
"testing"
"github.com/muhlemmer/gu"
"github.com/stretchr/testify/require"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/pkg/grpc/system"
)
// TestServer_Limits_Bulk creates five instances, sets alternating blocking
// limits for all of them in a single BulkSetLimits call and verifies that
// each instance's public API is blocked or reachable accordingly.
func TestServer_Limits_Bulk(t *testing.T) {
	// Named constant instead of the original `const len = 5`, which shadowed
	// the len builtin for the whole function body.
	const numInstances = 5
	type instance struct{ domain, id string }
	instances := make([]*instance, numInstances)
	for i := 0; i < numInstances; i++ {
		domain := integration.RandString(5) + ".integration.localhost"
		resp, err := integration.SystemClient().CreateInstance(CTX, &system.CreateInstanceRequest{
			InstanceName: "testinstance",
			CustomDomain: domain,
			Owner: &system.CreateInstanceRequest_Machine_{
				Machine: &system.CreateInstanceRequest_Machine{
					UserName: "owner",
					Name:     "owner",
				},
			},
		})
		require.NoError(t, err)
		instances[i] = &instance{domain, resp.GetInstanceId()}
	}
	// Alternate blocked/unblocked across the five instances.
	resp, err := integration.SystemClient().BulkSetLimits(CTX, &system.BulkSetLimitsRequest{
		Limits: []*system.SetLimitsRequest{{
			InstanceId: instances[0].id,
			Block:      gu.Ptr(true),
		}, {
			InstanceId: instances[1].id,
			Block:      gu.Ptr(false),
		}, {
			InstanceId: instances[2].id,
			Block:      gu.Ptr(true),
		}, {
			InstanceId: instances[3].id,
			Block:      gu.Ptr(false),
		}, {
			InstanceId: instances[4].id,
			Block:      gu.Ptr(true),
		}},
	})
	require.NoError(t, err)
	details := resp.GetTargetDetails()
	require.Len(t, details, numInstances)
	t.Run("the first instance is blocked", func(t *testing.T) {
		require.Equal(t, instances[0].id, details[0].GetResourceOwner(), "resource owner must be instance id")
		testBlockingAPI(t, publicAPIBlockingTest(instances[0].domain), true, true)
	})
	t.Run("the second instance isn't blocked", func(t *testing.T) {
		require.Equal(t, instances[1].id, details[1].GetResourceOwner(), "resource owner must be instance id")
		testBlockingAPI(t, publicAPIBlockingTest(instances[1].domain), false, true)
	})
	t.Run("the third instance is blocked", func(t *testing.T) {
		require.Equal(t, instances[2].id, details[2].GetResourceOwner(), "resource owner must be instance id")
		testBlockingAPI(t, publicAPIBlockingTest(instances[2].domain), true, true)
	})
	t.Run("the fourth instance isn't blocked", func(t *testing.T) {
		require.Equal(t, instances[3].id, details[3].GetResourceOwner(), "resource owner must be instance id")
		testBlockingAPI(t, publicAPIBlockingTest(instances[3].domain), false, true)
	})
	t.Run("the fifth instance is blocked", func(t *testing.T) {
		require.Equal(t, instances[4].id, details[4].GetResourceOwner(), "resource owner must be instance id")
		testBlockingAPI(t, publicAPIBlockingTest(instances[4].domain), true, true)
	})
}

View File

@@ -0,0 +1,216 @@
//go:build integration
package quotas_enabled_test
import (
"bytes"
"encoding/json"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/zitadel/zitadel/internal/integration"
"github.com/zitadel/zitadel/internal/integration/sink"
"github.com/zitadel/zitadel/internal/repository/quota"
"github.com/zitadel/zitadel/pkg/grpc/admin"
quota_pb "github.com/zitadel/zitadel/pkg/grpc/quota"
"github.com/zitadel/zitadel/pkg/grpc/system"
)
// callURL is the sink endpoint quota notifications are delivered to during
// integration tests.
var callURL = sink.CallURL(sink.ChannelQuota)
// TestServer_QuotaNotification_Limit configures a limited request quota with
// notifications at 50% and 100%, exhausts the quota and verifies that both
// notifications arrive at the sink and that further requests are rejected.
func TestServer_QuotaNotification_Limit(t *testing.T) {
	instance := integration.NewInstance(CTX)
	iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
	amount := 10
	percent := 50
	percentAmount := amount * percent / 100
	_, err := integration.SystemClient().SetQuota(CTX, &system.SetQuotaRequest{
		InstanceId:    instance.Instance.Id,
		Unit:          quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
		From:          timestamppb.Now(),
		ResetInterval: durationpb.New(time.Minute * 5),
		Amount:        uint64(amount),
		Limit:         true,
		Notifications: []*quota_pb.Notification{
			{
				Percent: uint32(percent),
				Repeat:  true,
				CallUrl: callURL,
			},
			{
				Percent: 100,
				Repeat:  true,
				CallUrl: callURL,
			},
		},
	})
	require.NoError(t, err)
	// Subscribe before producing load, so no notification is missed.
	sub := sink.Subscribe(CTX, sink.ChannelQuota)
	defer sub.Close()
	// Use up half of the quota and expect the 50% notification.
	for i := 0; i < percentAmount; i++ {
		_, err := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
		require.NoErrorf(t, err, "error in %d call of %d", i, percentAmount)
	}
	awaitNotification(t, sub, quota.RequestsAllAuthenticated, percent)
	// Use up the remaining quota and expect the 100% notification.
	for i := 0; i < (amount - percentAmount); i++ {
		_, err := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
		require.NoErrorf(t, err, "error in %d call of %d", i, percentAmount)
	}
	awaitNotification(t, sub, quota.RequestsAllAuthenticated, 100)
	// The quota is exhausted and limiting is enabled: the next call must fail.
	_, limitErr := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
	require.Error(t, limitErr)
}
// TestServer_QuotaNotification_NoLimit configures an unlimited quota with a
// one-shot 50% notification and a repeating 100% notification, exceeds the
// quota twice over and verifies the 50%, 100% and 200% notifications arrive
// while requests keep succeeding (no limiting).
func TestServer_QuotaNotification_NoLimit(t *testing.T) {
	instance := integration.NewInstance(CTX)
	iamCTX := instance.WithAuthorization(CTX, integration.UserTypeIAMOwner)
	amount := 10
	percent := 50
	percentAmount := amount * percent / 100
	_, err := integration.SystemClient().SetQuota(CTX, &system.SetQuotaRequest{
		InstanceId:    instance.Instance.Id,
		Unit:          quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
		From:          timestamppb.Now(),
		ResetInterval: durationpb.New(time.Minute * 5),
		Amount:        uint64(amount),
		Limit:         false,
		Notifications: []*quota_pb.Notification{
			{
				Percent: uint32(percent),
				Repeat:  false,
				CallUrl: callURL,
			},
			{
				Percent: 100,
				Repeat:  true,
				CallUrl: callURL,
			},
		},
	})
	require.NoError(t, err)
	// Subscribe before producing load, so no notification is missed.
	sub := sink.Subscribe(CTX, sink.ChannelQuota)
	defer sub.Close()
	// Use up half of the quota and expect the one-shot 50% notification.
	for i := 0; i < percentAmount; i++ {
		_, err := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
		require.NoErrorf(t, err, "error in %d call of %d", i, percentAmount)
	}
	awaitNotification(t, sub, quota.RequestsAllAuthenticated, percent)
	// Use up the rest and expect the repeating 100% notification.
	for i := 0; i < (amount - percentAmount); i++ {
		_, err := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
		require.NoErrorf(t, err, "error in %d call of %d", i, percentAmount)
	}
	awaitNotification(t, sub, quota.RequestsAllAuthenticated, 100)
	// Exceed the quota by another full amount; the repeating notification
	// fires again at 200%.
	for i := 0; i < amount; i++ {
		_, err := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
		require.NoErrorf(t, err, "error in %d call of %d", i, percentAmount)
	}
	awaitNotification(t, sub, quota.RequestsAllAuthenticated, 200)
	// Limiting is disabled, so calls keep succeeding past the quota.
	_, limitErr := instance.Client.Admin.GetDefaultOrg(iamCTX, &admin.GetDefaultOrgRequest{})
	require.NoError(t, limitErr)
}
// awaitNotification drains the sink subscription until a quota notification
// for the given unit and threshold percentage arrives, failing the test after
// a 60 second timeout.
func awaitNotification(t *testing.T, sub *sink.Subscription, unit quota.Unit, percent int) {
	for {
		select {
		case req, ok := <-sub.Recv():
			require.True(t, ok, "channel closed")
			// Log the pretty-printed payload for debugging flaky waits.
			plain := new(bytes.Buffer)
			if err := json.Indent(plain, req.Body, "", " "); err != nil {
				t.Fatal(err)
			}
			t.Log("received notificationDueEvent", plain.String())
			// Only the fields relevant for matching are decoded.
			event := struct {
				Unit        quota.Unit `json:"unit"`
				ID          string     `json:"id"`
				CallURL     string     `json:"callURL"`
				PeriodStart time.Time  `json:"periodStart"`
				Threshold   uint16     `json:"threshold"`
				Usage       uint64     `json:"usage"`
			}{}
			if err := json.Unmarshal(req.Body, &event); err != nil {
				t.Error(err)
			}
			// Skip payloads without an ID; they are not due events.
			if event.ID == "" {
				continue
			}
			if event.Unit == unit && event.Threshold == uint16(percent) {
				return
			}
		case <-time.After(60 * time.Second):
			t.Fatalf("timed out waiting for unit %s and percent %d", strconv.Itoa(int(unit)), percent)
		}
	}
}
// TestServer_AddAndRemoveQuota covers the system quota lifecycle: setting a
// quota, overwriting an existing one, removing it, and removing it again
// (which must fail).
func TestServer_AddAndRemoveQuota(t *testing.T) {
	instance := integration.NewInstance(CTX)
	got, err := integration.SystemClient().SetQuota(CTX, &system.SetQuotaRequest{
		InstanceId:    instance.Instance.Id,
		Unit:          quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
		From:          timestamppb.Now(),
		ResetInterval: durationpb.New(time.Minute),
		Amount:        10,
		Limit:         true,
		Notifications: []*quota_pb.Notification{
			{
				Percent: 20,
				Repeat:  true,
				CallUrl: callURL,
			},
		},
	})
	require.NoError(t, err)
	// testify's Equal takes the expected value first; argument order fixed so
	// failure messages read correctly.
	require.Equal(t, instance.Instance.Id, got.Details.ResourceOwner)
	// Setting a quota for the same unit again must overwrite, not fail.
	gotAlreadyExisting, errAlreadyExisting := integration.SystemClient().SetQuota(CTX, &system.SetQuotaRequest{
		InstanceId:    instance.Instance.Id,
		Unit:          quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
		From:          timestamppb.Now(),
		ResetInterval: durationpb.New(time.Minute),
		Amount:        10,
		Limit:         true,
		Notifications: []*quota_pb.Notification{
			{
				Percent: 20,
				Repeat:  true,
				CallUrl: callURL,
			},
		},
	})
	require.NoError(t, errAlreadyExisting)
	require.Equal(t, instance.Instance.Id, gotAlreadyExisting.Details.ResourceOwner)
	gotRemove, errRemove := integration.SystemClient().RemoveQuota(CTX, &system.RemoveQuotaRequest{
		InstanceId: instance.Instance.Id,
		Unit:       quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
	})
	require.NoError(t, errRemove)
	require.Equal(t, instance.Instance.Id, gotRemove.Details.ResourceOwner)
	// Removing an absent quota must fail and return no payload.
	gotRemoveAlready, errRemoveAlready := integration.SystemClient().RemoveQuota(CTX, &system.RemoveQuotaRequest{
		InstanceId: instance.Instance.Id,
		Unit:       quota_pb.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
	})
	require.Error(t, errRemoveAlready)
	require.Nil(t, gotRemoveAlready)
}

View File

@@ -0,0 +1,23 @@
//go:build integration
package quotas_enabled_test
import (
"context"
"os"
"testing"
"time"
)
var (
	// CTX is the shared base context for every test in this package. It is
	// assigned by TestMain and expires 15 minutes after the run starts.
	CTX context.Context
)
// TestMain installs the package-wide CTX with a 15 minute timeout, runs all
// tests and propagates the exit code while still releasing the context.
func TestMain(m *testing.M) {
	os.Exit(func() int {
		var cancel context.CancelFunc
		CTX, cancel = context.WithTimeout(context.Background(), 15*time.Minute)
		defer cancel()
		return m.Run()
	}())
}

View File

@@ -0,0 +1,24 @@
//go:build integration
package system_test
import (
"context"
"os"
"testing"
"time"
)
var (
	// CTX is the shared base context for every test in this package. It is
	// assigned by TestMain and expires 15 minutes after the run starts.
	CTX context.Context
)
// TestMain installs the package-wide CTX with a 15 minute timeout, runs all
// tests and propagates the exit code while still releasing the context.
func TestMain(m *testing.M) {
	run := func() int {
		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
		defer cancel()
		CTX = ctx
		return m.Run()
	}
	os.Exit(run())
}