feat: add quotas (#4779)
Adds the ability to cap authenticated requests and the execution seconds of actions over a defined interval.
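For orientation before the diff itself: a request quota could be configured through the new system API roughly as below. This is a minimal sketch; the field names come from the AddQuotaRequest converter further down in this commit, while the concrete values and the surrounding wiring are assumptions.

package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/zitadel/zitadel/pkg/grpc/quota"
	"github.com/zitadel/zitadel/pkg/grpc/system"
)

func exampleAddQuotaRequest() *system.AddQuotaRequest {
	// Illustrative values: cap authenticated requests at one million per
	// 30-day window, block on exhaustion, and call a webhook at 80% usage.
	return &system.AddQuotaRequest{
		Unit:          quota.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED,
		From:          timestamppb.Now(),
		ResetInterval: durationpb.New(30 * 24 * time.Hour),
		Amount:        1_000_000,
		Limit:         true,
		Notifications: []*quota.Notification{
			{Percent: 80, Repeat: false, CallUrl: "https://example.com/quota-webhook"},
		},
	}
}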
@@ -6,6 +6,8 @@ import (
+	"fmt"

 	"github.com/dop251/goja_nodejs/require"
+	"github.com/sirupsen/logrus"

 	z_errs "github.com/zitadel/zitadel/internal/errors"
 	"github.com/zitadel/zitadel/internal/query"
 )
@@ -14,15 +16,45 @@ type Config struct {
 	HTTP HTTPConfig
 }

-var (
-	ErrHalt = errors.New("interrupt")
-)
+var ErrHalt = errors.New("interrupt")

 type jsAction func(fields, fields) error

-func Run(ctx context.Context, ctxParam contextFields, apiParam apiFields, script, name string, opts ...Option) error {
-	config, err := prepareRun(ctx, ctxParam, apiParam, script, opts)
-	if err != nil {
+const (
+	actionStartedMessage   = "action run started"
+	actionSucceededMessage = "action run succeeded"
+)
+
+func actionFailedMessage(err error) string {
+	return fmt.Sprintf("action run failed: %s", err.Error())
+}
+
+func Run(ctx context.Context, ctxParam contextFields, apiParam apiFields, script, name string, opts ...Option) (err error) {
+	config := newRunConfig(ctx, append(opts, withLogger(ctx))...)
+	if config.functionTimeout == 0 {
+		return z_errs.ThrowInternal(nil, "ACTIO-uCpCx", "Errrors.Internal")
+	}
+
+	remaining := logstoreService.Limit(ctx, config.instanceID)
+	config.cutTimeouts(remaining)
+
+	config.logger.Log(actionStartedMessage)
+	if remaining != nil && *remaining == 0 {
+		return z_errs.ThrowResourceExhausted(nil, "ACTIO-f19Ii", "Errors.Quota.Execution.Exhausted")
+	}
+
+	defer func() {
+		if err != nil {
+			config.logger.log(actionFailedMessage(err), logrus.ErrorLevel, true)
+		} else {
+			config.logger.log(actionSucceededMessage, logrus.InfoLevel, true)
+		}
+		if config.allowedToFail {
+			err = nil
+		}
+	}()
+
+	if err := executeScript(config, ctxParam, apiParam, script); err != nil {
 		return err
 	}
@@ -31,12 +63,11 @@ func Run(ctx context.Context, ctxParam contextFields, apiParam apiFields, script
 	if jsFn == nil {
 		return errors.New("function not found")
 	}
-	err = config.vm.ExportTo(jsFn, &fn)
-	if err != nil {
+	if err := config.vm.ExportTo(jsFn, &fn); err != nil {
 		return err
 	}

-	t := config.Start()
+	t := config.StartFunction()
 	defer func() {
 		t.Stop()
 	}()
@@ -44,12 +75,8 @@ func Run(ctx context.Context, ctxParam contextFields, apiParam apiFields, script
 	return executeFn(config, fn)
 }

-func prepareRun(ctx context.Context, ctxParam contextFields, apiParam apiFields, script string, opts []Option) (config *runConfig, err error) {
-	config = newRunConfig(ctx, opts...)
-	if config.timeout == 0 {
-		return nil, z_errs.ThrowInternal(nil, "ACTIO-uCpCx", "Errrors.Internal")
-	}
-	t := config.Prepare()
+func executeScript(config *runConfig, ctxParam contextFields, apiParam apiFields, script string) (err error) {
+	t := config.StartScript()
 	defer func() {
 		t.Stop()
 	}()
@@ -67,7 +94,6 @@ func prepareRun(ctx context.Context, ctxParam contextFields, apiParam apiFields,
 	for name, loader := range config.modules {
 		registry.RegisterNativeModule(name, loader)
 	}
-
 	// overload error if function panics
 	defer func() {
 		r := recover()
@@ -76,29 +102,31 @@ func prepareRun(ctx context.Context, ctxParam contextFields, apiParam apiFields,
 			return
 		}
 	}()

 	_, err = config.vm.RunString(script)
-	return config, err
+	return err
 }

 func executeFn(config *runConfig, fn jsAction) (err error) {
 	defer func() {
 		r := recover()
-		if r != nil && !config.allowedToFail {
-			var ok bool
-			if err, ok = r.(error); ok {
-				return
-			}
-
-			e, ok := r.(string)
-			if ok {
-				err = errors.New(e)
-				return
-			}
-			err = fmt.Errorf("unknown error occured: %v", r)
+		if r == nil {
+			return
+		}
+		var ok bool
+		if err, ok = r.(error); ok {
+			return
+		}
+
+		e, ok := r.(string)
+		if ok {
+			err = errors.New(e)
+			return
+		}
+		err = fmt.Errorf("unknown error occurred: %v", r)
 	}()
-	err = fn(config.ctxParam.fields, config.apiParam.fields)
-	if err != nil && !config.allowedToFail {
+	if err = fn(config.ctxParam.fields, config.apiParam.fields); err != nil {
 		return err
 	}
 	return nil
@@ -7,9 +7,11 @@ import (
 	"time"

 	"github.com/dop251/goja"
+	"github.com/zitadel/zitadel/internal/logstore"
 )

 func TestRun(t *testing.T) {
+	SetLogstoreService(logstore.New(nil, nil, nil))
 	type args struct {
 		timeout time.Duration
 		api     apiFields
@@ -23,13 +23,14 @@ func WithAllowedToFail() Option {

 type runConfig struct {
 	allowedToFail bool
-	timeout,
-	prepareTimeout time.Duration
-	modules map[string]require.ModuleLoader
-
-	vm       *goja.Runtime
-	ctxParam *ctxConfig
-	apiParam *apiConfig
+	functionTimeout,
+	scriptTimeout time.Duration
+	modules    map[string]require.ModuleLoader
+	logger     *logger
+	instanceID string
+	vm         *goja.Runtime
+	ctxParam   *ctxConfig
+	apiParam   *apiConfig
 }

 func newRunConfig(ctx context.Context, opts ...Option) *runConfig {
@@ -42,10 +43,10 @@ func newRunConfig(ctx context.Context, opts ...Option) *runConfig {
 	vm.SetFieldNameMapper(goja.UncapFieldNameMapper())

 	config := &runConfig{
-		timeout:        time.Until(deadline),
-		prepareTimeout: maxPrepareTimeout,
-		modules:        map[string]require.ModuleLoader{},
-		vm:             vm,
+		functionTimeout: time.Until(deadline),
+		scriptTimeout:   maxPrepareTimeout,
+		modules:         map[string]require.ModuleLoader{},
+		vm:              vm,
 		ctxParam: &ctxConfig{
 			FieldConfig: FieldConfig{
 				Runtime: vm,
@@ -64,23 +65,37 @@ func newRunConfig(ctx context.Context, opts ...Option) *runConfig {
 		opt(config)
 	}

-	if config.prepareTimeout > config.timeout {
-		config.prepareTimeout = config.timeout
+	if config.scriptTimeout > config.functionTimeout {
+		config.scriptTimeout = config.functionTimeout
 	}

 	return config
 }

-func (c *runConfig) Start() *time.Timer {
+func (c *runConfig) StartFunction() *time.Timer {
 	c.vm.ClearInterrupt()
-	return time.AfterFunc(c.timeout, func() {
+	return time.AfterFunc(c.functionTimeout, func() {
 		c.vm.Interrupt(ErrHalt)
 	})
 }

-func (c *runConfig) Prepare() *time.Timer {
+func (c *runConfig) StartScript() *time.Timer {
 	c.vm.ClearInterrupt()
-	return time.AfterFunc(c.prepareTimeout, func() {
+	return time.AfterFunc(c.scriptTimeout, func() {
 		c.vm.Interrupt(ErrHalt)
 	})
 }
+
+func (c *runConfig) cutTimeouts(remainingSeconds *uint64) {
+	if remainingSeconds == nil {
+		return
+	}
+
+	remainingDur := time.Duration(*remainingSeconds) * time.Second
+	if c.functionTimeout > remainingDur {
+		c.functionTimeout = remainingDur
+	}
+	if c.scriptTimeout > remainingDur {
+		c.scriptTimeout = remainingDur
+	}
+}
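To make the clamping in cutTimeouts concrete, here is a small illustration with assumed values (it would have to live inside the actions package, since runConfig is unexported):

func exampleCutTimeouts() {
	// 10s until the context deadline, 5s script timeout, 3 quota seconds left.
	cfg := &runConfig{functionTimeout: 10 * time.Second, scriptTimeout: 5 * time.Second}
	remaining := uint64(3)
	cfg.cutTimeouts(&remaining) // both timeouts are clamped down to 3s
	cfg.cutTimeouts(nil)        // nil means no quota is configured: timeouts stay unchanged
}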
@@ -5,9 +5,11 @@ import (
 	"testing"

 	"github.com/dop251/goja"
+	"github.com/zitadel/zitadel/internal/logstore"
 )

 func TestSetFields(t *testing.T) {
+	SetLogstoreService(logstore.New(nil, nil, nil))
 	primitveFn := func(a string) { fmt.Println(a) }
 	complexFn := func(*FieldConfig) interface{} {
 		return primitveFn
@@ -11,9 +11,11 @@ import (

 	"github.com/dop251/goja"
 	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/logstore"
 )

 func Test_isHostBlocked(t *testing.T) {
+	SetLogstoreService(logstore.New(nil, nil, nil))
 	var denyList = []AddressChecker{
 		mustNewIPChecker(t, "192.168.5.0/24"),
 		mustNewIPChecker(t, "127.0.0.1"),
@@ -1,30 +1,83 @@
 package actions

 import (
-	"github.com/zitadel/logging"
+	"context"
+	"time"

 	"github.com/dop251/goja"
 	"github.com/dop251/goja_nodejs/console"
+	"github.com/sirupsen/logrus"

+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/logstore"
+	"github.com/zitadel/zitadel/internal/logstore/emitters/execution"
 )

-var ServerLog *logrus
+var (
+	logstoreService *logstore.Service
+	_               console.Printer = (*logger)(nil)
+)

-type logrus struct{}
-
-func (*logrus) Log(s string) {
-	logging.WithFields("message", s).Info("log from action")
-}
-func (*logrus) Warn(s string) {
-	logging.WithFields("message", s).Info("warn from action")
-}
-func (*logrus) Error(s string) {
-	logging.WithFields("message", s).Info("error from action")
+func SetLogstoreService(svc *logstore.Service) {
+	logstoreService = svc
 }

-func WithLogger(logger console.Printer) Option {
+type logger struct {
+	ctx        context.Context
+	started    time.Time
+	instanceID string
+}
+
+// newLogger returns a *logger instance that should only be used for a single action run.
+// The first log call sets the started field for subsequent log calls
+func newLogger(ctx context.Context, instanceID string) *logger {
+	return &logger{
+		ctx:        ctx,
+		started:    time.Time{},
+		instanceID: instanceID,
+	}
+}
+
+func (l *logger) Log(msg string) {
+	l.log(msg, logrus.InfoLevel, false)
+}
+
+func (l *logger) Warn(msg string) {
+	l.log(msg, logrus.WarnLevel, false)
+}
+
+func (l *logger) Error(msg string) {
+	l.log(msg, logrus.ErrorLevel, false)
+}
+
+func (l *logger) log(msg string, level logrus.Level, last bool) {
+	ts := time.Now()
+	if l.started.IsZero() {
+		l.started = ts
+	}
+
+	record := &execution.Record{
+		LogDate:    ts,
+		InstanceID: l.instanceID,
+		Message:    msg,
+		LogLevel:   level,
+	}
+
+	if last {
+		record.Took = ts.Sub(l.started)
+	}
+
+	logstoreService.Handle(l.ctx, record)
+}
+
+func withLogger(ctx context.Context) Option {
+	instance := authz.GetInstance(ctx)
+	instanceID := instance.InstanceID()
 	return func(c *runConfig) {
+		c.logger = newLogger(ctx, instanceID)
+		c.instanceID = instanceID
 		c.modules["zitadel/log"] = func(runtime *goja.Runtime, module *goja.Object) {
-			console.RequireWithPrinter(logger)(runtime, module)
+			console.RequireWithPrinter(c.logger)(runtime, module)
 		}
 	}
 }
@@ -16,6 +16,7 @@ import (
 	http_util "github.com/zitadel/zitadel/internal/api/http"
 	"github.com/zitadel/zitadel/internal/api/ui/login"
 	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/logstore"
 	"github.com/zitadel/zitadel/internal/query"
 	"github.com/zitadel/zitadel/internal/telemetry/metrics"
 	"github.com/zitadel/zitadel/internal/telemetry/tracing"
@@ -36,7 +37,18 @@ type health interface {
 	Instance(ctx context.Context, shouldTriggerBulk bool) (*query.Instance, error)
 }

-func New(port uint16, router *mux.Router, queries *query.Queries, verifier *internal_authz.TokenVerifier, authZ internal_authz.Config, externalSecure bool, tlsConfig *tls.Config, http2HostName, http1HostName string) *API {
+func New(
+	port uint16,
+	router *mux.Router,
+	queries *query.Queries,
+	verifier *internal_authz.TokenVerifier,
+	authZ internal_authz.Config,
+	externalSecure bool,
+	tlsConfig *tls.Config,
+	http2HostName,
+	http1HostName string,
+	accessSvc *logstore.Service,
+) *API {
 	api := &API{
 		port:     port,
 		verifier: verifier,
@@ -45,7 +57,8 @@ func New(port uint16, router *mux.Router, queries *query.Queries, verifier *inte
 		externalSecure: externalSecure,
 		http1HostName:  http1HostName,
 	}
-	api.grpcServer = server.CreateServer(api.verifier, authZ, queries, http2HostName, tlsConfig)
+
+	api.grpcServer = server.CreateServer(api.verifier, authZ, queries, http2HostName, tlsConfig, accessSvc)
 	api.routeGRPC()

 	api.RegisterHandler("/debug", api.healthHandler())
@@ -82,7 +82,7 @@ func DefaultErrorHandler(w http.ResponseWriter, r *http.Request, err error, code
 	http.Error(w, err.Error(), code)
 }

-func NewHandler(commands *command.Commands, verifier *authz.TokenVerifier, authConfig authz.Config, idGenerator id.Generator, storage static.Storage, queries *query.Queries, instanceInterceptor, assetCacheInterceptor func(handler http.Handler) http.Handler) http.Handler {
+func NewHandler(commands *command.Commands, verifier *authz.TokenVerifier, authConfig authz.Config, idGenerator id.Generator, storage static.Storage, queries *query.Queries, instanceInterceptor, assetCacheInterceptor, accessInterceptor func(handler http.Handler) http.Handler) http.Handler {
 	h := &Handler{
 		commands:     commands,
 		errorHandler: DefaultErrorHandler,
@@ -94,7 +94,7 @@ func NewHandler(commands *command.Commands, verifier *authz.TokenVerifier, authC

 	verifier.RegisterServer("Assets-API", "assets", AssetsService_AuthMethods)
 	router := mux.NewRouter()
-	router.Use(instanceInterceptor, assetCacheInterceptor)
+	router.Use(instanceInterceptor, assetCacheInterceptor, accessInterceptor)
 	RegisterRoutes(router, h)
 	router.PathPrefix("/{owner}").Methods("GET").HandlerFunc(DownloadHandleFunc(h, h.GetFile()))
 	return http_util.CopyHeadersToContext(http_mw.CORSInterceptor(router))
@@ -23,7 +23,8 @@ type Instance interface {
 }

 type InstanceVerifier interface {
-	InstanceByHost(context.Context, string) (Instance, error)
+	InstanceByHost(ctx context.Context, host string) (Instance, error)
+	InstanceByID(ctx context.Context) (Instance, error)
 }

 type instance struct {
@@ -55,6 +55,8 @@ func ExtractCaosError(err error) (c codes.Code, msg, id string, ok bool) {
 		return codes.Unavailable, caosErr.GetMessage(), caosErr.GetID(), true
 	case *caos_errs.UnimplementedError:
 		return codes.Unimplemented, caosErr.GetMessage(), caosErr.GetID(), true
+	case *caos_errs.ResourceExhaustedError:
+		return codes.ResourceExhausted, caosErr.GetMessage(), caosErr.GetID(), true
 	default:
 		return codes.Unknown, err.Error(), "", false
 	}
@@ -136,6 +136,14 @@ func Test_Extract(t *testing.T) {
 			"id",
 			true,
 		},
+		{
+			"exhausted",
+			args{caos_errs.ThrowResourceExhausted(nil, "id", "exhausted")},
+			codes.ResourceExhausted,
+			"exhausted",
+			"id",
+			true,
+		},
 		{
 			"unknown",
 			args{errors.New("unknown")},
internal/api/grpc/server/middleware/access_interceptor.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package middleware
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/logstore"
+	"github.com/zitadel/zitadel/internal/logstore/emitters/access"
+	"github.com/zitadel/zitadel/internal/telemetry/tracing"
+)
+
+func AccessStorageInterceptor(svc *logstore.Service) grpc.UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ interface{}, err error) {
+		if !svc.Enabled() {
+			return handler(ctx, req)
+		}
+
+		reqMd, _ := metadata.FromIncomingContext(ctx)
+
+		resp, handlerErr := handler(ctx, req)
+
+		interceptorCtx, span := tracing.NewServerInterceptorSpan(ctx)
+		defer func() { span.EndWithError(err) }()
+
+		var respStatus uint32
+		grpcStatus, ok := status.FromError(handlerErr)
+		if ok {
+			respStatus = uint32(grpcStatus.Code())
+		}
+
+		resMd, _ := metadata.FromOutgoingContext(ctx)
+		instance := authz.GetInstance(ctx)
+
+		record := &access.Record{
+			LogDate:         time.Now(),
+			Protocol:        access.GRPC,
+			RequestURL:      info.FullMethod,
+			ResponseStatus:  respStatus,
+			RequestHeaders:  reqMd,
+			ResponseHeaders: resMd,
+			InstanceID:      instance.InstanceID(),
+			ProjectID:       instance.ProjectID(),
+			RequestedDomain: instance.RequestedDomain(),
+			RequestedHost:   instance.RequestedHost(),
+		}
+
+		svc.Handle(interceptorCtx, record)
+		return resp, handlerErr
+	}
+}
@@ -2,7 +2,7 @@ package middleware

 import (
 	"context"
-	"errors"
+	errs "errors"
 	"fmt"
 	"strings"

@@ -14,7 +14,7 @@ import (
 	"google.golang.org/grpc/status"

 	"github.com/zitadel/zitadel/internal/api/authz"
-	caos_errors "github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/errors"
 	"github.com/zitadel/zitadel/internal/i18n"
 	"github.com/zitadel/zitadel/internal/telemetry/tracing"
 )
@@ -23,27 +23,36 @@ const (
 	HTTP1Host = "x-zitadel-http1-host"
 )

-type InstanceVerifier interface {
-	GetInstance(ctx context.Context)
-}
-
-func InstanceInterceptor(verifier authz.InstanceVerifier, headerName string, ignoredServices ...string) grpc.UnaryServerInterceptor {
+func InstanceInterceptor(verifier authz.InstanceVerifier, headerName string, explicitInstanceIdServices ...string) grpc.UnaryServerInterceptor {
 	translator, err := newZitadelTranslator(language.English)
 	logging.OnError(err).Panic("unable to get translator")
 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		return setInstance(ctx, req, info, handler, verifier, headerName, translator, ignoredServices...)
+		return setInstance(ctx, req, info, handler, verifier, headerName, translator, explicitInstanceIdServices...)
 	}
 }

-func setInstance(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, verifier authz.InstanceVerifier, headerName string, translator *i18n.Translator, ignoredServices ...string) (_ interface{}, err error) {
+func setInstance(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, verifier authz.InstanceVerifier, headerName string, translator *i18n.Translator, idFromRequestsServices ...string) (_ interface{}, err error) {
 	interceptorCtx, span := tracing.NewServerInterceptorSpan(ctx)
 	defer func() { span.EndWithError(err) }()
-	for _, service := range ignoredServices {
+	for _, service := range idFromRequestsServices {
 		if !strings.HasPrefix(service, "/") {
 			service = "/" + service
 		}
 		if strings.HasPrefix(info.FullMethod, service) {
-			return handler(ctx, req)
+			withInstanceIDProperty, ok := req.(interface{ GetInstanceId() string })
+			if !ok {
+				return handler(ctx, req)
+			}
+			ctx = authz.WithInstanceID(ctx, withInstanceIDProperty.GetInstanceId())
+			instance, err := verifier.InstanceByID(ctx)
+			if err != nil {
+				notFoundErr := new(errors.NotFoundError)
+				if errs.As(err, &notFoundErr) {
+					notFoundErr.Message = translator.LocalizeFromCtx(ctx, notFoundErr.GetMessage(), nil)
+				}
+				return nil, status.Error(codes.NotFound, err.Error())
+			}
+			return handler(authz.WithInstance(ctx, instance), req)
 		}
 	}

@@ -53,9 +62,9 @@ func setInstance(ctx context.Context, req interface{}, info *grpc.UnaryServerInf
 	}
 	instance, err := verifier.InstanceByHost(interceptorCtx, host)
 	if err != nil {
-		caosErr := new(caos_errors.NotFoundError)
-		if errors.As(err, &caosErr) {
-			caosErr.Message = translator.LocalizeFromCtx(ctx, caosErr.GetMessage(), nil)
+		notFoundErr := new(errors.NotFoundError)
+		if errs.As(err, &notFoundErr) {
+			notFoundErr.Message = translator.LocalizeFromCtx(ctx, notFoundErr.GetMessage(), nil)
 		}
 		return nil, status.Error(codes.NotFound, err.Error())
 	}
@@ -153,13 +153,15 @@ type mockInstanceVerifier struct {
 	host string
 }

-func (m *mockInstanceVerifier) InstanceByHost(ctx context.Context, host string) (authz.Instance, error) {
+func (m *mockInstanceVerifier) InstanceByHost(_ context.Context, host string) (authz.Instance, error) {
 	if host != m.host {
 		return nil, fmt.Errorf("invalid host")
 	}
 	return &mockInstance{}, nil
 }

+func (m *mockInstanceVerifier) InstanceByID(context.Context) (authz.Instance, error) { return nil, nil }
+
 type mockInstance struct{}

 func (m *mockInstance) InstanceID() string {
internal/api/grpc/server/middleware/quota_interceptor.go (new file, 46 lines)
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+	"context"
+	"strings"
+
+	"google.golang.org/grpc"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/logstore"
+	"github.com/zitadel/zitadel/internal/telemetry/tracing"
+)
+
+func QuotaExhaustedInterceptor(svc *logstore.Service, ignoreService ...string) grpc.UnaryServerInterceptor {
+
+	prunedIgnoredServices := make([]string, len(ignoreService))
+	for idx, service := range ignoreService {
+		if !strings.HasPrefix(service, "/") {
+			service = "/" + service
+		}
+		prunedIgnoredServices[idx] = service
+	}
+
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ interface{}, err error) {
+		if !svc.Enabled() {
+			return handler(ctx, req)
+		}
+		interceptorCtx, span := tracing.NewServerInterceptorSpan(ctx)
+		defer func() { span.EndWithError(err) }()
+
+		for _, service := range prunedIgnoredServices {
+			if strings.HasPrefix(info.FullMethod, service) {
+				return handler(ctx, req)
+			}
+		}
+
+		instance := authz.GetInstance(ctx)
+		remaining := svc.Limit(interceptorCtx, instance.InstanceID())
+		if remaining != nil && *remaining == 0 {
+			return nil, errors.ThrowResourceExhausted(nil, "QUOTA-vjAy8", "Quota.Access.Exhausted")
+		}
+		span.End()
+		return handler(ctx, req)
+	}
+}
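As a usage sketch, the interceptor composes like any other unary interceptor; passing the system service prefix exempts it, so an operator can still lift an exhausted quota. Names such as accessSvc are assumptions, not part of this commit:

// Sketch: registering the quota middleware on a bare gRPC server
// (assumes an initialized *logstore.Service named accessSvc).
srv := grpc.NewServer(
	grpc.ChainUnaryInterceptor(
		middleware.QuotaExhaustedInterceptor(accessSvc, system_pb.SystemService_MethodPrefix),
	),
)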
@@ -2,7 +2,6 @@ package server

 import (
 	"crypto/tls"
-
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
@@ -10,6 +9,7 @@ import (
 	"github.com/zitadel/zitadel/internal/api/authz"
 	grpc_api "github.com/zitadel/zitadel/internal/api/grpc"
 	"github.com/zitadel/zitadel/internal/api/grpc/server/middleware"
+	"github.com/zitadel/zitadel/internal/logstore"
 	"github.com/zitadel/zitadel/internal/query"
 	"github.com/zitadel/zitadel/internal/telemetry/metrics"
 	system_pb "github.com/zitadel/zitadel/pkg/grpc/system"
@@ -23,7 +23,14 @@ type Server interface {
 	AuthMethods() authz.MethodMapping
 }

-func CreateServer(verifier *authz.TokenVerifier, authConfig authz.Config, queries *query.Queries, hostHeaderName string, tlsConfig *tls.Config) *grpc.Server {
+func CreateServer(
+	verifier *authz.TokenVerifier,
+	authConfig authz.Config,
+	queries *query.Queries,
+	hostHeaderName string,
+	tlsConfig *tls.Config,
+	accessSvc *logstore.Service,
+) *grpc.Server {
 	metricTypes := []metrics.MetricType{metrics.MetricTypeTotalCount, metrics.MetricTypeRequestCount, metrics.MetricTypeStatusCode}
 	serverOptions := []grpc.ServerOption{
 		grpc.UnaryInterceptor(
@@ -33,10 +40,12 @@ func CreateServer(verifier *authz.TokenVerifier, authConfig authz.Config, querie
 				middleware.NoCacheInterceptor(),
 				middleware.ErrorHandler(),
 				middleware.InstanceInterceptor(queries, hostHeaderName, system_pb.SystemService_MethodPrefix),
+				middleware.AccessStorageInterceptor(accessSvc),
 				middleware.AuthorizationInterceptor(verifier, authConfig),
 				middleware.TranslationHandler(),
 				middleware.ValidationHandler(),
 				middleware.ServiceHandler(),
+				middleware.QuotaExhaustedInterceptor(accessSvc, system_pb.SystemService_MethodPrefix),
 			),
 		),
 	}
@@ -31,7 +31,6 @@ func (s *Server) ListInstances(ctx context.Context, req *system_pb.ListInstances
 }

 func (s *Server) GetInstance(ctx context.Context, req *system_pb.GetInstanceRequest) (*system_pb.GetInstanceResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	instance, err := s.query.Instance(ctx, true)
 	if err != nil {
 		return nil, err
@@ -53,7 +52,6 @@ func (s *Server) AddInstance(ctx context.Context, req *system_pb.AddInstanceRequ
 }

 func (s *Server) UpdateInstance(ctx context.Context, req *system_pb.UpdateInstanceRequest) (*system_pb.UpdateInstanceResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	details, err := s.command.UpdateInstance(ctx, req.InstanceName)
 	if err != nil {
 		return nil, err
@@ -86,7 +84,6 @@ func (s *Server) CreateInstance(ctx context.Context, req *system_pb.CreateInstan
 }

 func (s *Server) RemoveInstance(ctx context.Context, req *system_pb.RemoveInstanceRequest) (*system_pb.RemoveInstanceResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	details, err := s.command.RemoveInstance(ctx, req.InstanceId)
 	if err != nil {
 		return nil, err
@@ -97,7 +94,6 @@ func (s *Server) RemoveInstance(ctx context.Context, req *system_pb.RemoveInstan
 }

 func (s *Server) ListIAMMembers(ctx context.Context, req *system_pb.ListIAMMembersRequest) (*system_pb.ListIAMMembersResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	queries, err := ListIAMMembersRequestToQuery(req)
 	if err != nil {
 		return nil, err
@@ -139,7 +135,6 @@ func (s *Server) ExistsDomain(ctx context.Context, req *system_pb.ExistsDomainRe
 }

 func (s *Server) ListDomains(ctx context.Context, req *system_pb.ListDomainsRequest) (*system_pb.ListDomainsResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	queries, err := ListInstanceDomainsRequestToModel(req)
 	if err != nil {
 		return nil, err
@@ -156,8 +151,6 @@ func (s *Server) ListDomains(ctx context.Context, req *system_pb.ListDomainsRequ
 }

 func (s *Server) AddDomain(ctx context.Context, req *system_pb.AddDomainRequest) (*system_pb.AddDomainResponse, error) {
-	//TODO: should be solved in interceptor
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	instance, err := s.query.Instance(ctx, true)
 	if err != nil {
 		return nil, err
@@ -174,7 +167,6 @@ func (s *Server) AddDomain(ctx context.Context, req *system_pb.AddDomainRequest)
 }

 func (s *Server) RemoveDomain(ctx context.Context, req *system_pb.RemoveDomainRequest) (*system_pb.RemoveDomainResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	details, err := s.command.RemoveInstanceDomain(ctx, req.Domain)
 	if err != nil {
 		return nil, err
@@ -185,7 +177,6 @@ func (s *Server) RemoveDomain(ctx context.Context, req *system_pb.RemoveDomainRe
 }

 func (s *Server) SetPrimaryDomain(ctx context.Context, req *system_pb.SetPrimaryDomainRequest) (*system_pb.SetPrimaryDomainResponse, error) {
-	ctx = authz.WithInstanceID(ctx, req.InstanceId)
 	details, err := s.command.SetPrimaryInstanceDomain(ctx, req.Domain)
 	if err != nil {
 		return nil, err
internal/api/grpc/system/quota.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+package system
+
+import (
+	"context"
+
+	"github.com/zitadel/zitadel/internal/api/grpc/object"
+	"github.com/zitadel/zitadel/pkg/grpc/system"
+	system_pb "github.com/zitadel/zitadel/pkg/grpc/system"
+)
+
+func (s *Server) AddQuota(ctx context.Context, req *system.AddQuotaRequest) (*system.AddQuotaResponse, error) {
+	details, err := s.command.AddQuota(
+		ctx,
+		instanceQuotaPbToCommand(req),
+	)
+	if err != nil {
+		return nil, err
+	}
+	return &system_pb.AddQuotaResponse{
+		Details: object.AddToDetailsPb(details.Sequence, details.EventDate, details.ResourceOwner),
+	}, nil
+}
+
+func (s *Server) RemoveQuota(ctx context.Context, req *system.RemoveQuotaRequest) (*system.RemoveQuotaResponse, error) {
+	details, err := s.command.RemoveQuota(ctx, instanceQuotaUnitPbToCommand(req.Unit))
+	if err != nil {
+		return nil, err
+	}
+	return &system_pb.RemoveQuotaResponse{
+		Details: object.ChangeToDetailsPb(details.Sequence, details.EventDate, details.ResourceOwner),
+	}, nil
+}
internal/api/grpc/system/quota_converter.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+package system
+
+import (
+	"github.com/zitadel/zitadel/internal/command"
+	"github.com/zitadel/zitadel/pkg/grpc/quota"
+	"github.com/zitadel/zitadel/pkg/grpc/system"
+)
+
+func instanceQuotaPbToCommand(req *system.AddQuotaRequest) *command.AddQuota {
+	return &command.AddQuota{
+		Unit:          instanceQuotaUnitPbToCommand(req.Unit),
+		From:          req.From.AsTime(),
+		ResetInterval: req.ResetInterval.AsDuration(),
+		Amount:        req.Amount,
+		Limit:         req.Limit,
+		Notifications: instanceQuotaNotificationsPbToCommand(req.Notifications),
+	}
+}
+
+func instanceQuotaUnitPbToCommand(unit quota.Unit) command.QuotaUnit {
+	switch unit {
+	case quota.Unit_UNIT_REQUESTS_ALL_AUTHENTICATED:
+		return command.QuotaRequestsAllAuthenticated
+	case quota.Unit_UNIT_ACTIONS_ALL_RUN_SECONDS:
+		return command.QuotaActionsAllRunsSeconds
+	case quota.Unit_UNIT_UNIMPLEMENTED:
+		fallthrough
+	default:
+		return command.QuotaUnit(unit.String())
+	}
+}
+
+func instanceQuotaNotificationsPbToCommand(req []*quota.Notification) command.QuotaNotifications {
+	notifications := make([]*command.QuotaNotification, len(req))
+	for idx, item := range req {
+		notifications[idx] = &command.QuotaNotification{
+			Percent: uint16(item.Percent),
+			Repeat:  item.Repeat,
+			CallURL: item.CallUrl,
+		}
+	}
+	return notifications
+}
@@ -72,7 +72,9 @@ func WithPath(path string) CookieHandlerOpt {
 func WithMaxAge(maxAge int) CookieHandlerOpt {
 	return func(c *CookieHandler) {
 		c.maxAge = maxAge
-		c.securecookie.MaxAge(maxAge)
+		if c.securecookie != nil {
+			c.securecookie.MaxAge(maxAge)
+		}
 	}
 }

internal/api/http/middleware/access_interceptor.go (new file, 102 lines)
@@ -0,0 +1,102 @@
+package middleware
+
+import (
+	"math"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/zitadel/logging"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	http_utils "github.com/zitadel/zitadel/internal/api/http"
+	"github.com/zitadel/zitadel/internal/logstore"
+	"github.com/zitadel/zitadel/internal/logstore/emitters/access"
+	"github.com/zitadel/zitadel/internal/telemetry/tracing"
+)
+
+type AccessInterceptor struct {
+	svc           *logstore.Service
+	cookieHandler *http_utils.CookieHandler
+	limitConfig   *AccessConfig
+}
+
+type AccessConfig struct {
+	ExhaustedCookieKey    string
+	ExhaustedCookieMaxAge time.Duration
+}
+
+func NewAccessInterceptor(svc *logstore.Service, cookieConfig *AccessConfig) *AccessInterceptor {
+	return &AccessInterceptor{
+		svc: svc,
+		cookieHandler: http_utils.NewCookieHandler(
+			http_utils.WithUnsecure(),
+			http_utils.WithMaxAge(int(math.Floor(cookieConfig.ExhaustedCookieMaxAge.Seconds()))),
+		),
+		limitConfig: cookieConfig,
+	}
+}
+
+func (a *AccessInterceptor) Handle(next http.Handler) http.Handler {
+	if !a.svc.Enabled() {
+		return next
+	}
+	return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+
+		ctx := request.Context()
+		var err error
+
+		tracingCtx, span := tracing.NewServerInterceptorSpan(ctx)
+		defer func() { span.EndWithError(err) }()
+
+		wrappedWriter := &statusRecorder{ResponseWriter: writer, status: 0}
+
+		instance := authz.GetInstance(ctx)
+		remaining := a.svc.Limit(tracingCtx, instance.InstanceID())
+		limit := remaining != nil && *remaining == 0
+
+		a.cookieHandler.SetCookie(wrappedWriter, a.limitConfig.ExhaustedCookieKey, request.Host, strconv.FormatBool(limit))
+
+		if limit {
+			wrappedWriter.WriteHeader(http.StatusTooManyRequests)
+			wrappedWriter.ignoreWrites = true
+		}
+
+		next.ServeHTTP(wrappedWriter, request)
+
+		requestURL := request.RequestURI
+		unescapedURL, err := url.QueryUnescape(requestURL)
+		if err != nil {
+			logging.WithError(err).WithField("url", requestURL).Warning("failed to unescape request url")
+			// err = nil is effective because of deferred tracing span end
+			err = nil
+		}
+		a.svc.Handle(tracingCtx, &access.Record{
+			LogDate:         time.Now(),
+			Protocol:        access.HTTP,
+			RequestURL:      unescapedURL,
+			ResponseStatus:  uint32(wrappedWriter.status),
+			RequestHeaders:  request.Header,
+			ResponseHeaders: writer.Header(),
+			InstanceID:      instance.InstanceID(),
+			ProjectID:       instance.ProjectID(),
+			RequestedDomain: instance.RequestedDomain(),
+			RequestedHost:   instance.RequestedHost(),
+		})
+	})
+}
+
+type statusRecorder struct {
+	http.ResponseWriter
+	status       int
+	ignoreWrites bool
+}
+
+func (r *statusRecorder) WriteHeader(status int) {
+	if r.ignoreWrites {
+		return
+	}
+	r.status = status
+	r.ResponseWriter.WriteHeader(status)
+}
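A minimal way to exercise this handler would be the sketch below, which assumes an AccessInterceptor wired to a logstore service; the variable names are illustrative:

rec := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "/ui/console", nil)
interceptor.Handle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK) // ignored once the quota is exhausted
})).ServeHTTP(rec, req)
// With *remaining == 0: rec.Code is http.StatusTooManyRequests and the
// exhausted cookie is "true"; otherwise the wrapped handler's status is
// recorded and the cookie is "false".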
@@ -244,6 +244,10 @@ func (m *mockInstanceVerifier) InstanceByHost(_ context.Context, host string) (a
 	return &mockInstance{}, nil
 }

+func (m *mockInstanceVerifier) InstanceByID(context.Context) (authz.Instance, error) {
+	return nil, nil
+}
+
 type mockInstance struct{}

 func (m *mockInstance) InstanceID() string {
@@ -427,7 +427,7 @@ func (o *OPStorage) userinfoFlows(ctx context.Context, resourceOwner string, use
 		apiFields,
 		action.Script,
 		action.Name,
-		append(actions.ActionToOptions(action), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(action), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -583,7 +583,7 @@ func (o *OPStorage) privateClaimsFlows(ctx context.Context, userID string, claim
 		apiFields,
 		action.Script,
 		action.Name,
-		append(actions.ActionToOptions(action), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(action), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -73,13 +73,13 @@ type OPStorage struct {
 	assetAPIPrefix func(ctx context.Context) string
 }

-func NewProvider(ctx context.Context, config Config, defaultLogoutRedirectURI string, externalSecure bool, command *command.Commands, query *query.Queries, repo repository.Repository, encryptionAlg crypto.EncryptionAlgorithm, cryptoKey []byte, es *eventstore.Eventstore, projections *sql.DB, userAgentCookie, instanceHandler func(http.Handler) http.Handler) (op.OpenIDProvider, error) {
+func NewProvider(ctx context.Context, config Config, defaultLogoutRedirectURI string, externalSecure bool, command *command.Commands, query *query.Queries, repo repository.Repository, encryptionAlg crypto.EncryptionAlgorithm, cryptoKey []byte, es *eventstore.Eventstore, projections *sql.DB, userAgentCookie, instanceHandler, accessHandler func(http.Handler) http.Handler) (op.OpenIDProvider, error) {
 	opConfig, err := createOPConfig(config, defaultLogoutRedirectURI, cryptoKey)
 	if err != nil {
 		return nil, caos_errs.ThrowInternal(err, "OIDC-EGrqd", "cannot create op config: %w")
 	}
 	storage := newStorage(config, command, query, repo, encryptionAlg, es, projections, externalSecure)
-	options, err := createOptions(config, externalSecure, userAgentCookie, instanceHandler)
+	options, err := createOptions(config, externalSecure, userAgentCookie, instanceHandler, accessHandler)
 	if err != nil {
 		return nil, caos_errs.ThrowInternal(err, "OIDC-D3gq1", "cannot create options: %w")
 	}
@@ -117,7 +117,7 @@ func createOPConfig(config Config, defaultLogoutRedirectURI string, cryptoKey []
 	return opConfig, nil
 }

-func createOptions(config Config, externalSecure bool, userAgentCookie, instanceHandler func(http.Handler) http.Handler) ([]op.Option, error) {
+func createOptions(config Config, externalSecure bool, userAgentCookie, instanceHandler, accessHandler func(http.Handler) http.Handler) ([]op.Option, error) {
 	metricTypes := []metrics.MetricType{metrics.MetricTypeRequestCount, metrics.MetricTypeStatusCode, metrics.MetricTypeTotalCount}
 	options := []op.Option{
 		op.WithHttpInterceptors(
@@ -127,6 +127,7 @@ func createOptions(config Config, externalSecure bool, userAgentCookie, instance
 			instanceHandler,
 			userAgentCookie,
 			http_utils.CopyHeadersToContext,
+			accessHandler,
 		),
 	}
 	if !externalSecure {
@@ -40,7 +40,8 @@ func NewProvider(
 	es *eventstore.Eventstore,
 	projections *sql.DB,
 	instanceHandler,
-	userAgentCookie func(http.Handler) http.Handler,
+	userAgentCookie,
+	accessHandler func(http.Handler) http.Handler,
 ) (*provider.Provider, error) {
 	metricTypes := []metrics.MetricType{metrics.MetricTypeRequestCount, metrics.MetricTypeStatusCode, metrics.MetricTypeTotalCount}

@@ -64,6 +65,7 @@ func NewProvider(
 			middleware.NoCacheInterceptor().Handler,
 			instanceHandler,
 			userAgentCookie,
+			accessHandler,
 			http_utils.CopyHeadersToContext,
 		),
 	}
@@ -88,7 +88,7 @@ func (f *file) Stat() (_ fs.FileInfo, err error) {
 	return f, nil
 }

-func Start(config Config, externalSecure bool, issuer op.IssuerFromRequest, instanceHandler func(http.Handler) http.Handler, customerPortal string) (http.Handler, error) {
+func Start(config Config, externalSecure bool, issuer op.IssuerFromRequest, instanceHandler, accessInterceptor func(http.Handler) http.Handler, customerPortal string) (http.Handler, error) {
 	fSys, err := fs.Sub(static, "static")
 	if err != nil {
 		return nil, err
@@ -103,7 +103,7 @@ func Start(config Config, externalSecure bool, issuer op.IssuerFromRequest, inst

 	handler := mux.NewRouter()

-	handler.Use(instanceHandler, security)
+	handler.Use(instanceHandler, security, accessInterceptor)
 	handler.Handle(envRequestPath, middleware.TelemetryHandler()(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		url := http_util.BuildOrigin(r.Host, externalSecure)
 		environmentJSON, err := createEnvironmentJSON(url, issuer(r), authz.GetInstance(r.Context()).ConsoleClientID(), customerPortal)
@@ -106,7 +106,7 @@ func (l *Login) runPostExternalAuthenticationActions(
 		apiFields,
 		a.Script,
 		a.Name,
-		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -175,7 +175,7 @@ func (l *Login) runPostInternalAuthenticationActions(
 		apiFields,
 		a.Script,
 		a.Name,
-		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -274,7 +274,7 @@ func (l *Login) runPreCreationActions(
 		apiFields,
 		a.Script,
 		a.Name,
-		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -332,7 +332,7 @@ func (l *Login) runPostCreationActions(
 		apiFields,
 		a.Script,
 		a.Name,
-		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx), actions.WithLogger(actions.ServerLog))...,
+		append(actions.ActionToOptions(a), actions.WithHTTP(actionCtx))...,
 	)
 	cancel()
 	if err != nil {
@@ -69,6 +69,7 @@ func CreateLogin(config Config,
 	oidcInstanceHandler,
 	samlInstanceHandler mux.MiddlewareFunc,
 	assetCache mux.MiddlewareFunc,
+	accessHandler mux.MiddlewareFunc,
 	userCodeAlg crypto.EncryptionAlgorithm,
 	idpConfigAlg crypto.EncryptionAlgorithm,
 	csrfCookieKey []byte,
@@ -94,7 +95,7 @@ func CreateLogin(config Config,
 	cacheInterceptor := createCacheInterceptor(config.Cache.MaxAge, config.Cache.SharedMaxAge, assetCache)
 	security := middleware.SecurityHeaders(csp(), login.cspErrorHandler)

-	login.router = CreateRouter(login, statikFS, middleware.TelemetryHandler(IgnoreInstanceEndpoints...), oidcInstanceHandler, samlInstanceHandler, csrfInterceptor, cacheInterceptor, security, userAgentCookie, issuerInterceptor)
+	login.router = CreateRouter(login, statikFS, middleware.TelemetryHandler(IgnoreInstanceEndpoints...), oidcInstanceHandler, samlInstanceHandler, csrfInterceptor, cacheInterceptor, security, userAgentCookie, issuerInterceptor, accessHandler)
 	login.renderer = CreateRenderer(HandlerPrefix, statikFS, staticStorage, config.LanguageCookieName)
 	login.parser = form.NewParser()
 	return login, nil
@@ -19,6 +19,7 @@ import (
 	"github.com/zitadel/zitadel/internal/repository/keypair"
 	"github.com/zitadel/zitadel/internal/repository/org"
 	proj_repo "github.com/zitadel/zitadel/internal/repository/project"
+	"github.com/zitadel/zitadel/internal/repository/quota"
 	usr_repo "github.com/zitadel/zitadel/internal/repository/user"
 	usr_grant_repo "github.com/zitadel/zitadel/internal/repository/usergrant"
 	"github.com/zitadel/zitadel/internal/static"
@@ -110,6 +111,7 @@ func StartCommands(es *eventstore.Eventstore,
 	proj_repo.RegisterEventMappers(repo.eventstore)
 	keypair.RegisterEventMappers(repo.eventstore)
 	action.RegisterEventMappers(repo.eventstore)
+	quota.RegisterEventMappers(repo.eventstore)

 	repo.userPasswordAlg = crypto.NewBCrypt(defaults.SecretGenerators.PasswordSaltCost)
 	repo.machineKeySize = int(defaults.SecretGenerators.MachineKeySize)
@@ -129,6 +131,7 @@ func StartCommands(es *eventstore.Eventstore,

 func AppendAndReduce(object interface {
 	AppendEvents(...eventstore.Event)
+	// TODO: Why is it allowed to return an error here?
 	Reduce() error
 }, events ...eventstore.Event) error {
 	object.AppendEvents(events...)
@@ -18,6 +18,7 @@ import (
 	"github.com/zitadel/zitadel/internal/repository/instance"
 	"github.com/zitadel/zitadel/internal/repository/org"
 	"github.com/zitadel/zitadel/internal/repository/project"
+	"github.com/zitadel/zitadel/internal/repository/quota"
 	"github.com/zitadel/zitadel/internal/repository/user"
 )

@@ -116,6 +117,9 @@ type InstanceSetup struct {
 		RefreshTokenIdleExpiration time.Duration
 		RefreshTokenExpiration     time.Duration
 	}
+	Quotas *struct {
+		Items []*AddQuota
+	}
 }

 type ZitadelConfig struct {
@@ -261,6 +265,19 @@ func (c *Commands) SetUpInstance(ctx context.Context, setup *InstanceSetup) (str
 		prepareAddDefaultEmailTemplate(instanceAgg, setup.EmailTemplate),
 	}

+	if setup.Quotas != nil {
+		for _, q := range setup.Quotas.Items {
+			quotaId, err := c.idGenerator.Next()
+			if err != nil {
+				return "", "", nil, nil, err
+			}
+
+			quotaAggregate := quota.NewAggregate(quotaId, instanceID, instanceID)
+
+			validations = append(validations, c.AddQuotaCommand(quotaAggregate, q))
+		}
+	}
+
 	for _, msg := range setup.MessageTexts {
 		validations = append(validations, prepareSetInstanceCustomMessageTexts(instanceAgg, msg))
 	}
internal/command/quota.go (new file, 207 lines)
@@ -0,0 +1,207 @@
+package command
+
+import (
+	"context"
+	"net/url"
+	"time"
+
+	"github.com/zitadel/zitadel/internal/api/authz"
+	"github.com/zitadel/zitadel/internal/command/preparation"
+	"github.com/zitadel/zitadel/internal/domain"
+	"github.com/zitadel/zitadel/internal/errors"
+	"github.com/zitadel/zitadel/internal/eventstore"
+	"github.com/zitadel/zitadel/internal/id"
+	"github.com/zitadel/zitadel/internal/repository/quota"
+)
+
+type QuotaUnit string
+
+const (
+	QuotaRequestsAllAuthenticated QuotaUnit = "requests.all.authenticated"
+	QuotaActionsAllRunsSeconds    QuotaUnit = "actions.all.runs.seconds"
+)
+
+func (q *QuotaUnit) Enum() quota.Unit {
+	switch *q {
+	case QuotaRequestsAllAuthenticated:
+		return quota.RequestsAllAuthenticated
+	case QuotaActionsAllRunsSeconds:
+		return quota.ActionsAllRunsSeconds
+	default:
+		return quota.Unimplemented
+	}
+}
+
+func (c *Commands) AddQuota(
+	ctx context.Context,
+	q *AddQuota,
+) (*domain.ObjectDetails, error) {
+	instanceId := authz.GetInstance(ctx).InstanceID()
+
+	wm, err := c.getQuotaWriteModel(ctx, instanceId, instanceId, q.Unit.Enum())
+	if err != nil {
+		return nil, err
+	}
+
+	if wm.active {
+		return nil, errors.ThrowAlreadyExists(nil, "COMMAND-WDfFf", "Errors.Quota.AlreadyExists")
+	}
+
+	aggregateId, err := c.idGenerator.Next()
+	if err != nil {
+		return nil, err
+	}
+	aggregate := quota.NewAggregate(aggregateId, instanceId, instanceId)
+
+	cmds, err := preparation.PrepareCommands(ctx, c.eventstore.Filter, c.AddQuotaCommand(aggregate, q))
+	if err != nil {
+		return nil, err
+	}
+	events, err := c.eventstore.Push(ctx, cmds...)
+	if err != nil {
+		return nil, err
+	}
+	err = AppendAndReduce(wm, events...)
+	if err != nil {
+		return nil, err
+	}
+	return writeModelToObjectDetails(&wm.WriteModel), nil
+}
+
+func (c *Commands) RemoveQuota(ctx context.Context, unit QuotaUnit) (*domain.ObjectDetails, error) {
+	instanceId := authz.GetInstance(ctx).InstanceID()
+
+	wm, err := c.getQuotaWriteModel(ctx, instanceId, instanceId, unit.Enum())
+	if err != nil {
+		return nil, err
+	}
+
+	if !wm.active {
+		return nil, errors.ThrowNotFound(nil, "COMMAND-WDfFf", "Errors.Quota.NotFound")
+	}
+
+	aggregate := quota.NewAggregate(wm.AggregateID, instanceId, instanceId)
+
+	events := []eventstore.Command{
+		quota.NewRemovedEvent(ctx, &aggregate.Aggregate, unit.Enum()),
+	}
+	pushedEvents, err := c.eventstore.Push(ctx, events...)
+	if err != nil {
+		return nil, err
+	}
+	err = AppendAndReduce(wm, pushedEvents...)
+	if err != nil {
+		return nil, err
+	}
+	return writeModelToObjectDetails(&wm.WriteModel), nil
+}
+
+func (c *Commands) getQuotaWriteModel(ctx context.Context, instanceId, resourceOwner string, unit quota.Unit) (*quotaWriteModel, error) {
+	wm := newQuotaWriteModel(instanceId, resourceOwner, unit)
+	return wm, c.eventstore.FilterToQueryReducer(ctx, wm)
+}
+
+type QuotaNotification struct {
+	Percent uint16
+	Repeat  bool
+	CallURL string
+}
+
+type QuotaNotifications []*QuotaNotification
+
+func (q *QuotaNotifications) toAddedEventNotifications(idGenerator id.Generator) ([]*quota.AddedEventNotification, error) {
+	if q == nil {
+		return nil, nil
+	}
+
+	notifications := make([]*quota.AddedEventNotification, len(*q))
+	for idx, notification := range *q {
+
+		id, err := idGenerator.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		notifications[idx] = &quota.AddedEventNotification{
+			ID:      id,
+			Percent: notification.Percent,
+			Repeat:  notification.Repeat,
+			CallURL: notification.CallURL,
+		}
+	}
+
+	return notifications, nil
+}
+
+type AddQuota struct {
+	Unit          QuotaUnit
+	From          time.Time
+	ResetInterval time.Duration
+	Amount        uint64
+	Limit         bool
+	Notifications QuotaNotifications
+}
+
+func (q *AddQuota) validate() error {
+	for _, notification := range q.Notifications {
+		u, err := url.Parse(notification.CallURL)
+		if err != nil {
+			return errors.ThrowInvalidArgument(err, "QUOTA-bZ0Fj", "Errors.Quota.Invalid.CallURL")
+		}
+
+		if !u.IsAbs() || u.Host == "" {
+			return errors.ThrowInvalidArgument(nil, "QUOTA-HAYmN", "Errors.Quota.Invalid.CallURL")
+		}
+
+		if notification.Percent < 1 {
+			return errors.ThrowInvalidArgument(nil, "QUOTA-pBfjq", "Errors.Quota.Invalid.Percent")
+		}
+	}
+
+	if q.Unit.Enum() == quota.Unimplemented {
+		return errors.ThrowInvalidArgument(nil, "QUOTA-OTeSh", "Errors.Quota.Invalid.Unimplemented")
+	}
+
+	if q.Amount < 1 {
+		return errors.ThrowInvalidArgument(nil, "QUOTA-hOKSJ", "Errors.Quota.Invalid.Amount")
+	}
+
+	if q.ResetInterval < time.Minute {
+		return errors.ThrowInvalidArgument(nil, "QUOTA-R5otd", "Errors.Quota.Invalid.ResetInterval")
+	}
+
+	if !q.Limit && len(q.Notifications) == 0 {
+		return errors.ThrowInvalidArgument(nil, "QUOTA-4Nv68", "Errors.Quota.Invalid.Noop")
+	}
+
+	return nil
+}
+
+func (c *Commands) AddQuotaCommand(a *quota.Aggregate, q *AddQuota) preparation.Validation {
+	return func() (preparation.CreateCommands, error) {
+
+		if err := q.validate(); err != nil {
+			return nil, err
+		}
+
+		return func(ctx context.Context, filter preparation.FilterToQueryReducer) (cmd []eventstore.Command, err error) {
+
+				notifications, err := q.Notifications.toAddedEventNotifications(c.idGenerator)
+				if err != nil {
+					return nil, err
+				}
+
+				return []eventstore.Command{quota.NewAddedEvent(
+					ctx,
+					&a.Aggregate,
+					q.Unit.Enum(),
+					q.From,
+					q.ResetInterval,
+					q.Amount,
+					q.Limit,
+					notifications,
+				)}, err
+			},
+			nil
+	}
+}
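Putting the validation rules above together, a command payload that passes (*AddQuota).validate() looks like the following sketch; all values are illustrative:

q := &command.AddQuota{
	Unit:          command.QuotaRequestsAllAuthenticated,
	From:          time.Now(),
	ResetInterval: 24 * time.Hour, // must be at least one minute
	Amount:        1000,           // must be at least 1
	Limit:         true,           // without Limit, at least one notification is required
	Notifications: command.QuotaNotifications{
		// CallURL must be absolute with a host; Percent must be >= 1.
		{Percent: 90, Repeat: true, CallURL: "https://example.com/hook"},
	},
}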
internal/command/quota_model.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package command
+
+import (
+	"github.com/zitadel/zitadel/internal/eventstore"
+	"github.com/zitadel/zitadel/internal/repository/quota"
+)
+
+type quotaWriteModel struct {
+	eventstore.WriteModel
+	unit   quota.Unit
+	active bool
+	config *quota.AddedEvent
+}
+
+// newQuotaWriteModel aggregateId is filled by reducing unit matching events
+func newQuotaWriteModel(instanceId, resourceOwner string, unit quota.Unit) *quotaWriteModel {
+	return &quotaWriteModel{
+		WriteModel: eventstore.WriteModel{
+			InstanceID:    instanceId,
+			ResourceOwner: resourceOwner,
+		},
+		unit: unit,
+	}
+}
+
+func (wm *quotaWriteModel) Query() *eventstore.SearchQueryBuilder {
+	query := eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
+		ResourceOwner(wm.ResourceOwner).
+		AddQuery().
+		InstanceID(wm.InstanceID).
+		AggregateTypes(quota.AggregateType).
+		EventTypes(
+			quota.AddedEventType,
+			quota.RemovedEventType,
+		).EventData(map[string]interface{}{"unit": wm.unit})
+
+	return query.Builder()
+}
+
+func (wm *quotaWriteModel) Reduce() error {
+	for _, event := range wm.Events {
+		switch e := event.(type) {
+		case *quota.AddedEvent:
+			wm.AggregateID = e.Aggregate().ID
+			wm.active = true
+			wm.config = e
+		case *quota.RemovedEvent:
+			wm.AggregateID = e.Aggregate().ID
+			wm.active = false
+			wm.config = nil
+		}
+	}
+	return wm.WriteModel.Reduce()
+}
internal/command/quota_notifications.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package command

import (
    "context"
    "math"
    "time"

    "github.com/zitadel/zitadel/internal/eventstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

func (c *Commands) GetDueQuotaNotifications(ctx context.Context, config *quota.AddedEvent, periodStart time.Time, usedAbs uint64) ([]*quota.NotifiedEvent, error) {
    if len(config.Notifications) == 0 {
        return nil, nil
    }

    aggregate := config.Aggregate()
    wm, err := c.getQuotaNotificationsWriteModel(ctx, aggregate, periodStart)
    if err != nil {
        return nil, err
    }

    usedRel := uint16(math.Floor(float64(usedAbs*100) / float64(config.Amount)))

    var dueNotifications []*quota.NotifiedEvent
    for _, notification := range config.Notifications {
        if notification.Percent > usedRel {
            continue
        }

        threshold := notification.Percent
        if notification.Repeat {
            threshold = uint16(math.Min(1, math.Floor(float64(usedRel)/float64(notification.Percent)))) * notification.Percent
        }

        if wm.latestNotifiedThresholds[notification.ID] < threshold {
            dueNotifications = append(
                dueNotifications,
                quota.NewNotifiedEvent(
                    ctx,
                    &aggregate,
                    config.Unit,
                    notification.ID,
                    notification.CallURL,
                    periodStart,
                    threshold,
                    usedAbs,
                ),
            )
        }
    }

    return dueNotifications, nil
}

func (c *Commands) getQuotaNotificationsWriteModel(ctx context.Context, aggregate eventstore.Aggregate, periodStart time.Time) (*quotaNotificationsWriteModel, error) {
    wm := newQuotaNotificationsWriteModel(aggregate.ID, aggregate.InstanceID, aggregate.ResourceOwner, periodStart)
    return wm, c.eventstore.FilterToQueryReducer(ctx, wm)
}
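To make the threshold arithmetic above concrete, here is a small, self-contained sketch that reproduces the relative-usage and repeat-threshold computation; the sample values (amount 100, usage 230, a repeating 100% notification) are invented for illustration:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Invented sample: quota amount 100, 230 units already used.
    var usedAbs, amount uint64 = 230, 100
    usedRel := uint16(math.Floor(float64(usedAbs*100) / float64(amount))) // 230 (percent)

    // A repeating notification configured at 100%.
    var percent uint16 = 100
    // min(1, floor(usedRel/percent)) is 0 below the configured percent and 1 at or above it,
    // so the effective threshold is either 0 or the configured percent.
    threshold := uint16(math.Min(1, math.Floor(float64(usedRel)/float64(percent)))) * percent
    fmt.Println(usedRel, threshold) // 230 100: due whenever the last notified threshold is below 100
}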
internal/command/quota_notifications_model.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package command

import (
    "time"

    "github.com/zitadel/zitadel/internal/eventstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

type quotaNotificationsWriteModel struct {
    eventstore.WriteModel
    periodStart              time.Time
    latestNotifiedThresholds map[string]uint16
}

func newQuotaNotificationsWriteModel(aggregateId, instanceId, resourceOwner string, periodStart time.Time) *quotaNotificationsWriteModel {
    return &quotaNotificationsWriteModel{
        WriteModel: eventstore.WriteModel{
            AggregateID:   aggregateId,
            InstanceID:    instanceId,
            ResourceOwner: resourceOwner,
        },
        periodStart:              periodStart,
        latestNotifiedThresholds: make(map[string]uint16),
    }
}

func (wm *quotaNotificationsWriteModel) Query() *eventstore.SearchQueryBuilder {
    return eventstore.NewSearchQueryBuilder(eventstore.ColumnsEvent).
        ResourceOwner(wm.ResourceOwner).
        AddQuery().
        InstanceID(wm.InstanceID).
        AggregateTypes(quota.AggregateType).
        AggregateIDs(wm.AggregateID).
        CreationDateAfter(wm.periodStart).
        EventTypes(quota.NotifiedEventType).Builder()
}

func (wm *quotaNotificationsWriteModel) Reduce() error {
    for _, event := range wm.Events {
        e := event.(*quota.NotifiedEvent)
        wm.latestNotifiedThresholds[e.ID] = e.Threshold
    }
    return wm.WriteModel.Reduce()
}
internal/command/quota_period.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package command

import (
    "context"
    "time"

    "github.com/zitadel/zitadel/internal/repository/quota"
)

func (c *Commands) GetCurrentQuotaPeriod(ctx context.Context, instanceID string, unit quota.Unit) (*quota.AddedEvent, time.Time, error) {
    wm, err := c.getQuotaWriteModel(ctx, instanceID, instanceID, unit)
    if err != nil || !wm.active {
        return nil, time.Time{}, err
    }

    return wm.config, pushPeriodStart(wm.config.From, wm.config.ResetInterval, time.Now()), nil
}

func pushPeriodStart(from time.Time, interval time.Duration, now time.Time) time.Time {
    next := from.Add(interval)
    if next.After(now) {
        return from
    }
    return pushPeriodStart(next, interval, now)
}
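A quick, self-contained check of how pushPeriodStart recurses to the most recent period boundary (the timestamps are made up; the function body is copied from above):

package main

import (
    "fmt"
    "time"
)

func pushPeriodStart(from time.Time, interval time.Duration, now time.Time) time.Time {
    next := from.Add(interval)
    if next.After(now) {
        return from
    }
    return pushPeriodStart(next, interval, now)
}

func main() {
    from := time.Unix(0, 0)
    now := time.Unix(200, 0)
    // With a 90s reset interval, the boundaries are t=0, 90, 180, 270, ...
    // so the period containing t=200 started at t=180.
    fmt.Println(pushPeriodStart(from, 90*time.Second, now).Unix()) // 180
}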
internal/command/quota_report.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package command

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net/http"

    "github.com/zitadel/zitadel/internal/repository/quota"
)

// ReportUsage calls notification hooks and emits the notified events
func (c *Commands) ReportUsage(ctx context.Context, dueNotifications []*quota.NotifiedEvent) error {
    for _, notification := range dueNotifications {
        if err := notify(ctx, notification); err != nil {
            return err
        }

        if _, err := c.eventstore.Push(ctx, notification); err != nil {
            return err
        }
    }

    return nil
}

func notify(ctx context.Context, notification *quota.NotifiedEvent) error {
    payload, err := json.Marshal(notification)
    if err != nil {
        return err
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, notification.CallURL, bytes.NewReader(payload))
    if err != nil {
        return err
    }

    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }

    if err = resp.Body.Close(); err != nil {
        return err
    }

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        return fmt.Errorf("calling url %s returned %s", notification.CallURL, resp.Status)
    }

    return nil
}
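What a configured CallURL receives is the JSON-marshalled NotifiedEvent. A minimal receiver sketch, assuming the field names follow the json tags declared in internal/repository/quota/events.go; the route, port, and handler here are hypothetical:

package main

import (
    "encoding/json"
    "log"
    "net/http"
    "time"
)

// notifiedPayload mirrors the json tags of quota.NotifiedEvent.
type notifiedPayload struct {
    Unit        uint      `json:"unit"`
    ID          string    `json:"id"`
    CallURL     string    `json:"callURL"`
    PeriodStart time.Time `json:"periodStart"`
    Threshold   uint16    `json:"threshold"`
    Usage       uint64    `json:"usage"`
}

func main() {
    http.HandleFunc("/quota-hook", func(w http.ResponseWriter, r *http.Request) {
        var p notifiedPayload
        if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        log.Printf("quota notification %s reached %d%% (usage %d)", p.ID, p.Threshold, p.Usage)
        w.WriteHeader(http.StatusOK) // any 2xx status marks the call as successful
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}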
internal/errors/resource_exhausted.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package errors

import (
    "fmt"
)

var (
    _ ResourceExhausted = (*ResourceExhaustedError)(nil)
    _ Error             = (*ResourceExhaustedError)(nil)
)

type ResourceExhausted interface {
    error
    IsResourceExhausted()
}

type ResourceExhaustedError struct {
    *CaosError
}

func ThrowResourceExhausted(parent error, id, message string) error {
    return &ResourceExhaustedError{CreateCaosError(parent, id, message)}
}

func ThrowResourceExhaustedf(parent error, id, format string, a ...interface{}) error {
    return ThrowResourceExhausted(parent, id, fmt.Sprintf(format, a...))
}

func (err *ResourceExhaustedError) IsResourceExhausted() {}

func IsResourceExhausted(err error) bool {
    //nolint:errorlint
    _, ok := err.(ResourceExhausted)
    return ok
}

func (err *ResourceExhaustedError) Is(target error) bool {
    //nolint:errorlint
    t, ok := target.(*ResourceExhaustedError)
    if !ok {
        return false
    }
    return err.CaosError.Is(t.CaosError)
}

func (err *ResourceExhaustedError) Unwrap() error {
    return err.CaosError
}
internal/errors/resource_exhausted_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package errors_test

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"

    caos_errs "github.com/zitadel/zitadel/internal/errors"
)

func TestResourceExhaustedError(t *testing.T) {
    var err interface{} = new(caos_errs.ResourceExhaustedError)
    _, ok := err.(caos_errs.ResourceExhausted)
    assert.True(t, ok)
}

func TestThrowResourceExhaustedf(t *testing.T) {
    err := caos_errs.ThrowResourceExhaustedf(nil, "id", "msg")
    // TODO: refactor errors package
    //nolint:errorlint
    _, ok := err.(*caos_errs.ResourceExhaustedError)
    assert.True(t, ok)
}

func TestIsResourceExhausted(t *testing.T) {
    err := caos_errs.ThrowResourceExhausted(nil, "id", "msg")
    ok := caos_errs.IsResourceExhausted(err)
    assert.True(t, ok)

    err = errors.New("I am found!")
    ok = caos_errs.IsResourceExhausted(err)
    assert.False(t, ok)
}
@@ -119,6 +119,7 @@ const (
    ColumnTypeJSONB
    ColumnTypeBytes
    ColumnTypeTimestamp
    ColumnTypeInterval
    ColumnTypeEnum
    ColumnTypeEnumArray
    ColumnTypeInt64
@@ -389,6 +390,8 @@ func columnType(columnType ColumnType) string {
        return "TEXT[]"
    case ColumnTypeTimestamp:
        return "TIMESTAMPTZ"
    case ColumnTypeInterval:
        return "INTERVAL"
    case ColumnTypeEnum:
        return "SMALLINT"
    case ColumnTypeEnumArray:
internal/logstore/config.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package logstore

type Configs struct {
    Access    *Config
    Execution *Config
}

type Config struct {
    Database *EmitterConfig
    Stdout   *EmitterConfig
}
internal/logstore/debouncer.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package logstore

import (
    "context"
    "sync"
    "time"

    "github.com/benbjohnson/clock"
    "github.com/zitadel/logging"
)

type bulkSink interface {
    sendBulk(ctx context.Context, bulk []LogRecord) error
}

var _ bulkSink = bulkSinkFunc(nil)

type bulkSinkFunc func(ctx context.Context, items []LogRecord) error

func (s bulkSinkFunc) sendBulk(ctx context.Context, items []LogRecord) error {
    return s(ctx, items)
}

type debouncer struct {
    // Storing context.Context in a struct is generally bad practice:
    // https://go.dev/blog/context-and-structs
    // However, the debouncer starts a goroutine that triggers side effects itself,
    // so there is no incoming context.Context available when these events trigger.
    // The only context we can use for the side effects is the app context.
    // Because it can be cancelled by OS signals, it is a better fit than creating new background contexts.
    binarySignaledCtx context.Context
    clock             clock.Clock
    ticker            *clock.Ticker
    mux               sync.Mutex
    cfg               DebouncerConfig
    storage           bulkSink
    cache             []LogRecord
    cacheLen          uint
}

type DebouncerConfig struct {
    MinFrequency time.Duration
    MaxBulkSize  uint
}

func newDebouncer(binarySignaledCtx context.Context, cfg DebouncerConfig, clock clock.Clock, ship bulkSink) *debouncer {
    a := &debouncer{
        binarySignaledCtx: binarySignaledCtx,
        clock:             clock,
        cfg:               cfg,
        storage:           ship,
    }

    if cfg.MinFrequency > 0 {
        a.ticker = clock.Ticker(cfg.MinFrequency)
        go a.shipOnTicks()
    }
    return a
}

func (d *debouncer) add(item LogRecord) {
    d.mux.Lock()
    defer d.mux.Unlock()
    d.cache = append(d.cache, item)
    d.cacheLen++
    if d.cfg.MaxBulkSize > 0 && d.cacheLen >= d.cfg.MaxBulkSize {
        // add should not block, so it releases the lock before shipping
        go d.ship()
    }
}

func (d *debouncer) ship() {
    if d.cacheLen == 0 {
        return
    }
    d.mux.Lock()
    defer d.mux.Unlock()
    if err := d.storage.sendBulk(d.binarySignaledCtx, d.cache); err != nil {
        logging.WithError(err).WithField("size", len(d.cache)).Error("storing bulk failed")
    }
    d.cache = nil
    d.cacheLen = 0
    if d.cfg.MinFrequency > 0 {
        d.ticker.Reset(d.cfg.MinFrequency)
    }
}

func (d *debouncer) shipOnTicks() {
    for range d.ticker.C {
        d.ship()
    }
}
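The same max-bulk half of the pattern in isolation, as a runnable sketch: cache items under a mutex and flush asynchronously once the bulk is full. The types and sizes here are simplified stand-ins and make no claim to match the internal behavior exactly:

package main

import (
    "fmt"
    "sync"
    "time"
)

type debounced struct {
    mux   sync.Mutex
    cache []string
    max   int
    flush func([]string)
}

func (d *debounced) add(item string) {
    d.mux.Lock()
    defer d.mux.Unlock()
    d.cache = append(d.cache, item)
    if len(d.cache) >= d.max {
        bulk := d.cache
        d.cache = nil
        go d.flush(bulk) // don't block the caller while shipping
    }
}

func main() {
    d := &debounced{max: 3, flush: func(bulk []string) { fmt.Println("shipped", bulk) }}
    for i := 0; i < 7; i++ {
        d.add(fmt.Sprint(i)) // ships [0 1 2] and [3 4 5]; "6" stays cached
    }
    time.Sleep(100 * time.Millisecond) // give the async flushes time to run
}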
internal/logstore/emitter.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package logstore

import (
    "context"
    "fmt"
    "time"

    "github.com/benbjohnson/clock"
    "github.com/zitadel/logging"
)

type EmitterConfig struct {
    Enabled         bool
    Keep            time.Duration
    CleanupInterval time.Duration
    Debounce        *DebouncerConfig
}

type emitter struct {
    enabled   bool
    ctx       context.Context
    debouncer *debouncer
    emitter   LogEmitter
    clock     clock.Clock
}

type LogRecord interface {
    Normalize() LogRecord
}

type LogRecordFunc func() LogRecord

func (r LogRecordFunc) Normalize() LogRecord {
    return r()
}

type LogEmitter interface {
    Emit(ctx context.Context, bulk []LogRecord) error
}

type LogEmitterFunc func(ctx context.Context, bulk []LogRecord) error

func (l LogEmitterFunc) Emit(ctx context.Context, bulk []LogRecord) error {
    return l(ctx, bulk)
}

type LogCleanupper interface {
    LogEmitter
    Cleanup(ctx context.Context, keep time.Duration) error
}

// NewEmitter accepts a Clock from github.com/benbjohnson/clock so timers and tickers can be controlled in unit tests
func NewEmitter(ctx context.Context, clock clock.Clock, cfg *EmitterConfig, logger LogEmitter) (*emitter, error) {
    svc := &emitter{
        enabled: cfg != nil && cfg.Enabled,
        ctx:     ctx,
        emitter: logger,
        clock:   clock,
    }

    if !svc.enabled {
        return svc, nil
    }

    if cfg.Debounce != nil && (cfg.Debounce.MinFrequency > 0 || cfg.Debounce.MaxBulkSize > 0) {
        svc.debouncer = newDebouncer(ctx, *cfg.Debounce, clock, newStorageBulkSink(svc.emitter))
    }

    cleanupper, ok := logger.(LogCleanupper)
    if !ok {
        if cfg.Keep != 0 {
            return nil, fmt.Errorf("cleaning up for this storage type is not supported, so keep duration must be 0, but is %d", cfg.Keep)
        }
        if cfg.CleanupInterval != 0 {
            return nil, fmt.Errorf("cleaning up for this storage type is not supported, so cleanup interval duration must be 0, but is %d", cfg.CleanupInterval)
        }

        return svc, nil
    }

    if cfg.Keep != 0 && cfg.CleanupInterval != 0 {
        go svc.startCleanupping(cleanupper, cfg.CleanupInterval, cfg.Keep)
    }
    return svc, nil
}

func (s *emitter) startCleanupping(cleanupper LogCleanupper, cleanupInterval, keep time.Duration) {
    for range s.clock.Tick(cleanupInterval) {
        if err := cleanupper.Cleanup(s.ctx, keep); err != nil {
            logging.WithError(err).Error("cleaning up logs failed")
        }
    }
}

func (s *emitter) Emit(ctx context.Context, record LogRecord) (err error) {
    if !s.enabled {
        return nil
    }

    if s.debouncer != nil {
        s.debouncer.add(record)
        return nil
    }

    return s.emitter.Emit(ctx, []LogRecord{record})
}

func newStorageBulkSink(emitter LogEmitter) bulkSinkFunc {
    return func(ctx context.Context, bulk []LogRecord) error {
        return emitter.Emit(ctx, bulk)
    }
}
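A sketch of how these pieces wire together, assembled from the constructors in this diff (the config values are examples, not recommendations). Stdout cannot be cleaned up, so Keep and CleanupInterval stay at their zero values:

package main

import (
    "context"
    "log"
    "time"

    "github.com/benbjohnson/clock"

    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/logstore/emitters/stdout"
)

func main() {
    em, err := logstore.NewEmitter(context.Background(), clock.New(), &logstore.EmitterConfig{
        Enabled: true,
        Debounce: &logstore.DebouncerConfig{
            MinFrequency: time.Minute, // ship at least once per minute
            MaxBulkSize:  100,         // or as soon as 100 records are cached
        },
    }, stdout.NewStdoutEmitter())
    if err != nil {
        log.Fatal(err)
    }
    _ = em // records reach the emitter via logstore.Service.Handle
}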
internal/logstore/emitters/access/database.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package access

import (
    "context"
    "database/sql"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/Masterminds/squirrel"
    "github.com/zitadel/logging"
    "google.golang.org/grpc/codes"

    zitadel_http "github.com/zitadel/zitadel/internal/api/http"
    caos_errors "github.com/zitadel/zitadel/internal/errors"
    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

const (
    accessLogsTable          = "logstore.access"
    accessTimestampCol       = "log_date"
    accessProtocolCol        = "protocol"
    accessRequestURLCol      = "request_url"
    accessResponseStatusCol  = "response_status"
    accessRequestHeadersCol  = "request_headers"
    accessResponseHeadersCol = "response_headers"
    accessInstanceIdCol      = "instance_id"
    accessProjectIdCol       = "project_id"
    accessRequestedDomainCol = "requested_domain"
    accessRequestedHostCol   = "requested_host"
)

var _ logstore.UsageQuerier = (*databaseLogStorage)(nil)
var _ logstore.LogCleanupper = (*databaseLogStorage)(nil)

type databaseLogStorage struct {
    dbClient *sql.DB
}

func NewDatabaseLogStorage(dbClient *sql.DB) *databaseLogStorage {
    return &databaseLogStorage{dbClient: dbClient}
}

func (l *databaseLogStorage) QuotaUnit() quota.Unit {
    return quota.RequestsAllAuthenticated
}

func (l *databaseLogStorage) Emit(ctx context.Context, bulk []logstore.LogRecord) error {
    builder := squirrel.Insert(accessLogsTable).
        Columns(
            accessTimestampCol,
            accessProtocolCol,
            accessRequestURLCol,
            accessResponseStatusCol,
            accessRequestHeadersCol,
            accessResponseHeadersCol,
            accessInstanceIdCol,
            accessProjectIdCol,
            accessRequestedDomainCol,
            accessRequestedHostCol,
        ).
        PlaceholderFormat(squirrel.Dollar)

    for idx := range bulk {
        item := bulk[idx].(*Record)
        builder = builder.Values(
            item.LogDate,
            item.Protocol,
            item.RequestURL,
            item.ResponseStatus,
            item.RequestHeaders,
            item.ResponseHeaders,
            item.InstanceID,
            item.ProjectID,
            item.RequestedDomain,
            item.RequestedHost,
        )
    }

    stmt, args, err := builder.ToSql()
    if err != nil {
        return caos_errors.ThrowInternal(err, "ACCESS-KOS7I", "Errors.Internal")
    }

    result, err := l.dbClient.ExecContext(ctx, stmt, args...)
    if err != nil {
        return caos_errors.ThrowInternal(err, "ACCESS-alnT9", "Errors.Access.StorageFailed")
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return caos_errors.ThrowInternal(err, "ACCESS-7KIpL", "Errors.Internal")
    }

    logging.WithFields("rows", rows).Debug("successfully stored access logs")
    return nil
}

// TODO: AS OF SYSTEM TIME
func (l *databaseLogStorage) QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error) {
    stmt, args, err := squirrel.Select(
        fmt.Sprintf("count(%s)", accessInstanceIdCol),
    ).
        From(accessLogsTable).
        Where(squirrel.And{
            squirrel.Eq{accessInstanceIdCol: instanceId},
            squirrel.GtOrEq{accessTimestampCol: start},
            squirrel.Expr(fmt.Sprintf(`%s #>> '{%s,0}' = '[REDACTED]'`, accessRequestHeadersCol, strings.ToLower(zitadel_http.Authorization))),
            squirrel.NotLike{accessRequestURLCol: "%/zitadel.system.v1.SystemService/%"},
            squirrel.NotLike{accessRequestURLCol: "%/system/v1/%"},
            squirrel.Or{
                squirrel.And{
                    squirrel.Eq{accessProtocolCol: HTTP},
                    squirrel.NotEq{accessResponseStatusCol: http.StatusForbidden},
                    squirrel.NotEq{accessResponseStatusCol: http.StatusInternalServerError},
                    squirrel.NotEq{accessResponseStatusCol: http.StatusTooManyRequests},
                },
                squirrel.And{
                    squirrel.Eq{accessProtocolCol: GRPC},
                    squirrel.NotEq{accessResponseStatusCol: codes.PermissionDenied},
                    squirrel.NotEq{accessResponseStatusCol: codes.Internal},
                    squirrel.NotEq{accessResponseStatusCol: codes.ResourceExhausted},
                },
            },
        }).
        PlaceholderFormat(squirrel.Dollar).
        ToSql()

    if err != nil {
        return 0, caos_errors.ThrowInternal(err, "ACCESS-V9Sde", "Errors.Internal")
    }

    var count uint64
    if err = l.dbClient.
        QueryRowContext(ctx, stmt, args...).
        Scan(&count); err != nil {
        return 0, caos_errors.ThrowInternal(err, "ACCESS-pBPrM", "Errors.Logstore.Access.ScanFailed")
    }

    return count, nil
}

func (l *databaseLogStorage) Cleanup(ctx context.Context, keep time.Duration) error {
    stmt, args, err := squirrel.Delete(accessLogsTable).
        Where(squirrel.LtOrEq{accessTimestampCol: time.Now().Add(-keep)}).
        PlaceholderFormat(squirrel.Dollar).
        ToSql()

    if err != nil {
        return caos_errors.ThrowInternal(err, "ACCESS-2oTh6", "Errors.Internal")
    }

    execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    _, err = l.dbClient.ExecContext(execCtx, stmt, args...)
    return err
}
internal/logstore/emitters/access/record.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package access

import (
    "strings"
    "time"

    zitadel_http "github.com/zitadel/zitadel/internal/api/http"
    "github.com/zitadel/zitadel/internal/logstore"
)

var _ logstore.LogRecord = (*Record)(nil)

type Record struct {
    LogDate        time.Time `json:"logDate"`
    Protocol       Protocol  `json:"protocol"`
    RequestURL     string    `json:"requestUrl"`
    ResponseStatus uint32    `json:"responseStatus"`
    // RequestHeaders are plain maps so varying implementations
    // between HTTP and gRPC don't interfere with each other
    RequestHeaders map[string][]string `json:"requestHeaders"`
    // ResponseHeaders are plain maps so varying implementations
    // between HTTP and gRPC don't interfere with each other
    ResponseHeaders map[string][]string `json:"responseHeaders"`
    InstanceID      string              `json:"instanceId"`
    ProjectID       string              `json:"projectId"`
    RequestedDomain string              `json:"requestedDomain"`
    RequestedHost   string              `json:"requestedHost"`
}

type Protocol uint8

const (
    GRPC Protocol = iota
    HTTP

    redacted = "[REDACTED]"
)

func (a Record) Normalize() logstore.LogRecord {
    a.RequestedDomain = cutString(a.RequestedDomain, 200)
    a.RequestURL = cutString(a.RequestURL, 200)
    normalizeHeaders(a.RequestHeaders, strings.ToLower(zitadel_http.Authorization), "grpcgateway-authorization", "cookie", "grpcgateway-cookie")
    normalizeHeaders(a.ResponseHeaders, "set-cookie")
    return &a
}

const maxValuesPerKey = 10

// normalizeHeaders lowers all header keys and redacts secrets
func normalizeHeaders(header map[string][]string, redactKeysLower ...string) {
    lowerKeys(header)
    redactKeys(header, redactKeysLower...)
    pruneKeys(header)
}

func lowerKeys(header map[string][]string) {
    for k, v := range header {
        delete(header, k)
        header[strings.ToLower(k)] = v
    }
}

func redactKeys(header map[string][]string, redactKeysLower ...string) {
    for _, redactKey := range redactKeysLower {
        if _, ok := header[redactKey]; ok {
            header[redactKey] = []string{redacted}
        }
    }
}

func pruneKeys(header map[string][]string) {
    for key, value := range header {
        valueItems := make([]string, 0, maxValuesPerKey)
        for i, valueItem := range value {
            // Max 10 header values per key
            if i >= maxValuesPerKey {
                break
            }
            // Max 200 value length
            valueItems = append(valueItems, cutString(valueItem, 200))
        }
        header[key] = valueItems
    }
}

func cutString(str string, pos int) string {
    if len(str) <= pos {
        return str
    }
    return str[:pos]
}
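A quick demonstration of the normalization rules above on a hand-made header map; the key list is abbreviated, and the deterministic key slice is an illustration choice rather than the original's map-iteration approach:

package main

import (
    "fmt"
    "strings"
)

const redacted = "[REDACTED]"

func main() {
    headers := map[string][]string{
        "Authorization": {"Bearer secret-token"},
        "User-Agent":    {"curl/7.85"},
    }

    // lower all keys, then redact the sensitive ones
    for _, k := range []string{"Authorization", "User-Agent"} {
        v := headers[k]
        delete(headers, k)
        headers[strings.ToLower(k)] = v
    }
    if _, ok := headers["authorization"]; ok {
        headers["authorization"] = []string{redacted}
    }

    fmt.Println(headers) // map[authorization:[[REDACTED]] user-agent:[curl/7.85]] (map order may vary)
}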
internal/logstore/emitters/execution/database.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package execution

import (
    "context"
    "database/sql"
    "fmt"
    "time"

    "github.com/Masterminds/squirrel"
    "github.com/zitadel/logging"

    caos_errors "github.com/zitadel/zitadel/internal/errors"
    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

const (
    executionLogsTable     = "logstore.execution"
    executionTimestampCol  = "log_date"
    executionTookCol       = "took"
    executionMessageCol    = "message"
    executionLogLevelCol   = "loglevel"
    executionInstanceIdCol = "instance_id"
    executionActionIdCol   = "action_id"
    executionMetadataCol   = "metadata"
)

var _ logstore.UsageQuerier = (*databaseLogStorage)(nil)
var _ logstore.LogCleanupper = (*databaseLogStorage)(nil)

type databaseLogStorage struct {
    dbClient *sql.DB
}

func NewDatabaseLogStorage(dbClient *sql.DB) *databaseLogStorage {
    return &databaseLogStorage{dbClient: dbClient}
}

func (l *databaseLogStorage) QuotaUnit() quota.Unit {
    return quota.ActionsAllRunsSeconds
}

func (l *databaseLogStorage) Emit(ctx context.Context, bulk []logstore.LogRecord) error {
    builder := squirrel.Insert(executionLogsTable).
        Columns(
            executionTimestampCol,
            executionTookCol,
            executionMessageCol,
            executionLogLevelCol,
            executionInstanceIdCol,
            executionActionIdCol,
            executionMetadataCol,
        ).
        PlaceholderFormat(squirrel.Dollar)

    for idx := range bulk {
        item := bulk[idx].(*Record)

        var took interface{}
        if item.Took > 0 {
            took = item.Took
        }

        builder = builder.Values(
            item.LogDate,
            took,
            item.Message,
            item.LogLevel,
            item.InstanceID,
            item.ActionID,
            item.Metadata,
        )
    }

    stmt, args, err := builder.ToSql()
    if err != nil {
        return caos_errors.ThrowInternal(err, "EXEC-KOS7I", "Errors.Internal")
    }

    result, err := l.dbClient.ExecContext(ctx, stmt, args...)
    if err != nil {
        return caos_errors.ThrowInternal(err, "EXEC-0j6i5", "Errors.Access.StorageFailed")
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return caos_errors.ThrowInternal(err, "EXEC-MGchJ", "Errors.Internal")
    }

    logging.WithFields("rows", rows).Debug("successfully stored execution logs")
    return nil
}

// TODO: AS OF SYSTEM TIME
func (l *databaseLogStorage) QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error) {
    stmt, args, err := squirrel.Select(
        fmt.Sprintf("COALESCE(SUM(%s)::INT,0)", executionTookCol),
    ).
        From(executionLogsTable).
        Where(squirrel.And{
            squirrel.Eq{executionInstanceIdCol: instanceId},
            squirrel.GtOrEq{executionTimestampCol: start},
            squirrel.NotEq{executionTookCol: nil},
        }).
        PlaceholderFormat(squirrel.Dollar).
        ToSql()

    if err != nil {
        return 0, caos_errors.ThrowInternal(err, "EXEC-DXtzg", "Errors.Internal")
    }

    var durationSeconds uint64
    if err = l.dbClient.
        QueryRowContext(ctx, stmt, args...).
        Scan(&durationSeconds); err != nil {
        return 0, caos_errors.ThrowInternal(err, "EXEC-Ad8nP", "Errors.Logstore.Execution.ScanFailed")
    }
    return durationSeconds, nil
}

func (l *databaseLogStorage) Cleanup(ctx context.Context, keep time.Duration) error {
    stmt, args, err := squirrel.Delete(executionLogsTable).
        Where(squirrel.LtOrEq{executionTimestampCol: time.Now().Add(-keep)}).
        PlaceholderFormat(squirrel.Dollar).
        ToSql()

    if err != nil {
        return caos_errors.ThrowInternal(err, "EXEC-Bja8V", "Errors.Internal")
    }

    execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    _, err = l.dbClient.ExecContext(execCtx, stmt, args...)
    return err
}
internal/logstore/emitters/execution/record.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package execution

import (
    "time"

    "github.com/sirupsen/logrus"

    "github.com/zitadel/zitadel/internal/logstore"
)

var _ logstore.LogRecord = (*Record)(nil)

type Record struct {
    LogDate    time.Time              `json:"logDate"`
    Took       time.Duration          `json:"took"`
    Message    string                 `json:"message"`
    LogLevel   logrus.Level           `json:"logLevel"`
    InstanceID string                 `json:"instanceId"`
    ActionID   string                 `json:"actionId,omitempty"`
    Metadata   map[string]interface{} `json:"metadata,omitempty"`
}

func (e Record) Normalize() logstore.LogRecord {
    e.Message = cutString(e.Message, 2000)
    return &e
}

func cutString(str string, pos int) string {
    if len(str) <= pos {
        return str
    }
    return str[:pos]
}
internal/logstore/emitters/mock/inmem.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package mock

import (
    "context"
    "sync"
    "time"

    "github.com/benbjohnson/clock"

    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

var _ logstore.UsageQuerier = (*InmemLogStorage)(nil)
var _ logstore.LogCleanupper = (*InmemLogStorage)(nil)

type InmemLogStorage struct {
    mux     sync.Mutex
    clock   clock.Clock
    emitted []*record
    bulks   []int
}

func NewInMemoryStorage(clock clock.Clock) *InmemLogStorage {
    return &InmemLogStorage{
        clock:   clock,
        emitted: make([]*record, 0),
        bulks:   make([]int, 0),
    }
}

func (l *InmemLogStorage) QuotaUnit() quota.Unit {
    return quota.Unimplemented
}

func (l *InmemLogStorage) Emit(_ context.Context, bulk []logstore.LogRecord) error {
    if len(bulk) == 0 {
        return nil
    }
    l.mux.Lock()
    defer l.mux.Unlock()
    for idx := range bulk {
        l.emitted = append(l.emitted, bulk[idx].(*record))
    }
    l.bulks = append(l.bulks, len(bulk))
    return nil
}

func (l *InmemLogStorage) QueryUsage(_ context.Context, _ string, start time.Time) (uint64, error) {
    l.mux.Lock()
    defer l.mux.Unlock()

    var count uint64
    for _, r := range l.emitted {
        if r.ts.After(start) {
            count++
        }
    }
    return count, nil
}

func (l *InmemLogStorage) Cleanup(_ context.Context, keep time.Duration) error {
    l.mux.Lock()
    defer l.mux.Unlock()

    clean := make([]*record, 0)
    from := l.clock.Now().Add(-(keep + 1))
    for _, r := range l.emitted {
        if r.ts.After(from) {
            clean = append(clean, r)
        }
    }
    l.emitted = clean
    return nil
}

func (l *InmemLogStorage) Bulks() []int {
    l.mux.Lock()
    defer l.mux.Unlock()

    return l.bulks
}

func (l *InmemLogStorage) Len() int {
    l.mux.Lock()
    defer l.mux.Unlock()

    return len(l.emitted)
}
internal/logstore/emitters/mock/record.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package mock

import (
    "time"

    "github.com/benbjohnson/clock"

    "github.com/zitadel/zitadel/internal/logstore"
)

var _ logstore.LogRecord = (*record)(nil)

func NewRecord(clock clock.Clock) *record {
    return &record{ts: clock.Now()}
}

type record struct {
    ts       time.Time
    redacted bool
}

func (r record) Normalize() logstore.LogRecord {
    r.redacted = true
    return &r
}
internal/logstore/emitters/stdout/stdout.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package stdout

import (
    "context"
    "encoding/json"

    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/logstore"
)

func NewStdoutEmitter() logstore.LogEmitter {
    return logstore.LogEmitterFunc(func(ctx context.Context, bulk []logstore.LogRecord) error {
        for idx := range bulk {
            bytes, err := json.Marshal(bulk[idx])
            if err != nil {
                return err
            }
            logging.WithFields("record", string(bytes)).Info("log record emitted")
        }
        return nil
    })
}
internal/logstore/helpers_test.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package logstore_test

import (
    "time"

    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

type emitterOption func(config *logstore.EmitterConfig)

func emitterConfig(options ...emitterOption) *logstore.EmitterConfig {
    cfg := &logstore.EmitterConfig{
        Enabled:         true,
        Keep:            time.Hour,
        CleanupInterval: time.Hour,
        Debounce: &logstore.DebouncerConfig{
            MinFrequency: 0,
            MaxBulkSize:  0,
        },
    }
    for _, opt := range options {
        opt(cfg)
    }
    return cfg
}

func withDebouncerConfig(config *logstore.DebouncerConfig) emitterOption {
    return func(c *logstore.EmitterConfig) {
        c.Debounce = config
    }
}

func withDisabled() emitterOption {
    return func(c *logstore.EmitterConfig) {
        c.Enabled = false
    }
}

func withCleanupping(keep, interval time.Duration) emitterOption {
    return func(c *logstore.EmitterConfig) {
        c.Keep = keep
        c.CleanupInterval = interval
    }
}

type quotaOption func(config *quota.AddedEvent)

func quotaConfig(quotaOptions ...quotaOption) quota.AddedEvent {
    q := &quota.AddedEvent{
        Amount:        90,
        Limit:         false,
        ResetInterval: 90 * time.Second,
        From:          time.Unix(0, 0),
    }
    for _, opt := range quotaOptions {
        opt(q)
    }
    return *q
}

func withAmountAndInterval(n uint64) quotaOption {
    return func(c *quota.AddedEvent) {
        c.Amount = n
        c.ResetInterval = time.Duration(n) * time.Second
    }
}

func withLimiting() quotaOption {
    return func(c *quota.AddedEvent) {
        c.Limit = true
    }
}

func repeat(value, times int) []int {
    ints := make([]int, times)
    for i := 0; i < times; i++ {
        ints[i] = value
    }
    return ints
}

func uint64Ptr(n uint64) *uint64 { return &n }
internal/logstore/quotaqueriers/mock/noop.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package mock

import (
    "context"
    "time"

    "github.com/zitadel/zitadel/internal/logstore"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

var _ logstore.QuotaQuerier = (*inmemReporter)(nil)

type inmemReporter struct {
    config      *quota.AddedEvent
    startPeriod time.Time
}

func NewNoopQuerier(quota *quota.AddedEvent, startPeriod time.Time) *inmemReporter {
    return &inmemReporter{config: quota, startPeriod: startPeriod}
}

func (i *inmemReporter) GetCurrentQuotaPeriod(context.Context, string, quota.Unit) (*quota.AddedEvent, time.Time, error) {
    return i.config, i.startPeriod, nil
}

func (*inmemReporter) GetDueQuotaNotifications(context.Context, *quota.AddedEvent, time.Time, uint64) ([]*quota.NotifiedEvent, error) {
    return nil, nil
}
internal/logstore/service.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package logstore

import (
    "context"
    "math"
    "time"

    "github.com/zitadel/logging"

    "github.com/zitadel/zitadel/internal/repository/quota"
)

type QuotaQuerier interface {
    GetCurrentQuotaPeriod(ctx context.Context, instanceID string, unit quota.Unit) (config *quota.AddedEvent, periodStart time.Time, err error)
    GetDueQuotaNotifications(ctx context.Context, config *quota.AddedEvent, periodStart time.Time, used uint64) ([]*quota.NotifiedEvent, error)
}

type UsageQuerier interface {
    LogEmitter
    QuotaUnit() quota.Unit
    QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error)
}

type UsageReporter interface {
    Report(ctx context.Context, notifications []*quota.NotifiedEvent) (err error)
}

type UsageReporterFunc func(context.Context, []*quota.NotifiedEvent) (err error)

func (u UsageReporterFunc) Report(ctx context.Context, notifications []*quota.NotifiedEvent) (err error) {
    return u(ctx, notifications)
}

type Service struct {
    usageQuerier     UsageQuerier
    quotaQuerier     QuotaQuerier
    usageReporter    UsageReporter
    enabledSinks     []*emitter
    sinkEnabled      bool
    reportingEnabled bool
}

func New(quotaQuerier QuotaQuerier, usageReporter UsageReporter, usageQuerierSink *emitter, additionalSink ...*emitter) *Service {
    var usageQuerier UsageQuerier
    if usageQuerierSink != nil {
        usageQuerier = usageQuerierSink.emitter.(UsageQuerier)
    }

    svc := &Service{
        reportingEnabled: usageQuerierSink != nil && usageQuerierSink.enabled,
        usageQuerier:     usageQuerier,
        quotaQuerier:     quotaQuerier,
        usageReporter:    usageReporter,
    }

    for _, s := range append([]*emitter{usageQuerierSink}, additionalSink...) {
        if s != nil && s.enabled {
            svc.enabledSinks = append(svc.enabledSinks, s)
        }
    }

    svc.sinkEnabled = len(svc.enabledSinks) > 0

    return svc
}

func (s *Service) Enabled() bool {
    return s.sinkEnabled
}

func (s *Service) Handle(ctx context.Context, record LogRecord) {
    for _, sink := range s.enabledSinks {
        logging.OnError(sink.Emit(ctx, record.Normalize())).WithField("record", record).Warn("failed to emit log record")
    }
}

func (s *Service) Limit(ctx context.Context, instanceID string) *uint64 {
    var err error
    defer func() {
        logging.OnError(err).Warn("failed to check if usage should be limited")
    }()

    if !s.reportingEnabled || instanceID == "" {
        return nil
    }

    quota, periodStart, err := s.quotaQuerier.GetCurrentQuotaPeriod(ctx, instanceID, s.usageQuerier.QuotaUnit())
    if err != nil || quota == nil {
        return nil
    }

    usage, err := s.usageQuerier.QueryUsage(ctx, instanceID, periodStart)
    if err != nil {
        return nil
    }

    var remaining *uint64
    if quota.Limit {
        r := uint64(math.Max(0, float64(quota.Amount)-float64(usage)))
        remaining = &r
    }

    notifications, err := s.quotaQuerier.GetDueQuotaNotifications(ctx, quota, periodStart, usage)
    if err != nil {
        return remaining
    }

    err = s.usageReporter.Report(ctx, notifications)
    return remaining
}
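The remaining-amount computation in Limit, isolated into a runnable snippet (the values mirror the test case below, where a limit of 90 with 60 used leaves 30 remaining):

package main

import (
    "fmt"
    "math"
)

func main() {
    var amount, usage uint64 = 90, 60
    // remaining never goes below zero, even if usage overshoots the amount
    remaining := uint64(math.Max(0, float64(amount)-float64(usage)))
    fmt.Println(remaining) // 30

    usage = 120
    fmt.Println(uint64(math.Max(0, float64(amount)-float64(usage)))) // 0
}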
internal/logstore/service_test.go (new file, 312 lines)
@@ -0,0 +1,312 @@
// The library github.com/benbjohnson/clock fails when race is enabled
// https://github.com/benbjohnson/clock/issues/44
//go:build !race

package logstore_test

import (
    "context"
    "reflect"
    "runtime"
    "testing"
    "time"

    "github.com/benbjohnson/clock"

    "github.com/zitadel/zitadel/internal/logstore"
    emittermock "github.com/zitadel/zitadel/internal/logstore/emitters/mock"
    quotaqueriermock "github.com/zitadel/zitadel/internal/logstore/quotaqueriers/mock"
    "github.com/zitadel/zitadel/internal/repository/quota"
)

const (
    tick  = time.Second
    ticks = 60
)

type args struct {
    mainSink      *logstore.EmitterConfig
    secondarySink *logstore.EmitterConfig
    config        quota.AddedEvent
}

type want struct {
    enabled       bool
    remaining     *uint64
    mainSink      wantSink
    secondarySink wantSink
}

type wantSink struct {
    bulks []int
    len   int
}

func TestService(t *testing.T) {
    // tests should run on a single thread
    // important for deterministic results
    beforeProcs := runtime.GOMAXPROCS(1)
    defer runtime.GOMAXPROCS(beforeProcs)

    tests := []struct {
        name string
        args args
        want want
    }{{
        name: "max and min debouncing works",
        args: args{
            mainSink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
                MinFrequency: 1 * time.Minute,
                MaxBulkSize:  60,
            })),
            secondarySink: emitterConfig(),
            config:        quotaConfig(),
        },
        want: want{
            enabled:   true,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(60, 1),
                len:   60,
            },
            secondarySink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
        },
    }, {
        name: "mixed debouncing works",
        args: args{
            mainSink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
                MinFrequency: 0,
                MaxBulkSize:  6,
            })),
            secondarySink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
                MinFrequency: 10 * time.Second,
                MaxBulkSize:  0,
            })),
            config: quotaConfig(),
        },
        want: want{
            enabled:   true,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(6, 10),
                len:   60,
            },
            secondarySink: wantSink{
                bulks: repeat(10, 6),
                len:   60,
            },
        },
    }, {
        name: "when disabling main sink, secondary sink still works",
        args: args{
            mainSink:      emitterConfig(withDisabled()),
            secondarySink: emitterConfig(),
            config:        quotaConfig(),
        },
        want: want{
            enabled:   true,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(99, 0),
                len:   0,
            },
            secondarySink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
        },
    }, {
        name: "when all sinks are disabled, the service is disabled",
        args: args{
            mainSink:      emitterConfig(withDisabled()),
            secondarySink: emitterConfig(withDisabled()),
            config:        quotaConfig(),
        },
        want: want{
            enabled:   false,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(99, 0),
                len:   0,
            },
            secondarySink: wantSink{
                bulks: repeat(99, 0),
                len:   0,
            },
        },
    }, {
        name: "cleanupping works",
        args: args{
            mainSink: emitterConfig(withCleanupping(17*time.Second, 28*time.Second)),
            secondarySink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
                MinFrequency: 0,
                MaxBulkSize:  15,
            }), withCleanupping(5*time.Second, 47*time.Second)),
            config: quotaConfig(),
        },
        want: want{
            enabled:   true,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(1, 60),
                len:   21,
            },
            secondarySink: wantSink{
                bulks: repeat(15, 4),
                len:   18,
            },
        },
    }, {
        name: "when quota has a limit of 90, 30 are remaining",
        args: args{
            mainSink:      emitterConfig(),
            secondarySink: emitterConfig(),
            config:        quotaConfig(withLimiting()),
        },
        want: want{
            enabled:   true,
            remaining: uint64Ptr(30),
            mainSink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
            secondarySink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
        },
    }, {
        name: "when quota has a limit of 30, 0 are remaining",
        args: args{
            mainSink:      emitterConfig(),
            secondarySink: emitterConfig(),
            config:        quotaConfig(withLimiting(), withAmountAndInterval(30)),
        },
        want: want{
            enabled:   true,
            remaining: uint64Ptr(0),
            mainSink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
            secondarySink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
        },
    }, {
        name: "when quota has an amount of 30 but is not limited, remaining is nil",
        args: args{
            mainSink:      emitterConfig(),
            secondarySink: emitterConfig(),
            config:        quotaConfig(withAmountAndInterval(30)),
        },
        want: want{
            enabled:   true,
            remaining: nil,
            mainSink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
            secondarySink: wantSink{
                bulks: repeat(1, 60),
                len:   60,
            },
        },
    }}
    for _, tt := range tests {
        runTest(t, tt.name, tt.args, tt.want)
    }
}

func runTest(t *testing.T, name string, args args, want want) bool {
    return t.Run("Given over a minute, each second a log record is emitted", func(tt *testing.T) {
        tt.Run(name, func(t *testing.T) {
            ctx, clock, mainStorage, secondaryStorage, svc := given(t, args, want)
            remaining := when(svc, ctx, clock)
            then(t, mainStorage, secondaryStorage, remaining, want)
        })
    })
}

func given(t *testing.T, args args, want want) (context.Context, *clock.Mock, *emittermock.InmemLogStorage, *emittermock.InmemLogStorage, *logstore.Service) {
    ctx := context.Background()
    clock := clock.NewMock()

    periodStart := time.Time{}
    clock.Set(args.config.From)

    mainStorage := emittermock.NewInMemoryStorage(clock)
    mainEmitter, err := logstore.NewEmitter(ctx, clock, args.mainSink, mainStorage)
    if err != nil {
        t.Errorf("expected no error but got %v", err)
    }
    secondaryStorage := emittermock.NewInMemoryStorage(clock)
    secondaryEmitter, err := logstore.NewEmitter(ctx, clock, args.secondarySink, secondaryStorage)
    if err != nil {
        t.Errorf("expected no error but got %v", err)
    }

    svc := logstore.New(
        quotaqueriermock.NewNoopQuerier(&args.config, periodStart),
        logstore.UsageReporterFunc(func(context.Context, []*quota.NotifiedEvent) error { return nil }),
        mainEmitter,
        secondaryEmitter)

    if svc.Enabled() != want.enabled {
        t.Errorf("wanted service enabled to be %t but is %t", want.enabled, svc.Enabled())
    }
    return ctx, clock, mainStorage, secondaryStorage, svc
}

func when(svc *logstore.Service, ctx context.Context, clock *clock.Mock) *uint64 {
    var remaining *uint64
    for i := 0; i < ticks; i++ {
        svc.Handle(ctx, emittermock.NewRecord(clock))
        runtime.Gosched()
        remaining = svc.Limit(ctx, "non-empty-instance-id")
        clock.Add(tick)
    }
    time.Sleep(time.Millisecond)
    runtime.Gosched()
    return remaining
}

func then(t *testing.T, mainStorage, secondaryStorage *emittermock.InmemLogStorage, remaining *uint64, want want) {
    mainBulks := mainStorage.Bulks()
    if !reflect.DeepEqual(want.mainSink.bulks, mainBulks) {
        t.Errorf("wanted main storage to have bulks %v, but got %v", want.mainSink.bulks, mainBulks)
    }

    mainLen := mainStorage.Len()
    if !reflect.DeepEqual(want.mainSink.len, mainLen) {
        t.Errorf("wanted main storage to have len %d, but got %d", want.mainSink.len, mainLen)
    }

    secondaryBulks := secondaryStorage.Bulks()
    if !reflect.DeepEqual(want.secondarySink.bulks, secondaryBulks) {
        t.Errorf("wanted secondary storage to have bulks %v, but got %v", want.secondarySink.bulks, secondaryBulks)
    }

    secondaryLen := secondaryStorage.Len()
    if !reflect.DeepEqual(want.secondarySink.len, secondaryLen) {
        t.Errorf("wanted secondary storage to have len %d, but got %d", want.secondarySink.len, secondaryLen)
    }

    if remaining == nil && want.remaining == nil {
        return
    }

    if remaining == nil && want.remaining != nil ||
        remaining != nil && want.remaining == nil {
        t.Errorf("wanted remaining nil to be %t but got %t", want.remaining == nil, remaining == nil)
        return
    }
    if *remaining != *want.remaining {
        t.Errorf("wanted remaining %d but got %d", *want.remaining, *remaining)
        return
    }
}
@@ -219,6 +219,10 @@ func (q *Queries) InstanceByHost(ctx context.Context, host string) (_ authz.Instance, err error) {
    return scan(row)
}

func (q *Queries) InstanceByID(ctx context.Context) (_ authz.Instance, err error) {
    return q.Instance(ctx, true)
}

func (q *Queries) GetDefaultLanguage(ctx context.Context) language.Tag {
    instance, err := q.Instance(ctx, false)
    if err != nil {
@@ -156,7 +156,6 @@ func Start() {

func ApplyCustomConfig(customConfig CustomConfig) crdb.StatementHandlerConfig {
    return applyCustomConfig(projectionConfig, customConfig)
}

func applyCustomConfig(config crdb.StatementHandlerConfig, customConfig CustomConfig) crdb.StatementHandlerConfig {
internal/repository/quota/aggregate.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package quota

import (
    "github.com/zitadel/zitadel/internal/eventstore"
)

const (
    AggregateType    = "quota"
    AggregateVersion = "v1"
)

type Aggregate struct {
    eventstore.Aggregate
}

func NewAggregate(id, instanceId, resourceOwner string) *Aggregate {
    return &Aggregate{
        Aggregate: eventstore.Aggregate{
            Type:          AggregateType,
            Version:       AggregateVersion,
            ID:            id,
            InstanceID:    instanceId,
            ResourceOwner: resourceOwner,
        },
    }
}
internal/repository/quota/events.go (new file, 205 lines)
@@ -0,0 +1,205 @@
package quota

import (
    "context"
    "encoding/json"
    "strconv"
    "time"

    "github.com/zitadel/zitadel/internal/errors"
    "github.com/zitadel/zitadel/internal/eventstore"
    "github.com/zitadel/zitadel/internal/eventstore/repository"
)

type Unit uint

const (
    UniqueQuotaNameType           = "quota_units"
    UniqueQuotaNotificationIDType = "quota_notification"
    eventTypePrefix               = eventstore.EventType("quota.")
    AddedEventType                = eventTypePrefix + "added"
    NotifiedEventType             = eventTypePrefix + "notified"
    RemovedEventType              = eventTypePrefix + "removed"
)

const (
    Unimplemented Unit = iota
    RequestsAllAuthenticated
    ActionsAllRunsSeconds
)

func NewAddQuotaUnitUniqueConstraint(unit Unit) *eventstore.EventUniqueConstraint {
    return eventstore.NewAddEventUniqueConstraint(
        UniqueQuotaNameType,
        strconv.FormatUint(uint64(unit), 10),
        "Errors.Quota.AlreadyExists",
    )
}

func NewRemoveQuotaNameUniqueConstraint(unit Unit) *eventstore.EventUniqueConstraint {
    return eventstore.NewRemoveEventUniqueConstraint(
        UniqueQuotaNameType,
        strconv.FormatUint(uint64(unit), 10),
    )
}

type AddedEvent struct {
    eventstore.BaseEvent `json:"-"`

    Unit          Unit                      `json:"unit"`
    From          time.Time                 `json:"from"`
    ResetInterval time.Duration             `json:"interval,omitempty"`
    Amount        uint64                    `json:"amount"`
    Limit         bool                      `json:"limit"`
    Notifications []*AddedEventNotification `json:"notifications,omitempty"`
}

type AddedEventNotification struct {
    ID      string `json:"id"`
    Percent uint16 `json:"percent"`
    Repeat  bool   `json:"repeat,omitempty"`
    CallURL string `json:"callUrl"`
}

func (e *AddedEvent) Data() interface{} {
    return e
}

func (e *AddedEvent) UniqueConstraints() []*eventstore.EventUniqueConstraint {
    return []*eventstore.EventUniqueConstraint{NewAddQuotaUnitUniqueConstraint(e.Unit)}
}

func NewAddedEvent(
    ctx context.Context,
    aggregate *eventstore.Aggregate,
    unit Unit,
    from time.Time,
    resetInterval time.Duration,
    amount uint64,
    limit bool,
    notifications []*AddedEventNotification,
) *AddedEvent {
    return &AddedEvent{
        BaseEvent: *eventstore.NewBaseEventForPush(
            ctx,
            aggregate,
            AddedEventType,
        ),
        Unit:          unit,
        From:          from,
        ResetInterval: resetInterval,
        Amount:        amount,
        Limit:         limit,
        Notifications: notifications,
    }
}

func AddedEventMapper(event *repository.Event) (eventstore.Event, error) {
    e := &AddedEvent{
        BaseEvent: *eventstore.BaseEventFromRepo(event),
    }

    err := json.Unmarshal(event.Data, e)
    if err != nil {
        return nil, errors.ThrowInternal(err, "ACTION-4n8vs", "unable to unmarshal quota added")
    }

    return e, nil
}

type NotifiedEvent struct {
    eventstore.BaseEvent `json:"-"`
    Unit                 Unit      `json:"unit"`
    ID                   string    `json:"id"`
    CallURL              string    `json:"callURL"`
    PeriodStart          time.Time `json:"periodStart"`
    Threshold            uint16    `json:"threshold"`
    Usage                uint64    `json:"usage"`
}

func (e *NotifiedEvent) Data() interface{} {
    return e
}

func (e *NotifiedEvent) UniqueConstraints() []*eventstore.EventUniqueConstraint {
    return nil
}

func NewNotifiedEvent(
    ctx context.Context,
    aggregate *eventstore.Aggregate,
    unit Unit,
    id string,
    callURL string,
    periodStart time.Time,
    threshold uint16,
    usage uint64,
) *NotifiedEvent {
    return &NotifiedEvent{
        BaseEvent: *eventstore.NewBaseEventForPush(
            ctx,
            aggregate,
            NotifiedEventType,
        ),
        Unit:        unit,
        ID:          id,
        CallURL:     callURL,
        PeriodStart: periodStart,
        Threshold:   threshold,
        Usage:       usage,
    }
}

func NotifiedEventMapper(event *repository.Event) (eventstore.Event, error) {
    e := &NotifiedEvent{
        BaseEvent: *eventstore.BaseEventFromRepo(event),
    }

    err := json.Unmarshal(event.Data, e)
    if err != nil {
        return nil, errors.ThrowInternal(err, "ACTION-4n8vs", "unable to unmarshal quota notified")
    }

    return e, nil
}

type RemovedEvent struct {
    eventstore.BaseEvent `json:"-"`
    Unit                 Unit `json:"unit"`
}

func (e *RemovedEvent) Data() interface{} {
    return e
}

func (e *RemovedEvent) UniqueConstraints() []*eventstore.EventUniqueConstraint {
    return []*eventstore.EventUniqueConstraint{NewRemoveQuotaNameUniqueConstraint(e.Unit)}
}

func NewRemovedEvent(
    ctx context.Context,
    aggregate *eventstore.Aggregate,
    unit Unit,
) *RemovedEvent {
    return &RemovedEvent{
        BaseEvent: *eventstore.NewBaseEventForPush(
            ctx,
            aggregate,
            RemovedEventType,
        ),
        Unit: unit,
    }
}

func RemovedEventMapper(event *repository.Event) (eventstore.Event, error) {
    e := &RemovedEvent{
        BaseEvent: *eventstore.BaseEventFromRepo(event),
    }

    err := json.Unmarshal(event.Data, e)
    if err != nil {
        return nil, errors.ThrowInternal(err, "ACTION-4bReE", "unable to unmarshal quota removed")
    }

    return e, nil
}
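The "one quota per unit" guarantee rests on the unique constraint keyed by the decimal representation of the unit enum. A standalone sketch of how that key is derived (the types mirror the declarations above):

package main

import (
    "fmt"
    "strconv"
)

type Unit uint

const (
    Unimplemented Unit = iota
    RequestsAllAuthenticated
    ActionsAllRunsSeconds
)

func main() {
    // NewAddQuotaUnitUniqueConstraint formats the unit exactly like this.
    fmt.Println(strconv.FormatUint(uint64(RequestsAllAuthenticated), 10)) // "1"
    fmt.Println(strconv.FormatUint(uint64(ActionsAllRunsSeconds), 10))    // "2"
}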
11
internal/repository/quota/eventstore.go
Normal file
11
internal/repository/quota/eventstore.go
Normal file
@@ -0,0 +1,11 @@
package quota

import (
	"github.com/zitadel/zitadel/internal/eventstore"
)

func RegisterEventMappers(es *eventstore.Eventstore) {
	es.RegisterFilterEventMapper(AggregateType, AddedEventType, AddedEventMapper).
		RegisterFilterEventMapper(AggregateType, NotifiedEventType, NotifiedEventMapper).
		RegisterFilterEventMapper(AggregateType, RemovedEventType, RemovedEventMapper)
}
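The chained calls above work because RegisterFilterEventMapper returns the eventstore again. A rough stand-alone sketch of that fluent-registration pattern, with hypothetical local types in place of zitadel's internals:

package main

import "fmt"

type Event struct{ Type string }

type Mapper func(data []byte) (Event, error)

// Store mimics the fluent registration style used above.
type Store struct {
	mappers map[string]Mapper
}

func NewStore() *Store {
	return &Store{mappers: map[string]Mapper{}}
}

// RegisterFilterEventMapper returns the store itself so calls can be chained.
func (s *Store) RegisterFilterEventMapper(aggregate, eventType string, m Mapper) *Store {
	s.mappers[aggregate+"."+eventType] = m
	return s
}

func main() {
	s := NewStore()
	s.RegisterFilterEventMapper("quota", "added", func([]byte) (Event, error) { return Event{"added"}, nil }).
		RegisterFilterEventMapper("quota", "notified", func([]byte) (Event, error) { return Event{"notified"}, nil }).
		RegisterFilterEventMapper("quota", "removed", func([]byte) (Event, error) { return Event{"removed"}, nil })
	fmt.Printf("%d mappers registered\n", len(s.mappers))
}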
@@ -437,6 +437,27 @@ Errors:
    CloseRows: SQL Statement konnte nicht abgeschlossen werden
    SQLStatement: SQL Statement konnte nicht erstellt werden
  InvalidRequest: Anfrage ist ungültig
  Quota:
    AlreadyExists: Das Kontingent existiert bereits für diese Einheit
    NotFound: Kontingent für diese Einheit nicht gefunden
    Invalid:
      CallURL: Aufruf URL für Kontingent ist ungültig
      Percent: Kontingent Prozent ist unter 1
      Unimplemented: Kontingente sind für diese Einheit nicht implementiert
      Amount: Kontingent Menge ist kleiner als 1
      ResetInterval: Das Rücksetzungsintervall für das Kontingent ist kürzer als eine Minute
      Noop: Ein unlimitiertes Kontingent ohne Benachrichtigungen hat keinen Effekt
    Access:
      Exhausted: Das Kontingent für authentifizierte Requests ist aufgebraucht
    Execution:
      Exhausted: Das Kontingent für Action Sekunden ist aufgebraucht
  LogStore:
    Access:
      StorageFailed: Das Speichern des Access Logs in der Datenbank ist fehlgeschlagen
      ScanFailed: Das Abfragen der verbrauchten authentifizierten Requests ist fehlgeschlagen
    Execution:
      StorageFailed: Das Speichern des Action Logs in der Datenbank ist fehlgeschlagen
      ScanFailed: Das Abfragen der verbrauchten Actions Sekunden ist fehlgeschlagen

AggregateTypes:
  action: Action
@@ -446,6 +467,7 @@ AggregateTypes:
  project: Projekt
  user: Benutzer
  usergrant: Benutzerberechtigung
  quota: Kontingent

EventTypes:
  user:
@@ -437,6 +437,28 @@ Errors:
    CloseRows: SQL Statement could not be finished
    SQLStatement: SQL Statement could not be created
  InvalidRequest: Request is invalid
  Quota:
    AlreadyExists: Quota already exists for this unit
    NotFound: Quota not found for this unit
    Invalid:
      CallURL: Quota call URL is invalid
      Percent: Quota percent is lower than 1
      Unimplemented: Quotas are not implemented for this unit
      Amount: Quota amount is lower than 1
      ResetInterval: Quota reset interval is shorter than a minute
      Noop: An unlimited quota without notifications has no effect
    Access:
      Exhausted: The quota for authenticated requests is exhausted
    Execution:
      Exhausted: The quota for execution seconds is exhausted
  LogStore:
    Access:
      StorageFailed: Storing access log to database failed
      ScanFailed: Querying usage for authenticated requests failed
    Execution:
      StorageFailed: Storing action execution log to database failed
      ScanFailed: Querying usage for action execution seconds failed


AggregateTypes:
  action: Action
@@ -446,6 +468,7 @@ AggregateTypes:
  project: Project
  user: User
  usergrant: User grant
  quota: Quota

EventTypes:
  user:
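Callers address these messages by dotted key paths that mirror the YAML nesting, for example Errors.Quota.Access.Exhausted. A hypothetical sketch of such a lookup, assuming the YAML has already been parsed into nested maps:

package main

import (
	"fmt"
	"strings"
)

// messages mirrors a fragment of the nested i18n structure above.
var messages = map[string]interface{}{
	"Errors": map[string]interface{}{
		"Quota": map[string]interface{}{
			"Access": map[string]interface{}{
				"Exhausted": "The quota for authenticated requests is exhausted",
			},
		},
	},
}

// lookup walks a dotted key path through the nested maps.
func lookup(key string) (string, bool) {
	var node interface{} = messages
	for _, part := range strings.Split(key, ".") {
		m, ok := node.(map[string]interface{})
		if !ok {
			return "", false
		}
		node, ok = m[part]
		if !ok {
			return "", false
		}
	}
	s, ok := node.(string)
	return s, ok
}

func main() {
	if msg, ok := lookup("Errors.Quota.Access.Exhausted"); ok {
		fmt.Println(msg)
	}
}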
@@ -437,6 +437,27 @@ Errors:
    CloseRows: L'instruction SQL n'a pas pu être terminée
    SQLStatement: L'instruction SQL n'a pas pu être créée
  InvalidRequest: La requête n'est pas valide
  Quota:
    AlreadyExists: Le contingent existe déjà pour cette unité
    NotFound: Contingent non trouvé pour cette unité
    Invalid:
      CallURL: L'URL d'appel du contingent n'est pas valide
      Percent: Le pourcentage du contingent est inférieur à 1
      Unimplemented: Les contingents ne sont pas implémentés pour cette unité
      Amount: La quantité contingentée est inférieure à 1
      ResetInterval: L'intervalle de réinitialisation entre les contingents est inférieur à une minute
      Noop: Un contingent illimité sans notifications n'a aucun effet
    Access:
      Exhausted: Le quota de requêtes authentifiées est épuisé
    Execution:
      Exhausted: Le quota de secondes d'action est épuisé
  LogStore:
    Access:
      StorageFailed: L'enregistrement du journal d'accès dans la base de données a échoué
      ScanFailed: L'interrogation des requêtes authentifiées consommées a échoué
    Execution:
      StorageFailed: L'enregistrement du journal d'action dans la base de données a échoué
      ScanFailed: L'interrogation des secondes d'action consommées a échoué

AggregateTypes:
  action: Action
@@ -446,6 +467,7 @@ AggregateTypes:
  project: Projet
  user: Utilisateur
  usergrant: Subvention de l'utilisateur
  quota: Contingent

EventTypes:
  user:
@@ -437,6 +437,27 @@ Errors:
    CloseRows: Lo statement SQL non può essere terminato
    SQLStatement: Lo statement SQL non può essere creato
  InvalidRequest: La richiesta non è valida
  Quota:
    AlreadyExists: La quota esiste già per questa unità
    NotFound: Quota non trovata per questa unità
    Invalid:
      CallURL: L'URL di chiamata per la quota non è valido
      Percent: La percentuale contingente è inferiore all'1
      Unimplemented: La quota non è implementata per questa unità
      Amount: L'importo contingente è inferiore all'1
      ResetInterval: L'intervallo di reset contingente è inferiore a un minuto
      Noop: Una quota illimitata senza notifiche non ha alcun effetto
    Access:
      Exhausted: La quota per le richieste autenticate è esaurita
    Execution:
      Exhausted: La quota per i secondi di azione è esaurita
  LogStore:
    Access:
      StorageFailed: Il salvataggio del registro degli accessi nel database non è riuscito
      ScanFailed: La query delle richieste autenticate utilizzate non è riuscita
    Execution:
      StorageFailed: Il salvataggio del registro delle azioni nel database non è riuscito
      ScanFailed: La query dei secondi delle azioni utilizzate non è riuscita

AggregateTypes:
  action: Azione
@@ -446,6 +467,7 @@ AggregateTypes:
  project: Progetto
  user: Utente
  usergrant: Sovvenzione utente
  quota: Quota

EventTypes:
  user:
@@ -437,6 +437,38 @@ Errors:
    CloseRows: SQL 语句无法完成
    SQLStatement: 无法创建 SQL 语句
  InvalidRequest: 请求无效
  Quota:
    AlreadyExists: 这个单位的配额已经存在
    NotFound: 没有找到该单位的配额
    Invalid:
      CallURL: 配额调用的URL是无效的
      Percent: 配额百分比低于1
      Unimplemented: 该单位没有实施配额
      Amount: 配额数量低于1
      ResetInterval: 配额重置时间间隔短于1分钟
      Noop: 没有通知的无限配额没有效果
    Access:
      Exhausted: 认证请求的配额已用完
    Execution:
      Exhausted: 行动秒数的配额已用完
  LogStore:
    Access:
      StorageFailed: 存储访问日志到数据库失败
      ScanFailed: 查询已认证请求的使用情况失败
    Execution:
      StorageFailed: 将行动执行日志存储到数据库失败
      ScanFailed: 查询动作执行秒数的使用情况失败

AggregateTypes:
  action: 动作
  instance: 实例
  key_pair: 密钥对
  org: 组织
  project: 项目
  user: 用户
  usergrant: 用户授权
  quota: 配额

EventTypes:
  user:
    added: 已添加用户