feat: add quotas (#4779)

Adds the ability to cap authenticated requests and action execution seconds over a defined interval.
Authored by Elio Bischof on 2023-02-15 02:52:11 +01:00, committed by GitHub
parent 45f6a4436e
commit 681541f41b
117 changed files with 4652 additions and 510 deletions

View File

@@ -0,0 +1,11 @@
package logstore
type Configs struct {
Access *Config
Execution *Config
}
type Config struct {
Database *EmitterConfig
Stdout *EmitterConfig
}
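
For illustration, a minimal sketch of how this configuration might be populated in Go. The field values are made up, and EmitterConfig is the type introduced in the emitter file further down.

package main

import (
	"time"

	"github.com/zitadel/zitadel/internal/logstore"
)

func main() {
	// Hypothetical values: keep access logs in the database for 90 days,
	// clean them up every 4 hours, and additionally print execution logs to stdout.
	cfg := logstore.Configs{
		Access: &logstore.Config{
			Database: &logstore.EmitterConfig{Enabled: true, Keep: 90 * 24 * time.Hour, CleanupInterval: 4 * time.Hour},
			Stdout:   &logstore.EmitterConfig{Enabled: false},
		},
		Execution: &logstore.Config{
			Database: &logstore.EmitterConfig{Enabled: true},
			Stdout:   &logstore.EmitterConfig{Enabled: true},
		},
	}
	_ = cfg
}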

View File

@@ -0,0 +1,92 @@
package logstore
import (
"context"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/zitadel/logging"
)
type bulkSink interface {
sendBulk(ctx context.Context, bulk []LogRecord) error
}
var _ bulkSink = bulkSinkFunc(nil)
type bulkSinkFunc func(ctx context.Context, items []LogRecord) error
func (s bulkSinkFunc) sendBulk(ctx context.Context, items []LogRecord) error {
return s(ctx, items)
}
type debouncer struct {
// Storing a context.Context in a struct is generally bad practice:
// https://go.dev/blog/context-and-structs
// However, the debouncer starts a goroutine that triggers side effects on its own,
// so there is no incoming context.Context available when those events fire.
// The only context we can use for these side effects is the app context.
// Because it is cancelled by OS signals, it is preferable to creating new background contexts.
binarySignaledCtx context.Context
clock clock.Clock
ticker *clock.Ticker
mux sync.Mutex
cfg DebouncerConfig
storage bulkSink
cache []LogRecord
cacheLen uint
}
type DebouncerConfig struct {
MinFrequency time.Duration
MaxBulkSize uint
}
func newDebouncer(binarySignaledCtx context.Context, cfg DebouncerConfig, clock clock.Clock, ship bulkSink) *debouncer {
a := &debouncer{
binarySignaledCtx: binarySignaledCtx,
clock: clock,
cfg: cfg,
storage: ship,
}
if cfg.MinFrequency > 0 {
a.ticker = clock.Ticker(cfg.MinFrequency)
go a.shipOnTicks()
}
return a
}
func (d *debouncer) add(item LogRecord) {
d.mux.Lock()
defer d.mux.Unlock()
d.cache = append(d.cache, item)
d.cacheLen++
if d.cfg.MaxBulkSize > 0 && d.cacheLen >= d.cfg.MaxBulkSize {
// ship acquires the lock itself, so call it in a goroutine: add must not block and must release the lock first
go d.ship()
}
}
func (d *debouncer) ship() {
if d.cacheLen == 0 {
return
}
d.mux.Lock()
defer d.mux.Unlock()
if err := d.storage.sendBulk(d.binarySignaledCtx, d.cache); err != nil {
logging.WithError(err).WithField("size", len(d.cache)).Error("storing bulk failed")
}
d.cache = nil
d.cacheLen = 0
if d.cfg.MinFrequency > 0 {
d.ticker.Reset(d.cfg.MinFrequency)
}
}
func (d *debouncer) shipOnTicks() {
for range d.ticker.C {
d.ship()
}
}
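
As a rough usage sketch (inside package logstore, since the debouncer is unexported): records added via add are buffered and shipped to the sink either when the bulk is full or on the next tick. The sink and the helper below are stand-ins, not part of this commit.

package logstore

import (
	"context"
	"time"

	"github.com/benbjohnson/clock"
)

// exampleDebounce is a hypothetical helper illustrating the wiring.
func exampleDebounce(ctx context.Context, record LogRecord) {
	sink := bulkSinkFunc(func(ctx context.Context, bulk []LogRecord) error {
		return nil // a real sink would persist the bulk here
	})
	d := newDebouncer(ctx, DebouncerConfig{
		MinFrequency: time.Minute, // ship buffered records at least once per minute
		MaxBulkSize:  60,          // or as soon as 60 records are buffered
	}, clock.New(), sink)
	d.add(record) // buffered; shipped by the ticker or once the bulk is full
}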

View File

@@ -0,0 +1,112 @@
package logstore
import (
"context"
"fmt"
"time"
"github.com/benbjohnson/clock"
"github.com/zitadel/logging"
)
type EmitterConfig struct {
Enabled bool
Keep time.Duration
CleanupInterval time.Duration
Debounce *DebouncerConfig
}
type emitter struct {
enabled bool
ctx context.Context
debouncer *debouncer
emitter LogEmitter
clock clock.Clock
}
type LogRecord interface {
Normalize() LogRecord
}
type LogRecordFunc func() LogRecord
func (r LogRecordFunc) Normalize() LogRecord {
return r()
}
type LogEmitter interface {
Emit(ctx context.Context, bulk []LogRecord) error
}
type LogEmitterFunc func(ctx context.Context, bulk []LogRecord) error
func (l LogEmitterFunc) Emit(ctx context.Context, bulk []LogRecord) error {
return l(ctx, bulk)
}
type LogCleanupper interface {
LogEmitter
Cleanup(ctx context.Context, keep time.Duration) error
}
// NewEmitter accepts Clock from github.com/benbjohnson/clock so we can control timers and tickers in the unit tests
func NewEmitter(ctx context.Context, clock clock.Clock, cfg *EmitterConfig, logger LogEmitter) (*emitter, error) {
svc := &emitter{
enabled: cfg != nil && cfg.Enabled,
ctx: ctx,
emitter: logger,
clock: clock,
}
if !svc.enabled {
return svc, nil
}
if cfg.Debounce != nil && (cfg.Debounce.MinFrequency > 0 || cfg.Debounce.MaxBulkSize > 0) {
svc.debouncer = newDebouncer(ctx, *cfg.Debounce, clock, newStorageBulkSink(svc.emitter))
}
cleanupper, ok := logger.(LogCleanupper)
if !ok {
if cfg.Keep != 0 {
return nil, fmt.Errorf("cleaning up for this storage type is not supported, so keep duration must be 0, but is %d", cfg.Keep)
}
if cfg.CleanupInterval != 0 {
return nil, fmt.Errorf("cleaning up for this storage type is not supported, so cleanup interval duration must be 0, but is %d", cfg.Keep)
}
return svc, nil
}
if cfg.Keep != 0 && cfg.CleanupInterval != 0 {
go svc.startCleanupping(cleanupper, cfg.CleanupInterval, cfg.Keep)
}
return svc, nil
}
func (s *emitter) startCleanupping(cleanupper LogCleanupper, cleanupInterval, keep time.Duration) {
for range s.clock.Tick(cleanupInterval) {
if err := cleanupper.Cleanup(s.ctx, keep); err != nil {
logging.WithError(err).Error("cleaning up logs failed")
}
}
}
func (s *emitter) Emit(ctx context.Context, record LogRecord) (err error) {
if !s.enabled {
return nil
}
if s.debouncer != nil {
s.debouncer.add(record)
return nil
}
return s.emitter.Emit(ctx, []LogRecord{record})
}
func newStorageBulkSink(emitter LogEmitter) bulkSinkFunc {
return func(ctx context.Context, bulk []LogRecord) error {
return emitter.Emit(ctx, bulk)
}
}
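
A minimal sketch of creating an emitter with a custom sink, assuming no debouncing and no cleanup (Keep and CleanupInterval stay 0 because the sink implements no Cleanup). The printEmitter below is hypothetical and not part of this commit.

package main

import (
	"context"
	"fmt"

	"github.com/benbjohnson/clock"

	"github.com/zitadel/zitadel/internal/logstore"
)

func main() {
	ctx := context.Background()
	// printEmitter just prints every record it receives.
	printEmitter := logstore.LogEmitterFunc(func(ctx context.Context, bulk []logstore.LogRecord) error {
		for _, record := range bulk {
			fmt.Println(record)
		}
		return nil
	})
	em, err := logstore.NewEmitter(ctx, clock.New(), &logstore.EmitterConfig{Enabled: true}, printEmitter)
	if err != nil {
		panic(err)
	}
	_ = em // em.Emit(ctx, record) now forwards records directly to printEmitter
}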

View File

@@ -0,0 +1,159 @@
package access
import (
"context"
"database/sql"
"fmt"
"net/http"
"strings"
"time"
"github.com/Masterminds/squirrel"
"github.com/zitadel/logging"
"google.golang.org/grpc/codes"
zitadel_http "github.com/zitadel/zitadel/internal/api/http"
caos_errors "github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/repository/quota"
)
const (
accessLogsTable = "logstore.access"
accessTimestampCol = "log_date"
accessProtocolCol = "protocol"
accessRequestURLCol = "request_url"
accessResponseStatusCol = "response_status"
accessRequestHeadersCol = "request_headers"
accessResponseHeadersCol = "response_headers"
accessInstanceIdCol = "instance_id"
accessProjectIdCol = "project_id"
accessRequestedDomainCol = "requested_domain"
accessRequestedHostCol = "requested_host"
)
var _ logstore.UsageQuerier = (*databaseLogStorage)(nil)
var _ logstore.LogCleanupper = (*databaseLogStorage)(nil)
type databaseLogStorage struct {
dbClient *sql.DB
}
func NewDatabaseLogStorage(dbClient *sql.DB) *databaseLogStorage {
return &databaseLogStorage{dbClient: dbClient}
}
func (l *databaseLogStorage) QuotaUnit() quota.Unit {
return quota.RequestsAllAuthenticated
}
func (l *databaseLogStorage) Emit(ctx context.Context, bulk []logstore.LogRecord) error {
builder := squirrel.Insert(accessLogsTable).
Columns(
accessTimestampCol,
accessProtocolCol,
accessRequestURLCol,
accessResponseStatusCol,
accessRequestHeadersCol,
accessResponseHeadersCol,
accessInstanceIdCol,
accessProjectIdCol,
accessRequestedDomainCol,
accessRequestedHostCol,
).
PlaceholderFormat(squirrel.Dollar)
for idx := range bulk {
item := bulk[idx].(*Record)
builder = builder.Values(
item.LogDate,
item.Protocol,
item.RequestURL,
item.ResponseStatus,
item.RequestHeaders,
item.ResponseHeaders,
item.InstanceID,
item.ProjectID,
item.RequestedDomain,
item.RequestedHost,
)
}
stmt, args, err := builder.ToSql()
if err != nil {
return caos_errors.ThrowInternal(err, "ACCESS-KOS7I", "Errors.Internal")
}
result, err := l.dbClient.ExecContext(ctx, stmt, args...)
if err != nil {
return caos_errors.ThrowInternal(err, "ACCESS-alnT9", "Errors.Access.StorageFailed")
}
rows, err := result.RowsAffected()
if err != nil {
return caos_errors.ThrowInternal(err, "ACCESS-7KIpL", "Errors.Internal")
}
logging.WithFields("rows", rows).Debug("successfully stored access logs")
return nil
}
// TODO: AS OF SYSTEM TIME
func (l *databaseLogStorage) QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error) {
stmt, args, err := squirrel.Select(
fmt.Sprintf("count(%s)", accessInstanceIdCol),
).
From(accessLogsTable).
Where(squirrel.And{
squirrel.Eq{accessInstanceIdCol: instanceId},
squirrel.GtOrEq{accessTimestampCol: start},
squirrel.Expr(fmt.Sprintf(`%s #>> '{%s,0}' = '[REDACTED]'`, accessRequestHeadersCol, strings.ToLower(zitadel_http.Authorization))),
squirrel.NotLike{accessRequestURLCol: "%/zitadel.system.v1.SystemService/%"},
squirrel.NotLike{accessRequestURLCol: "%/system/v1/%"},
squirrel.Or{
squirrel.And{
squirrel.Eq{accessProtocolCol: HTTP},
squirrel.NotEq{accessResponseStatusCol: http.StatusForbidden},
squirrel.NotEq{accessResponseStatusCol: http.StatusInternalServerError},
squirrel.NotEq{accessResponseStatusCol: http.StatusTooManyRequests},
},
squirrel.And{
squirrel.Eq{accessProtocolCol: GRPC},
squirrel.NotEq{accessResponseStatusCol: codes.PermissionDenied},
squirrel.NotEq{accessResponseStatusCol: codes.Internal},
squirrel.NotEq{accessResponseStatusCol: codes.ResourceExhausted},
},
},
}).
PlaceholderFormat(squirrel.Dollar).
ToSql()
if err != nil {
return 0, caos_errors.ThrowInternal(err, "ACCESS-V9Sde", "Errors.Internal")
}
var count uint64
if err = l.dbClient.
QueryRowContext(ctx, stmt, args...).
Scan(&count); err != nil {
return 0, caos_errors.ThrowInternal(err, "ACCESS-pBPrM", "Errors.Logstore.Access.ScanFailed")
}
return count, nil
}
func (l *databaseLogStorage) Cleanup(ctx context.Context, keep time.Duration) error {
stmt, args, err := squirrel.Delete(accessLogsTable).
Where(squirrel.LtOrEq{accessTimestampCol: time.Now().Add(-keep)}).
PlaceholderFormat(squirrel.Dollar).
ToSql()
if err != nil {
return caos_errors.ThrowInternal(err, "ACCESS-2oTh6", "Errors.Internal")
}
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
_, err = l.dbClient.ExecContext(execCtx, stmt, args...)
return err
}

View File

@@ -0,0 +1,91 @@
package access
import (
"strings"
"time"
zitadel_http "github.com/zitadel/zitadel/internal/api/http"
"github.com/zitadel/zitadel/internal/logstore"
)
var _ logstore.LogRecord = (*Record)(nil)
type Record struct {
LogDate time.Time `json:"logDate"`
Protocol Protocol `json:"protocol"`
RequestURL string `json:"requestUrl"`
ResponseStatus uint32 `json:"responseStatus"`
// RequestHeaders are plain maps so that the differing HTTP and gRPC
// implementations don't interfere with each other
RequestHeaders map[string][]string `json:"requestHeaders"`
// ResponseHeaders are plain maps so that the differing HTTP and gRPC
// implementations don't interfere with each other
ResponseHeaders map[string][]string `json:"responseHeaders"`
InstanceID string `json:"instanceId"`
ProjectID string `json:"projectId"`
RequestedDomain string `json:"requestedDomain"`
RequestedHost string `json:"requestedHost"`
}
type Protocol uint8
const (
GRPC Protocol = iota
HTTP
redacted = "[REDACTED]"
)
func (a Record) Normalize() logstore.LogRecord {
a.RequestedDomain = cutString(a.RequestedDomain, 200)
a.RequestURL = cutString(a.RequestURL, 200)
normalizeHeaders(a.RequestHeaders, strings.ToLower(zitadel_http.Authorization), "grpcgateway-authorization", "cookie", "grpcgateway-cookie")
normalizeHeaders(a.ResponseHeaders, "set-cookie")
return &a
}
const maxValuesPerKey = 10
// normalizeHeaders lowers all header keys and redacts secrets
func normalizeHeaders(header map[string][]string, redactKeysLower ...string) {
lowerKeys(header)
redactKeys(header, redactKeysLower...)
pruneKeys(header)
}
func lowerKeys(header map[string][]string) {
for k, v := range header {
delete(header, k)
header[strings.ToLower(k)] = v
}
}
func redactKeys(header map[string][]string, redactKeysLower ...string) {
for _, redactKey := range redactKeysLower {
if _, ok := header[redactKey]; ok {
header[redactKey] = []string{redacted}
}
}
}
func pruneKeys(header map[string][]string) {
for key, value := range header {
valueItems := make([]string, 0, maxValuesPerKey)
for i, valueItem := range value {
// Keep at most maxValuesPerKey (10) header values per key
if i >= maxValuesPerKey {
break
}
// Max 200 value length
valueItems = append(valueItems, cutString(valueItem, 200))
}
header[key] = valueItems
}
}
func cutString(str string, pos int) string {
if len(str) <= pos {
return str
}
return str[:pos-1]
}
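
A sketch of what Normalize does to a record, assuming the access package lives under internal/logstore/emitters/access (the import is for illustration only, since internal packages are not importable outside the module): the Authorization header key is lowercased and its value redacted.

package main

import (
	"fmt"
	"time"

	"github.com/zitadel/zitadel/internal/logstore/emitters/access"
)

func main() {
	r := access.Record{
		LogDate:    time.Now(),
		Protocol:   access.HTTP,
		RequestURL: "https://example.com/oauth/v2/token",
		RequestHeaders: map[string][]string{
			"Authorization": {"Bearer some-secret-token"},
		},
	}
	normalized := r.Normalize().(*access.Record)
	// The key is now lowercased and the secret replaced: prints [[REDACTED]]
	fmt.Println(normalized.RequestHeaders["authorization"])
}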

View File

@@ -0,0 +1,135 @@
package execution
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/Masterminds/squirrel"
"github.com/zitadel/logging"
caos_errors "github.com/zitadel/zitadel/internal/errors"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/repository/quota"
)
const (
executionLogsTable = "logstore.execution"
executionTimestampCol = "log_date"
executionTookCol = "took"
executionMessageCol = "message"
executionLogLevelCol = "loglevel"
executionInstanceIdCol = "instance_id"
executionActionIdCol = "action_id"
executionMetadataCol = "metadata"
)
var _ logstore.UsageQuerier = (*databaseLogStorage)(nil)
var _ logstore.LogCleanupper = (*databaseLogStorage)(nil)
type databaseLogStorage struct {
dbClient *sql.DB
}
func NewDatabaseLogStorage(dbClient *sql.DB) *databaseLogStorage {
return &databaseLogStorage{dbClient: dbClient}
}
func (l *databaseLogStorage) QuotaUnit() quota.Unit {
return quota.ActionsAllRunsSeconds
}
func (l *databaseLogStorage) Emit(ctx context.Context, bulk []logstore.LogRecord) error {
builder := squirrel.Insert(executionLogsTable).
Columns(
executionTimestampCol,
executionTookCol,
executionMessageCol,
executionLogLevelCol,
executionInstanceIdCol,
executionActionIdCol,
executionMetadataCol,
).
PlaceholderFormat(squirrel.Dollar)
for idx := range bulk {
item := bulk[idx].(*Record)
var took interface{}
if item.Took > 0 {
took = item.Took
}
builder = builder.Values(
item.LogDate,
took,
item.Message,
item.LogLevel,
item.InstanceID,
item.ActionID,
item.Metadata,
)
}
stmt, args, err := builder.ToSql()
if err != nil {
return caos_errors.ThrowInternal(err, "EXEC-KOS7I", "Errors.Internal")
}
result, err := l.dbClient.ExecContext(ctx, stmt, args...)
if err != nil {
return caos_errors.ThrowInternal(err, "EXEC-0j6i5", "Errors.Access.StorageFailed")
}
rows, err := result.RowsAffected()
if err != nil {
return caos_errors.ThrowInternal(err, "EXEC-MGchJ", "Errors.Internal")
}
logging.WithFields("rows", rows).Debug("successfully stored execution logs")
return nil
}
// TODO: AS OF SYSTEM TIME
func (l *databaseLogStorage) QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error) {
stmt, args, err := squirrel.Select(
fmt.Sprintf("COALESCE(SUM(%s)::INT,0)", executionTookCol),
).
From(executionLogsTable).
Where(squirrel.And{
squirrel.Eq{executionInstanceIdCol: instanceId},
squirrel.GtOrEq{executionTimestampCol: start},
squirrel.NotEq{executionTookCol: nil},
}).
PlaceholderFormat(squirrel.Dollar).
ToSql()
if err != nil {
return 0, caos_errors.ThrowInternal(err, "EXEC-DXtzg", "Errors.Internal")
}
var durationSeconds uint64
if err = l.dbClient.
QueryRowContext(ctx, stmt, args...).
Scan(&durationSeconds); err != nil {
return 0, caos_errors.ThrowInternal(err, "EXEC-Ad8nP", "Errors.Logstore.Execution.ScanFailed")
}
return durationSeconds, nil
}
func (l *databaseLogStorage) Cleanup(ctx context.Context, keep time.Duration) error {
stmt, args, err := squirrel.Delete(executionLogsTable).
Where(squirrel.LtOrEq{executionTimestampCol: time.Now().Add(-keep)}).
PlaceholderFormat(squirrel.Dollar).
ToSql()
if err != nil {
return caos_errors.ThrowInternal(err, "EXEC-Bja8V", "Errors.Internal")
}
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
_, err = l.dbClient.ExecContext(execCtx, stmt, args...)
return err
}

View File

@@ -0,0 +1,33 @@
package execution
import (
"time"
"github.com/sirupsen/logrus"
"github.com/zitadel/zitadel/internal/logstore"
)
var _ logstore.LogRecord = (*Record)(nil)
type Record struct {
LogDate time.Time `json:"logDate"`
Took time.Duration `json:"took"`
Message string `json:"message"`
LogLevel logrus.Level `json:"logLevel"`
InstanceID string `json:"instanceId"`
ActionID string `json:"actionId,omitempty"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
}
func (e Record) Normalize() logstore.LogRecord {
e.Message = cutString(e.Message, 2000)
return &e
}
func cutString(str string, pos int) string {
if len(str) <= pos {
return str
}
return str[:pos]
}

View File

@@ -0,0 +1,89 @@
package mock
import (
"context"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/repository/quota"
)
var _ logstore.UsageQuerier = (*InmemLogStorage)(nil)
var _ logstore.LogCleanupper = (*InmemLogStorage)(nil)
type InmemLogStorage struct {
mux sync.Mutex
clock clock.Clock
emitted []*record
bulks []int
}
func NewInMemoryStorage(clock clock.Clock) *InmemLogStorage {
return &InmemLogStorage{
clock: clock,
emitted: make([]*record, 0),
bulks: make([]int, 0),
}
}
func (l *InmemLogStorage) QuotaUnit() quota.Unit {
return quota.Unimplemented
}
func (l *InmemLogStorage) Emit(_ context.Context, bulk []logstore.LogRecord) error {
if len(bulk) == 0 {
return nil
}
l.mux.Lock()
defer l.mux.Unlock()
for idx := range bulk {
l.emitted = append(l.emitted, bulk[idx].(*record))
}
l.bulks = append(l.bulks, len(bulk))
return nil
}
func (l *InmemLogStorage) QueryUsage(_ context.Context, _ string, start time.Time) (uint64, error) {
l.mux.Lock()
defer l.mux.Unlock()
var count uint64
for _, r := range l.emitted {
if r.ts.After(start) {
count++
}
}
return count, nil
}
func (l *InmemLogStorage) Cleanup(_ context.Context, keep time.Duration) error {
l.mux.Lock()
defer l.mux.Unlock()
clean := make([]*record, 0)
from := l.clock.Now().Add(-(keep + 1))
for _, r := range l.emitted {
if r.ts.After(from) {
clean = append(clean, r)
}
}
l.emitted = clean
return nil
}
func (l *InmemLogStorage) Bulks() []int {
l.mux.Lock()
defer l.mux.Unlock()
return l.bulks
}
func (l *InmemLogStorage) Len() int {
l.mux.Lock()
defer l.mux.Unlock()
return len(l.emitted)
}

View File

@@ -0,0 +1,25 @@
package mock
import (
"time"
"github.com/benbjohnson/clock"
"github.com/zitadel/zitadel/internal/logstore"
)
var _ logstore.LogRecord = (*record)(nil)
func NewRecord(clock clock.Clock) *record {
return &record{ts: clock.Now()}
}
type record struct {
ts time.Time
redacted bool
}
func (r record) Normalize() logstore.LogRecord {
r.redacted = true
return &r
}

View File

@@ -0,0 +1,23 @@
package stdout
import (
"context"
"encoding/json"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/logstore"
)
func NewStdoutEmitter() logstore.LogEmitter {
return logstore.LogEmitterFunc(func(ctx context.Context, bulk []logstore.LogRecord) error {
for idx := range bulk {
bytes, err := json.Marshal(bulk[idx])
if err != nil {
return err
}
logging.WithFields("record", string(bytes)).Info("log record emitted")
}
return nil
})
}

View File

@@ -0,0 +1,83 @@
package logstore_test
import (
"time"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/repository/quota"
)
type emitterOption func(config *logstore.EmitterConfig)
func emitterConfig(options ...emitterOption) *logstore.EmitterConfig {
cfg := &logstore.EmitterConfig{
Enabled: true,
Keep: time.Hour,
CleanupInterval: time.Hour,
Debounce: &logstore.DebouncerConfig{
MinFrequency: 0,
MaxBulkSize: 0,
},
}
for _, opt := range options {
opt(cfg)
}
return cfg
}
func withDebouncerConfig(config *logstore.DebouncerConfig) emitterOption {
return func(c *logstore.EmitterConfig) {
c.Debounce = config
}
}
func withDisabled() emitterOption {
return func(c *logstore.EmitterConfig) {
c.Enabled = false
}
}
func withCleanupping(keep, interval time.Duration) emitterOption {
return func(c *logstore.EmitterConfig) {
c.Keep = keep
c.CleanupInterval = interval
}
}
type quotaOption func(config *quota.AddedEvent)
func quotaConfig(quotaOptions ...quotaOption) quota.AddedEvent {
q := &quota.AddedEvent{
Amount: 90,
Limit: false,
ResetInterval: 90 * time.Second,
From: time.Unix(0, 0),
}
for _, opt := range quotaOptions {
opt(q)
}
return *q
}
func withAmountAndInterval(n uint64) quotaOption {
return func(c *quota.AddedEvent) {
c.Amount = n
c.ResetInterval = time.Duration(n) * time.Second
}
}
func withLimiting() quotaOption {
return func(c *quota.AddedEvent) {
c.Limit = true
}
}
func repeat(value, times int) []int {
ints := make([]int, times)
for i := 0; i < times; i++ {
ints[i] = value
}
return ints
}
func uint64Ptr(n uint64) *uint64 { return &n }

View File

@@ -0,0 +1,28 @@
package mock
import (
"context"
"time"
"github.com/zitadel/zitadel/internal/logstore"
"github.com/zitadel/zitadel/internal/repository/quota"
)
var _ logstore.QuotaQuerier = (*inmemReporter)(nil)
type inmemReporter struct {
config *quota.AddedEvent
startPeriod time.Time
}
func NewNoopQuerier(quota *quota.AddedEvent, startPeriod time.Time) *inmemReporter {
return &inmemReporter{config: quota, startPeriod: startPeriod}
}
func (i *inmemReporter) GetCurrentQuotaPeriod(context.Context, string, quota.Unit) (*quota.AddedEvent, time.Time, error) {
return i.config, i.startPeriod, nil
}
func (*inmemReporter) GetDueQuotaNotifications(context.Context, *quota.AddedEvent, time.Time, uint64) ([]*quota.NotifiedEvent, error) {
return nil, nil
}

View File

@@ -0,0 +1,110 @@
package logstore
import (
"context"
"math"
"time"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/repository/quota"
)
type QuotaQuerier interface {
GetCurrentQuotaPeriod(ctx context.Context, instanceID string, unit quota.Unit) (config *quota.AddedEvent, periodStart time.Time, err error)
GetDueQuotaNotifications(ctx context.Context, config *quota.AddedEvent, periodStart time.Time, used uint64) ([]*quota.NotifiedEvent, error)
}
type UsageQuerier interface {
LogEmitter
QuotaUnit() quota.Unit
QueryUsage(ctx context.Context, instanceId string, start time.Time) (uint64, error)
}
type UsageReporter interface {
Report(ctx context.Context, notifications []*quota.NotifiedEvent) (err error)
}
type UsageReporterFunc func(context.Context, []*quota.NotifiedEvent) (err error)
func (u UsageReporterFunc) Report(ctx context.Context, notifications []*quota.NotifiedEvent) (err error) {
return u(ctx, notifications)
}
type Service struct {
usageQuerier UsageQuerier
quotaQuerier QuotaQuerier
usageReporter UsageReporter
enabledSinks []*emitter
sinkEnabled bool
reportingEnabled bool
}
func New(quotaQuerier QuotaQuerier, usageReporter UsageReporter, usageQuerierSink *emitter, additionalSink ...*emitter) *Service {
var usageQuerier UsageQuerier
if usageQuerierSink != nil {
usageQuerier = usageQuerierSink.emitter.(UsageQuerier)
}
svc := &Service{
reportingEnabled: usageQuerierSink != nil && usageQuerierSink.enabled,
usageQuerier: usageQuerier,
quotaQuerier: quotaQuerier,
usageReporter: usageReporter,
}
for _, s := range append([]*emitter{usageQuerierSink}, additionalSink...) {
if s != nil && s.enabled {
svc.enabledSinks = append(svc.enabledSinks, s)
}
}
svc.sinkEnabled = len(svc.enabledSinks) > 0
return svc
}
func (s *Service) Enabled() bool {
return s.sinkEnabled
}
func (s *Service) Handle(ctx context.Context, record LogRecord) {
for _, sink := range s.enabledSinks {
logging.OnError(sink.Emit(ctx, record.Normalize())).WithField("record", record).Warn("failed to emit log record")
}
}
func (s *Service) Limit(ctx context.Context, instanceID string) *uint64 {
var err error
defer func() {
logging.OnError(err).Warn("failed to check is usage should be limited")
}()
if !s.reportingEnabled || instanceID == "" {
return nil
}
quota, periodStart, err := s.quotaQuerier.GetCurrentQuotaPeriod(ctx, instanceID, s.usageQuerier.QuotaUnit())
if err != nil || quota == nil {
return nil
}
usage, err := s.usageQuerier.QueryUsage(ctx, instanceID, periodStart)
if err != nil {
return nil
}
var remaining *uint64
if quota.Limit {
r := uint64(math.Max(0, float64(quota.Amount)-float64(usage)))
remaining = &r
}
notifications, err := s.quotaQuerier.GetDueQuotaNotifications(ctx, quota, periodStart, usage)
if err != nil {
return remaining
}
err = s.usageReporter.Report(ctx, notifications)
return remaining
}
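
A sketch of how these pieces might be wired together for the access quota, based on the APIs added in this commit. The dbClient, cfg, quotaQuerier, and usageReporter parameters are assumptions, and the import path of the access package is assumed to be internal/logstore/emitters/access.

package quotaexample

import (
	"context"
	"database/sql"

	"github.com/benbjohnson/clock"

	"github.com/zitadel/zitadel/internal/logstore"
	"github.com/zitadel/zitadel/internal/logstore/emitters/access"
)

func wireAccessQuota(ctx context.Context, dbClient *sql.DB, cfg *logstore.EmitterConfig,
	quotaQuerier logstore.QuotaQuerier, usageReporter logstore.UsageReporter) error {
	storage := access.NewDatabaseLogStorage(dbClient)
	accessEmitter, err := logstore.NewEmitter(ctx, clock.New(), cfg, storage)
	if err != nil {
		return err
	}
	svc := logstore.New(quotaQuerier, usageReporter, accessEmitter)
	if svc.Enabled() {
		// Record one authenticated request in the access log.
		svc.Handle(ctx, &access.Record{ /* request details */ })
	}
	// Limit returns how many authenticated requests remain in the current
	// quota period, or nil if the quota does not limit usage.
	if remaining := svc.Limit(ctx, "instance-id"); remaining != nil && *remaining == 0 {
		// quota exhausted: callers could block further requests here
	}
	return nil
}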

View File

@@ -0,0 +1,312 @@
// The library github.com/benbjohnson/clock fails when race is enabled
// https://github.com/benbjohnson/clock/issues/44
//go:build !race
package logstore_test
import (
"context"
"reflect"
"runtime"
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/zitadel/zitadel/internal/logstore"
emittermock "github.com/zitadel/zitadel/internal/logstore/emitters/mock"
quotaqueriermock "github.com/zitadel/zitadel/internal/logstore/quotaqueriers/mock"
"github.com/zitadel/zitadel/internal/repository/quota"
)
const (
tick = time.Second
ticks = 60
)
type args struct {
mainSink *logstore.EmitterConfig
secondarySink *logstore.EmitterConfig
config quota.AddedEvent
}
type want struct {
enabled bool
remaining *uint64
mainSink wantSink
secondarySink wantSink
}
type wantSink struct {
bulks []int
len int
}
func TestService(t *testing.T) {
// Tests should run on a single thread for deterministic results.
beforeProcs := runtime.GOMAXPROCS(1)
defer runtime.GOMAXPROCS(beforeProcs)
tests := []struct {
name string
args args
want want
}{{
name: "max and min debouncing works",
args: args{
mainSink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
MinFrequency: 1 * time.Minute,
MaxBulkSize: 60,
})),
secondarySink: emitterConfig(),
config: quotaConfig(),
},
want: want{
enabled: true,
remaining: nil,
mainSink: wantSink{
bulks: repeat(60, 1),
len: 60,
},
secondarySink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
},
}, {
name: "mixed debouncing works",
args: args{
mainSink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
MinFrequency: 0,
MaxBulkSize: 6,
})),
secondarySink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
MinFrequency: 10 * time.Second,
MaxBulkSize: 0,
})),
config: quotaConfig(),
},
want: want{
enabled: true,
remaining: nil,
mainSink: wantSink{
bulks: repeat(6, 10),
len: 60,
},
secondarySink: wantSink{
bulks: repeat(10, 6),
len: 60,
},
},
}, {
name: "when disabling main sink, secondary sink still works",
args: args{
mainSink: emitterConfig(withDisabled()),
secondarySink: emitterConfig(),
config: quotaConfig(),
},
want: want{
enabled: true,
remaining: nil,
mainSink: wantSink{
bulks: repeat(99, 0),
len: 0,
},
secondarySink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
},
}, {
name: "when all sink are disabled, the service is disabled",
args: args{
mainSink: emitterConfig(withDisabled()),
secondarySink: emitterConfig(withDisabled()),
config: quotaConfig(),
},
want: want{
enabled: false,
remaining: nil,
mainSink: wantSink{
bulks: repeat(99, 0),
len: 0,
},
secondarySink: wantSink{
bulks: repeat(99, 0),
len: 0,
},
},
}, {
name: "cleanupping works",
args: args{
mainSink: emitterConfig(withCleanupping(17*time.Second, 28*time.Second)),
secondarySink: emitterConfig(withDebouncerConfig(&logstore.DebouncerConfig{
MinFrequency: 0,
MaxBulkSize: 15,
}), withCleanupping(5*time.Second, 47*time.Second)),
config: quotaConfig(),
},
want: want{
enabled: true,
remaining: nil,
mainSink: wantSink{
bulks: repeat(1, 60),
len: 21,
},
secondarySink: wantSink{
bulks: repeat(15, 4),
len: 18,
},
},
}, {
name: "when quota has a limit of 90, 30 are remaining",
args: args{
mainSink: emitterConfig(),
secondarySink: emitterConfig(),
config: quotaConfig(withLimiting()),
},
want: want{
enabled: true,
remaining: uint64Ptr(30),
mainSink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
secondarySink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
},
}, {
name: "when quota has a limit of 30, 0 are remaining",
args: args{
mainSink: emitterConfig(),
secondarySink: emitterConfig(),
config: quotaConfig(withLimiting(), withAmountAndInterval(30)),
},
want: want{
enabled: true,
remaining: uint64Ptr(0),
mainSink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
secondarySink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
},
}, {
name: "when quota has amount of 30 but is not limited, remaining is nil",
args: args{
mainSink: emitterConfig(),
secondarySink: emitterConfig(),
config: quotaConfig(withAmountAndInterval(30)),
},
want: want{
enabled: true,
remaining: nil,
mainSink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
secondarySink: wantSink{
bulks: repeat(1, 60),
len: 60,
},
},
}}
for _, tt := range tests {
runTest(t, tt.name, tt.args, tt.want)
}
}
func runTest(t *testing.T, name string, args args, want want) bool {
return t.Run("Given over a minute, each second a log record is emitted", func(tt *testing.T) {
tt.Run(name, func(t *testing.T) {
ctx, clock, mainStorage, secondaryStorage, svc := given(t, args, want)
remaining := when(svc, ctx, clock)
then(t, mainStorage, secondaryStorage, remaining, want)
})
})
}
func given(t *testing.T, args args, want want) (context.Context, *clock.Mock, *emittermock.InmemLogStorage, *emittermock.InmemLogStorage, *logstore.Service) {
ctx := context.Background()
clock := clock.NewMock()
periodStart := time.Time{}
clock.Set(args.config.From)
mainStorage := emittermock.NewInMemoryStorage(clock)
mainEmitter, err := logstore.NewEmitter(ctx, clock, args.mainSink, mainStorage)
if err != nil {
t.Errorf("expected no error but got %v", err)
}
secondaryStorage := emittermock.NewInMemoryStorage(clock)
secondaryEmitter, err := logstore.NewEmitter(ctx, clock, args.secondarySink, secondaryStorage)
if err != nil {
t.Errorf("expected no error but got %v", err)
}
svc := logstore.New(
quotaqueriermock.NewNoopQuerier(&args.config, periodStart),
logstore.UsageReporterFunc(func(context.Context, []*quota.NotifiedEvent) error { return nil }),
mainEmitter,
secondaryEmitter)
if svc.Enabled() != want.enabled {
t.Errorf("wantet service enabled to be %t but is %t", want.enabled, svc.Enabled())
}
return ctx, clock, mainStorage, secondaryStorage, svc
}
func when(svc *logstore.Service, ctx context.Context, clock *clock.Mock) *uint64 {
var remaining *uint64
for i := 0; i < ticks; i++ {
svc.Handle(ctx, emittermock.NewRecord(clock))
runtime.Gosched()
remaining = svc.Limit(ctx, "non-empty-instance-id")
clock.Add(tick)
}
time.Sleep(time.Millisecond)
runtime.Gosched()
return remaining
}
func then(t *testing.T, mainStorage, secondaryStorage *emittermock.InmemLogStorage, remaining *uint64, want want) {
mainBulks := mainStorage.Bulks()
if !reflect.DeepEqual(want.mainSink.bulks, mainBulks) {
t.Errorf("wanted main storage to have bulks %v, but got %v", want.mainSink.bulks, mainBulks)
}
mainLen := mainStorage.Len()
if !reflect.DeepEqual(want.mainSink.len, mainLen) {
t.Errorf("wanted main storage to have len %d, but got %d", want.mainSink.len, mainLen)
}
secondaryBulks := secondaryStorage.Bulks()
if !reflect.DeepEqual(want.secondarySink.bulks, secondaryBulks) {
t.Errorf("wanted secondary storage to have bulks %v, but got %v", want.secondarySink.bulks, secondaryBulks)
}
secondaryLen := secondaryStorage.Len()
if !reflect.DeepEqual(want.secondarySink.len, secondaryLen) {
t.Errorf("wanted secondary storage to have len %d, but got %d", want.secondarySink.len, secondaryLen)
}
if remaining == nil && want.remaining == nil {
return
}
if remaining == nil && want.remaining != nil ||
remaining != nil && want.remaining == nil {
t.Errorf("wantet remaining nil %t but got %t", want.remaining == nil, remaining == nil)
return
}
if *remaining != *want.remaining {
t.Errorf("wantet remaining %d but got %d", *want.remaining, *remaining)
return
}
}