Mirror of https://github.com/zitadel/zitadel.git, synced 2024-12-12 11:04:25 +00:00
5823fdbef9
* project quota added
* project quota removed
* add periods table
* make log record generic
* accumulate usage
* query usage
* count action run seconds
* fix filter in ReportQuotaUsage
* fix existing tests
* fix logstore tests
* fix typo
* fix: add quota unit tests command side
* fix: add quota unit tests command side
* fix: add quota unit tests command side
* move notifications into debouncer and improve limit querying
* cleanup
* comment
* fix: add quota unit tests command side
* fix remaining quota usage query
* implement InmemLogStorage
* cleanup and linting
* improve test
* fix: add quota unit tests command side
* fix: add quota unit tests command side
* fix: add quota unit tests command side
* fix: add quota unit tests command side
* action notifications and fixes for notifications query
* revert console prefix
* fix: add quota unit tests command side
* fix: add quota integration tests
* improve accountable requests
* improve accountable requests
* fix: add quota integration tests
* fix: add quota integration tests
* fix: add quota integration tests
* comment
* remove ability to store logs in db and other changes requested from review
* changes requested from review
* changes requested from review
* Update internal/api/http/middleware/access_interceptor.go
Co-authored-by: Silvan <silvan.reusser@gmail.com>
* tests: fix quotas integration tests
* improve incrementUsageStatement
* linting
* fix: delete e2e tests as integration tests cover functionality
* Update internal/api/http/middleware/access_interceptor.go
Co-authored-by: Silvan <silvan.reusser@gmail.com>
* backup
* fix conflict
* create rc
* create prerelease
* remove issue release labeling
* fix tracing
---------
Co-authored-by: Livio Spring <livio.a@gmail.com>
Co-authored-by: Stefan Benz <stefan@caos.ch>
Co-authored-by: adlerhurst <silvan.reusser@gmail.com>
(cherry picked from commit 1a49b7d298)
91 lines
2.2 KiB
Go
package logstore

import (
	"context"
	"sync"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/zitadel/logging"
)

// bulkSink is the destination that shipped bulks of log records are written to.
type bulkSink[T LogRecord[T]] interface {
	SendBulk(ctx context.Context, bulk []T) error
}

// bulkSinkFunc adapts a plain function to the bulkSink interface.
type bulkSinkFunc[T LogRecord[T]] func(ctx context.Context, bulk []T) error

// SendBulk calls the function itself.
func (s bulkSinkFunc[T]) SendBulk(ctx context.Context, bulk []T) error {
	return s(ctx, bulk)
}

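As an aside, bulkSinkFunc is the familiar func-to-interface adapter (compare http.HandlerFunc): any function with the matching signature can act as a bulkSink. A minimal sketch, assuming the LogRecord[T] interface (defined elsewhere in this package) requires a Normalize() T method; the record type and sink below are hypothetical, not part of this file:

// record is a hypothetical LogRecord implementation for illustration only.
type record struct{ payload string }

// Normalize assumes LogRecord[T] requires a Normalize() T method.
func (r record) Normalize() record { return r }

// Any function with the matching signature converts into a bulkSink,
// so simple storage backends need no dedicated type.
var sink bulkSink[record] = bulkSinkFunc[record](func(ctx context.Context, bulk []record) error {
	return nil // e.g. write the bulk to a database or forward it to a collector
})
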
// debouncer caches log records and ships them to the storage in bulks,
// either once MaxBulkSize records have accumulated or at the latest
// after MinFrequency has elapsed.
type debouncer[T LogRecord[T]] struct {
	// Storing a context.Context in a struct is generally bad practice:
	// https://go.dev/blog/context-and-structs
	// However, the debouncer starts a goroutine that triggers side effects itself,
	// so there is no incoming context.Context available when these events fire.
	// The only context we can use for the side effects is the app context.
	// Because it is cancelled by OS signals, it is a better fit than creating
	// new background contexts.
	binarySignaledCtx context.Context
	clock             clock.Clock
	ticker            *clock.Ticker
	mux               sync.Mutex
	cfg               DebouncerConfig
	storage           bulkSink[T]
	cache             []T
	cacheLen          uint
}

// DebouncerConfig controls when cached records are shipped.
type DebouncerConfig struct {
	// MinFrequency is the maximum time records wait before being shipped.
	// A value of 0 disables time-based shipping.
	MinFrequency time.Duration
	// MaxBulkSize triggers an immediate shipment once this many records are cached.
	// A value of 0 disables size-based shipping.
	MaxBulkSize uint
}

// newDebouncer wires the sink to a new debouncer and, if time-based shipping
// is enabled, starts the goroutine that ships on every tick.
func newDebouncer[T LogRecord[T]](binarySignaledCtx context.Context, cfg DebouncerConfig, clock clock.Clock, ship bulkSink[T]) *debouncer[T] {
	a := &debouncer[T]{
		binarySignaledCtx: binarySignaledCtx,
		clock:             clock,
		cfg:               cfg,
		storage:           ship,
	}

	if cfg.MinFrequency > 0 {
		a.ticker = clock.Ticker(cfg.MinFrequency)
		go a.shipOnTicks()
	}
	return a
}

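A note on the clock parameter: taking a clock.Clock instead of calling the time package directly keeps the ticker injectable, so production code passes clock.New() while tests can advance a mock manually. A fragment continuing the hypothetical record/sink sketch from above, where ctx stands for the signal-bound app context:

// Production-style construction (a sketch, not from the repository):
// ship at the latest once per minute, or immediately at 100 cached records.
d := newDebouncer[record](ctx, DebouncerConfig{
	MinFrequency: time.Minute,
	MaxBulkSize:  100,
}, clock.New(), sink)
d.add(record{payload: "accessed /api"})
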
// add caches a record and triggers a shipment once MaxBulkSize is reached.
func (d *debouncer[T]) add(item T) {
	d.mux.Lock()
	defer d.mux.Unlock()
	d.cache = append(d.cache, item)
	d.cacheLen++
	if d.cfg.MaxBulkSize > 0 && d.cacheLen >= d.cfg.MaxBulkSize {
		// Ship in a separate goroutine, so add doesn't block and releases the lock first.
		go d.ship()
	}
}

// ship sends all cached records to the storage and resets the cache.
func (d *debouncer[T]) ship() {
	d.mux.Lock()
	defer d.mux.Unlock()
	// The empty check happens under the lock, so the read of cacheLen
	// doesn't race with concurrent add calls.
	if d.cacheLen == 0 {
		return
	}
	if err := d.storage.SendBulk(d.binarySignaledCtx, d.cache); err != nil {
		logging.WithError(err).WithField("size", len(d.cache)).Error("storing bulk failed")
	}
	d.cache = nil
	d.cacheLen = 0
	// A size-triggered shipment also postpones the next time-triggered one.
	if d.cfg.MinFrequency > 0 {
		d.ticker.Reset(d.cfg.MinFrequency)
	}
}

// shipOnTicks ships the cache every time the ticker fires.
func (d *debouncer[T]) shipOnTicks() {
	for range d.ticker.C {
		d.ship()
	}
}
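Because the ticker comes from the injected clock, time-based shipping can be tested deterministically with benbjohnson/clock's mock. A minimal sketch of such a test, placed in a _test.go file of the same package (with "testing" imported) and reusing the hypothetical record type from the earlier sketch:

func TestDebouncerShipsOnTick(t *testing.T) {
	received := make(chan []record, 1)
	sink := bulkSinkFunc[record](func(_ context.Context, bulk []record) error {
		received <- bulk
		return nil
	})

	mock := clock.NewMock()
	d := newDebouncer[record](context.Background(), DebouncerConfig{MinFrequency: time.Minute}, mock, sink)

	d.add(record{payload: "hello"})
	mock.Add(time.Minute) // advance the mock clock; the ticker fires and ship() runs

	select {
	case bulk := <-received:
		if len(bulk) != 1 {
			t.Fatalf("expected 1 record in the bulk, got %d", len(bulk))
		}
	case <-time.After(time.Second):
		t.Fatal("bulk was never shipped")
	}
}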