This commit is contained in:
adlerhurst
2025-05-08 07:42:53 +02:00
parent 6ba86bc67b
commit 8ba497cb87
166 changed files with 700 additions and 10922 deletions

View File

@@ -1,56 +0,0 @@
package grpc
import (
"context"
"github.com/zitadel/zitadel/backend/command/command"
"github.com/zitadel/zitadel/backend/command/query"
"github.com/zitadel/zitadel/backend/command/receiver"
"github.com/zitadel/zitadel/backend/command/receiver/cache"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/telemetry/logging"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// api wires the dependencies used by the gRPC instance endpoints:
// storage, command receivers, telemetry, and the instance cache.
type api struct {
	db database.Pool
	// manipulator executes instance write operations (create/delete/add domain).
	manipulator receiver.InstanceManipulator
	// reader serves instance read queries.
	reader receiver.InstanceReader
	tracer *tracing.Tracer
	logger *logging.Logger
	// cache stores instances indexed by receiver.InstanceIndex with string keys.
	cache cache.Cache[receiver.InstanceIndex, string, *receiver.Instance]
}
// CreateInstance creates a hard-coded demo instance (ID "123", name "test")
// and, on success, stores it in the cache. Execution is traced and logged.
func (a *api) CreateInstance(ctx context.Context) error {
	instance := &receiver.Instance{
		ID:   "123",
		Name: "test",
	}
	create := command.CreateInstance(a.manipulator, instance)
	logged := command.Activity(a.logger, create)
	cached := command.SetCache(a.cache, logged, instance)
	return command.Trace(a.tracer, cached).Execute(ctx)
}
// DeleteInstance deletes the hard-coded demo instance ("123") and, on
// success, removes its ID key from the cache. Execution is traced and logged.
func (a *api) DeleteInstance(ctx context.Context) error {
	del := command.DeleteInstance(a.manipulator, &receiver.Instance{ID: "123"})
	logged := command.Activity(a.logger, del)
	uncached := command.DeleteCache(a.cache, logged, receiver.InstanceByID, "123")
	return command.Trace(a.tracer, uncached).Execute(ctx)
}
// InstanceByID looks up the hard-coded demo instance ("123").
func (a *api) InstanceByID(ctx context.Context) (*receiver.Instance, error) {
	return query.InstanceByID(a.reader, "123").Execute(ctx)
}

View File

@@ -1,102 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver/cache"
)
// setCache decorates a Command and writes entry to the cache after the
// wrapped command succeeds.
type setCache[I, K comparable, V cache.Entry[I, K]] struct {
	cache   cache.Cache[I, K, V]
	command Command
	entry   V
}

// SetCache decorates the command, if the command is executed without error it will set the cache entry.
// The parameter is named c (not cache) so it does not shadow the cache package.
func SetCache[I, K comparable, V cache.Entry[I, K]](c cache.Cache[I, K, V], command Command, entry V) Command {
	return &setCache[I, K, V]{
		cache:   c,
		command: command,
		entry:   entry,
	}
}

var _ Command = (*setCache[any, any, cache.Entry[any, any]])(nil)

// Execute implements [Command]. The cache is only written on success.
func (s *setCache[I, K, V]) Execute(ctx context.Context) error {
	if err := s.command.Execute(ctx); err != nil {
		return err
	}
	s.cache.Set(ctx, s.entry)
	return nil
}

// Name implements [Command] by delegating to the wrapped command.
func (s *setCache[I, K, V]) Name() string {
	return s.command.Name()
}
// deleteCache decorates a Command and deletes keys from a cache index
// after the wrapped command succeeds.
type deleteCache[I, K comparable, V cache.Entry[I, K]] struct {
	cache   cache.Cache[I, K, V]
	command Command
	index   I
	keys    []K
}

// DeleteCache decorates the command, if the command is executed without error it will delete the cache entry.
// The parameter is named c (not cache) so it does not shadow the cache package.
func DeleteCache[I, K comparable, V cache.Entry[I, K]](c cache.Cache[I, K, V], command Command, index I, keys ...K) Command {
	return &deleteCache[I, K, V]{
		cache:   c,
		command: command,
		index:   index,
		keys:    keys,
	}
}

var _ Command = (*deleteCache[any, any, cache.Entry[any, any]])(nil)

// Execute implements [Command]. Keys are only deleted on success.
func (d *deleteCache[I, K, V]) Execute(ctx context.Context) error {
	if err := d.command.Execute(ctx); err != nil {
		return err
	}
	return d.cache.Delete(ctx, d.index, d.keys...)
}

// Name implements [Command] by delegating to the wrapped command.
func (d *deleteCache[I, K, V]) Name() string {
	return d.command.Name()
}
// invalidateCache decorates a Command and invalidates cache keys after
// the wrapped command succeeds.
type invalidateCache[I, K comparable, V cache.Entry[I, K]] struct {
	cache   cache.Cache[I, K, V]
	command Command
	index   I
	keys    []K
}

// InvalidateCache decorates the command, if the command is executed without error it will invalidate the cache entry.
// The parameter is named c (not cache) so it does not shadow the cache package.
func InvalidateCache[I, K comparable, V cache.Entry[I, K]](c cache.Cache[I, K, V], command Command, index I, keys ...K) Command {
	return &invalidateCache[I, K, V]{
		cache:   c,
		command: command,
		index:   index,
		keys:    keys,
	}
}

var _ Command = (*invalidateCache[any, any, cache.Entry[any, any]])(nil)

// Execute implements [Command]. Keys are only invalidated on success.
func (i *invalidateCache[I, K, V]) Execute(ctx context.Context) error {
	if err := i.command.Execute(ctx); err != nil {
		return err
	}
	return i.cache.Invalidate(ctx, i.index, i.keys...)
}

// Name implements [Command] by delegating to the wrapped command.
func (i *invalidateCache[I, K, V]) Name() string {
	return i.command.Name()
}

View File

@@ -1,31 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver/cache"
)
// Command is an executable, named unit of work.
type Command interface {
	// Execute runs the command and returns the first error encountered.
	Execute(context.Context) error
	// Name identifies the command, e.g. for logging and tracing.
	Name() string
}
// Batch executes a sequence of commands in order.
type Batch struct {
	commands []Command
}

// Execute runs each command in order, stopping at and returning the first
// error; the remaining commands are not executed.
func (b *Batch) Execute(ctx context.Context) error {
	for idx := range b.commands {
		if err := b.commands[idx].Execute(ctx); err != nil {
			// TODO: undo already-executed commands?
			return err
		}
	}
	return nil
}
// CacheableCommand is a Command that exposes the cache entry it produces.
type CacheableCommand[I, K comparable, V cache.Entry[I, K]] interface {
	Command
	// Entry returns the value to be cached after successful execution.
	Entry() V
}

View File

@@ -1 +0,0 @@
package command

View File

@@ -1,33 +0,0 @@
package command
import (
"slices"
"github.com/zitadel/zitadel/backend/command/receiver"
)
// SetPrimaryDomain marks the domain named Domain as primary and unmarks
// every other domain in Domains.
type SetPrimaryDomain struct {
	Domains []*receiver.Domain
	Domain  string
}

// Execute sets IsPrimary on each domain: true for the matching name,
// false otherwise. It never fails.
func (s *SetPrimaryDomain) Execute() error {
	// A plain range is equivalent to iterating slices.Values and avoids
	// the extra iterator indirection.
	for _, domain := range s.Domains {
		domain.IsPrimary = domain.Name == s.Domain
	}
	return nil
}
// RemoveDomain removes every domain named Domain from Domains.
type RemoveDomain struct {
	Domains []*receiver.Domain
	Domain  string
}

// Execute filters Domains in place; callers must read the result back
// from r.Domains. It never fails.
func (r *RemoveDomain) Execute() error {
	r.Domains = slices.DeleteFunc(r.Domains, func(domain *receiver.Domain) bool {
		return domain.Name == r.Domain
	})
	return nil
}

View File

@@ -1,97 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver"
)
// createInstance creates a new instance via the manipulator.
type createInstance struct {
	receiver receiver.InstanceManipulator
	*receiver.Instance
}

// CreateInstance returns a command that creates the given instance.
// The parameter is named m (not receiver) so it does not shadow the receiver package.
func CreateInstance(m receiver.InstanceManipulator, instance *receiver.Instance) *createInstance {
	return &createInstance{
		Instance: instance,
		receiver: m,
	}
}

// Execute marks the instance active and persists it.
func (c *createInstance) Execute(ctx context.Context) error {
	c.State = receiver.InstanceStateActive
	return c.receiver.Create(ctx, c.Instance)
}

// Name implements [Command].
func (c *createInstance) Name() string {
	return "CreateInstance"
}
// deleteInstance deletes an instance via the manipulator.
type deleteInstance struct {
	receiver receiver.InstanceManipulator
	*receiver.Instance
}

// DeleteInstance returns a command that deletes the given instance.
// The parameter is named m (not receiver) so it does not shadow the receiver package.
func DeleteInstance(m receiver.InstanceManipulator, instance *receiver.Instance) *deleteInstance {
	return &deleteInstance{
		Instance: instance,
		receiver: m,
	}
}

// Execute removes the instance from storage.
func (d *deleteInstance) Execute(ctx context.Context) error {
	return d.receiver.Delete(ctx, d.Instance)
}

// Name implements [Command].
// The receiver is named d for consistency with Execute (was c).
func (d *deleteInstance) Name() string {
	return "DeleteInstance"
}
// updateInstance renames an instance.
type updateInstance struct {
	receiver receiver.InstanceManipulator
	*receiver.Instance
	name string
}

// UpdateInstance returns a command that renames the given instance.
// The parameter is named m (not receiver) so it does not shadow the receiver package.
func UpdateInstance(m receiver.InstanceManipulator, instance *receiver.Instance, name string) *updateInstance {
	return &updateInstance{
		Instance: instance,
		receiver: m,
		name:     name,
	}
}

// Execute applies the new name in memory.
// NOTE(review): persistence is commented out below — the rename is never
// written through the manipulator; confirm this is intentional.
func (u *updateInstance) Execute(ctx context.Context) error {
	u.Instance.Name = u.name
	// return u.receiver.Update(ctx, u.Instance)
	return nil
}

// Name implements [Command].
// The receiver is named u for consistency with Execute (was c).
func (u *updateInstance) Name() string {
	return "UpdateInstance"
}
// addDomain adds a domain to an instance.
type addDomain struct {
	receiver receiver.InstanceManipulator
	*receiver.Instance
	*receiver.Domain
}

// AddDomain returns a command that adds domain to the given instance.
// The parameter is named m (not receiver) so it does not shadow the receiver package.
func AddDomain(m receiver.InstanceManipulator, instance *receiver.Instance, domain *receiver.Domain) *addDomain {
	return &addDomain{
		Instance: instance,
		Domain:   domain,
		receiver: m,
	}
}

// Execute persists the domain on the instance.
func (a *addDomain) Execute(ctx context.Context) error {
	return a.receiver.AddDomain(ctx, a.Instance, a.Domain)
}

// Name implements [Command].
// The receiver is named a for consistency with Execute (was c).
func (a *addDomain) Name() string {
	return "AddDomain"
}

View File

@@ -1,44 +0,0 @@
package command
import (
"context"
"log/slog"
"time"
"github.com/zitadel/zitadel/backend/telemetry/logging"
)
// logger decorates a Command with activity logging.
type logger struct {
	level slog.Level
	*logging.Logger
	cmd Command
}

// Activity decorates the commands execute method with logging.
// It logs the command name, duration, and success or failure of the command.
func Activity(l *logging.Logger, command Command) Command {
	return &logger{
		Logger: l.With(slog.String("type", "activity")),
		level:  slog.LevelInfo,
		cmd:    command,
	}
}

// Name implements [Command].
func (l *logger) Name() string {
	return l.cmd.Name()
}

// Execute implements [Command]: it logs the start, runs the wrapped
// command, then logs the duration and the outcome.
func (l *logger) Execute(ctx context.Context) error {
	started := time.Now()
	entry := l.Logger.With(slog.String("command", l.cmd.Name()))
	entry.InfoContext(ctx, "execute")
	err := l.cmd.Execute(ctx)
	entry = entry.With(slog.Duration("took", time.Since(started)))
	if err == nil {
		entry.Log(ctx, l.level, "successful")
		return nil
	}
	entry.Log(ctx, l.level, "failed", slog.Any("cause", err))
	return err
}

View File

@@ -1,37 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// trace decorates a Command with tracing.
type trace struct {
	command Command
	tracer  *tracing.Tracer
}

// Trace decorates the commands execute method with tracing.
// It creates a span with the command name and records any errors that occur during execution.
// The span is ended after the command is executed.
func Trace(tracer *tracing.Tracer, command Command) Command {
	return &trace{
		command: command,
		tracer:  tracer,
	}
}

// Name implements [Command].
// The receiver is named t for consistency with Execute (was l).
func (t *trace) Name() string {
	return t.command.Name()
}

// Execute implements [Command].
func (t *trace) Execute(ctx context.Context) error {
	ctx, span := t.tracer.Start(ctx, t.command.Name())
	defer span.End()

	err := t.command.Execute(ctx)
	if err != nil {
		span.RecordError(err)
	}
	return err
}

View File

@@ -1,46 +0,0 @@
package command
import "github.com/zitadel/zitadel/backend/command/receiver"
// ChangeUsername replaces the username of the embedded user.
type ChangeUsername struct {
	*receiver.User
	Username string
}

// Execute applies the new username in memory. It never fails.
func (u *ChangeUsername) Execute() error {
	u.User.Username = u.Username
	return nil
}

// Name implements [Command].
func (u *ChangeUsername) Name() string {
	return "ChangeUsername"
}

// SetEmail replaces the email of the embedded user.
type SetEmail struct {
	*receiver.User
	*receiver.Email
}

// Execute applies the new email in memory. It never fails.
func (e *SetEmail) Execute() error {
	e.User.Email = e.Email
	return nil
}

// Name implements [Command].
func (e *SetEmail) Name() string {
	return "SetEmail"
}

// SetPhone replaces the phone of the embedded user.
type SetPhone struct {
	*receiver.User
	*receiver.Phone
}

// Execute applies the new phone in memory. It never fails.
func (p *SetPhone) Execute() error {
	p.User.Phone = p.Phone
	return nil
}

// Name implements [Command].
func (p *SetPhone) Name() string {
	return "SetPhone"
}

View File

@@ -1,32 +0,0 @@
package query
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver"
)
// instanceByID queries a single instance by its ID.
type instanceByID struct {
	receiver receiver.InstanceReader
	id       string
}

// InstanceByID returns a new instanceByID query.
// The parameter is named r (not receiver) so it does not shadow the receiver package.
func InstanceByID(r receiver.InstanceReader, id string) *instanceByID {
	return &instanceByID{
		receiver: r,
		id:       id,
	}
}

// Execute implements Query.
func (i *instanceByID) Execute(ctx context.Context) (*receiver.Instance, error) {
	return i.receiver.ByID(ctx, i.id)
}

// Name implements Query.
func (i *instanceByID) Name() string {
	return "instanceByID"
}

var _ Query[*receiver.Instance] = (*instanceByID)(nil)

View File

@@ -1,8 +0,0 @@
package query
import "context"
// Query is an executable, named read operation returning a value of type T.
type Query[T any] interface {
	// Execute runs the query and returns its result.
	Execute(ctx context.Context) (T, error)
	// Name identifies the query, e.g. for logging and tracing.
	Name() string
}

View File

@@ -1,112 +0,0 @@
// Package cache provides abstraction of cache implementations that can be used by zitadel.
package cache
import (
"context"
"time"
"github.com/zitadel/logging"
)
// Purpose describes which object types are stored by a cache.
type Purpose int

//go:generate enumer -type Purpose -transform snake -trimprefix Purpose
const (
	PurposeUnspecified Purpose = iota
	PurposeAuthzInstance
	PurposeMilestones
	PurposeOrganization
	PurposeIdPFormCallback
)

// Cache stores objects with a value of type `V`.
// Objects may be referred to by one or more indices.
// Implementations may encode the value for storage.
// This means non-exported fields may be lost and objects
// with function values may fail to encode.
// See https://pkg.go.dev/encoding/json#Marshal for example.
//
// `I` is the type by which indices are identified,
// typically an enum for type-safe access.
// Indices are defined when calling the constructor of an implementation of this interface.
// It is illegal to refer to an index not defined during construction.
//
// `K` is the type used as key in each index.
// Due to the limitations in type constraints, all indices use the same key type.
//
// Implementations are free to use stricter type constraints or fixed typing.
type Cache[I, K comparable, V Entry[I, K]] interface {
	// Get an object through specified index.
	// ok is false when the index is unknown, the key was not found
	// in the index, or the object is not valid.
	Get(ctx context.Context, index I, key K) (V, bool)
	// Set an object.
	// Keys are created on each index based on the [Entry.Keys] method.
	// If any key maps to an existing object, the object is invalidated,
	// regardless if the object has other keys defined in the new entry.
	// This to prevent ghost objects when an entry reduces the amount of keys
	// for a given index.
	Set(ctx context.Context, value V)
	// Invalidate an object through specified index.
	// Implementations may choose to instantly delete the object,
	// defer until prune or a separate cleanup routine.
	// Invalidated objects are no longer returned from Get.
	// It is safe to call Invalidate multiple times or on non-existing entries.
	Invalidate(ctx context.Context, index I, key ...K) error
	// Delete one or more keys from a specific index.
	// An [IndexUnknownError] may be returned if the index is unknown.
	// The referred object is not invalidated and may still be accessible through
	// other indices and keys.
	// It is safe to call Delete multiple times or on non-existing entries.
	Delete(ctx context.Context, index I, key ...K) error
	// Truncate deletes all cached objects.
	Truncate(ctx context.Context) error
}
// Entry contains a value of type `V` to be cached.
//
// `I` is the type by which indices are identified,
// typically an enum for type-safe access.
//
// `K` is the type used as key in an index.
// Due to the limitations in type constraints, all indices use the same key type.
type Entry[I, K comparable] interface {
	// Keys returns which keys map to the object in a specified index.
	// May return nil if the index is unknown or when there are no keys.
	Keys(index I) (key []K)
}
// Connector enumerates the available cache backends.
type Connector int

//go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text
const (
	// Empty line comment ensures empty string for unspecified value
	ConnectorUnspecified Connector = iota //
	ConnectorMemory
	ConnectorPostgres
	ConnectorRedis
)

// Config is the generic configuration for a single cache.
type Config struct {
	// Connector selects the backend implementation.
	Connector Connector
	// Age since an object was added to the cache,
	// after which the object is considered invalid.
	// 0 disables max age checks.
	MaxAge time.Duration
	// Age since last use (Get) of an object,
	// after which the object is considered invalid.
	// 0 disables last use age checks.
	LastUseAge time.Duration
	// Log allows logging of the specific cache.
	// By default only errors are logged to stderr.
	Log *logging.Config
}

View File

@@ -1,49 +0,0 @@
// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages.
package connector
import (
"context"
"fmt"
"github.com/zitadel/zitadel/backend/storage/cache"
"github.com/zitadel/zitadel/backend/storage/cache/connector/gomap"
"github.com/zitadel/zitadel/backend/storage/cache/connector/noop"
)
// CachesConfig holds the connector configurations and one optional cache
// config per purpose; a nil purpose config disables that cache.
type CachesConfig struct {
	Connectors struct {
		Memory gomap.Config
	}
	Instance         *cache.Config
	Milestones       *cache.Config
	Organization     *cache.Config
	IdPFormCallbacks *cache.Config
}

// Connectors bundles all started cache connectors.
type Connectors struct {
	Config CachesConfig
	// Memory is nil when the memory connector is not enabled.
	Memory *gomap.Connector
}
// StartConnectors initializes all configured cache connectors.
// A nil config yields an empty Connectors value.
func StartConnectors(conf *CachesConfig) (Connectors, error) {
	var connectors Connectors
	if conf == nil {
		return connectors, nil
	}
	connectors.Config = *conf
	connectors.Memory = gomap.NewConnector(conf.Connectors.Memory)
	return connectors, nil
}
// StartCache returns a cache for the given purpose, backed by the connector
// selected in conf. A nil or unspecified config yields a no-op cache.
func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) {
	switch {
	case conf == nil, conf.Connector == cache.ConnectorUnspecified:
		return noop.NewCache[I, K, V](), nil
	case conf.Connector == cache.ConnectorMemory && connectors.Memory != nil:
		c := gomap.NewCache[I, K, V](background, indices, *conf)
		connectors.Memory.Config.StartAutoPrune(background, c, purpose)
		return c, nil
	default:
		return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
	}
}

View File

@@ -1,23 +0,0 @@
package gomap
import (
"github.com/zitadel/zitadel/backend/storage/cache"
)
// Config configures the in-memory (go map) cache connector.
type Config struct {
	// Enabled toggles the connector; when false NewConnector returns nil.
	Enabled   bool
	AutoPrune cache.AutoPruneConfig
}

// Connector holds the shared settings for gomap-backed caches.
type Connector struct {
	Config cache.AutoPruneConfig
}

// NewConnector returns a Connector, or nil when the config is disabled.
func NewConnector(config Config) *Connector {
	if !config.Enabled {
		return nil
	}
	return &Connector{
		Config: config.AutoPrune,
	}
}

View File

@@ -1,200 +0,0 @@
package gomap
import (
"context"
"errors"
"log/slog"
"maps"
"os"
"sync"
"sync/atomic"
"time"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// mapCache implements cache.PrunerCache with one plain go map per index.
// Entry pointers are shared between indices; locking lives in each index.
type mapCache[I, K comparable, V cache.Entry[I, K]] struct {
	config   *cache.Config
	indexMap map[I]*index[K, V]
	logger   *slog.Logger
}
// NewCache returns an in-memory Cache implementation based on the builtin go map type.
// Object values are stored as-is and there is no encoding or decoding involved.
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] {
	// Default logger: errors only, to stderr; overridden by config.Log.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		AddSource: true,
		Level:     slog.LevelError,
	}))
	if config.Log != nil {
		logger = config.Log.Slog()
	}
	logger.InfoContext(background, "map cache logging enabled")

	m := &mapCache[I, K, V]{
		config:   &config,
		indexMap: make(map[I]*index[K, V], len(indices)),
		logger:   logger,
	}
	for _, name := range indices {
		m.indexMap[name] = &index[K, V]{
			config:  m.config,
			entries: make(map[K]*entry[V]),
		}
	}
	return m
}
// Get looks up key in the given index.
// ok is false when the index is unknown, the key is missing, or the
// cached entry is no longer valid.
func (c *mapCache[I, K, V]) Get(ctx context.Context, index I, key K) (value V, ok bool) {
	i, ok := c.indexMap[index]
	if !ok {
		c.logger.ErrorContext(ctx, "map cache get", "err", cache.NewIndexUnknownErr(index), "index", index, "key", key)
		return value, false
	}
	entry, err := i.Get(key)
	if err == nil {
		c.logger.DebugContext(ctx, "map cache get", "index", index, "key", key)
		return entry.value, true
	}
	if errors.Is(err, cache.ErrCacheMiss) {
		c.logger.InfoContext(ctx, "map cache get", "err", err, "index", index, "key", key)
		return value, false
	}
	// Bug fix: log the actual error. The previous code logged a misleading
	// IndexUnknownError here even though the index was found above.
	c.logger.ErrorContext(ctx, "map cache get", "err", err, "index", index, "key", key)
	return value, false
}
// Set stores value in every index, under all keys the entry reports for
// that index. The same entry pointer is shared across indices.
func (c *mapCache[I, K, V]) Set(ctx context.Context, value V) {
	now := time.Now()
	e := &entry[V]{
		value:   value,
		created: now,
	}
	e.lastUse.Store(now.UnixMicro())
	for name, idx := range c.indexMap {
		keys := value.Keys(name)
		idx.Set(keys, e)
		c.logger.DebugContext(ctx, "map cache set", "index", name, "keys", keys)
	}
}
// Invalidate flags the entries stored under keys in the given index as
// invalid. Returns an IndexUnknownError for an unknown index.
func (c *mapCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) error {
	idx, ok := c.indexMap[index]
	if !ok {
		return cache.NewIndexUnknownErr(index)
	}
	idx.Invalidate(keys)
	c.logger.DebugContext(ctx, "map cache invalidate", "index", index, "keys", keys)
	return nil
}

// Delete removes keys from the given index; the entries they point to are
// left untouched. Returns an IndexUnknownError for an unknown index.
func (c *mapCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) error {
	idx, ok := c.indexMap[index]
	if !ok {
		return cache.NewIndexUnknownErr(index)
	}
	idx.Delete(keys)
	c.logger.DebugContext(ctx, "map cache delete", "index", index, "keys", keys)
	return nil
}
// Prune removes all invalid entries from every index. It never fails.
func (c *mapCache[I, K, V]) Prune(ctx context.Context) error {
	for name, idx := range c.indexMap {
		idx.Prune()
		c.logger.DebugContext(ctx, "map cache prune", "index", name)
	}
	return nil
}

// Truncate drops all entries from every index. It never fails.
func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {
	for name, idx := range c.indexMap {
		idx.Truncate()
		c.logger.DebugContext(ctx, "map cache truncate", "index", name)
	}
	return nil
}
// index is a single mutex-guarded key→entry map.
// Entries may be shared between indices; deleting a key does not
// invalidate the entry it pointed to.
type index[K comparable, V any] struct {
	mutex   sync.RWMutex
	config  *cache.Config
	entries map[K]*entry[V]
}

// Get returns the entry for key, or ErrCacheMiss when the key is absent
// or the entry is no longer valid.
func (i *index[K, V]) Get(key K) (*entry[V], error) {
	i.mutex.RLock()
	entry, ok := i.entries[key]
	i.mutex.RUnlock()
	if ok && entry.isValid(i.config) {
		return entry, nil
	}
	return nil, cache.ErrCacheMiss
}

// Set stores entry under every key.
// The receiver is named i for consistency with the other methods (was c).
func (i *index[K, V]) Set(keys []K, entry *entry[V]) {
	i.mutex.Lock()
	for _, key := range keys {
		i.entries[key] = entry
	}
	i.mutex.Unlock()
}

// Invalidate flags the entries for the given keys as invalid.
// Only the entry's atomic flag is written, so a read-lock suffices.
func (i *index[K, V]) Invalidate(keys []K) {
	i.mutex.RLock()
	for _, key := range keys {
		if entry, ok := i.entries[key]; ok {
			entry.invalid.Store(true)
		}
	}
	i.mutex.RUnlock()
}

// Delete removes the given keys; the entries themselves are untouched.
func (i *index[K, V]) Delete(keys []K) {
	i.mutex.Lock()
	for _, key := range keys {
		delete(i.entries, key)
	}
	i.mutex.Unlock()
}

// Prune drops all entries that are no longer valid.
func (i *index[K, V]) Prune() {
	i.mutex.Lock()
	maps.DeleteFunc(i.entries, func(_ K, entry *entry[V]) bool {
		return !entry.isValid(i.config)
	})
	i.mutex.Unlock()
}

// Truncate drops all entries.
func (i *index[K, V]) Truncate() {
	i.mutex.Lock()
	i.entries = make(map[K]*entry[V])
	i.mutex.Unlock()
}
// entry is a cached value with bookkeeping for age- and use-based expiry.
type entry[V any] struct {
	value   V
	created time.Time
	invalid atomic.Bool
	lastUse atomic.Int64 // UnixMicro time
}

// isValid reports whether the entry may still be served, based on the
// invalid flag, MaxAge (time since creation) and LastUseAge (time since
// last read). Expiry flags the entry invalid as a side effect; a valid
// read refreshes lastUse.
func (e *entry[V]) isValid(c *cache.Config) bool {
	if e.invalid.Load() {
		return false
	}
	now := time.Now()
	if c.MaxAge > 0 {
		if e.created.Add(c.MaxAge).Before(now) {
			e.invalid.Store(true)
			return false
		}
	}
	if c.LastUseAge > 0 {
		lastUse := e.lastUse.Load()
		if time.UnixMicro(lastUse).Add(c.LastUseAge).Before(now) {
			e.invalid.Store(true)
			return false
		}
		// CompareAndSwap so a concurrent refresh is not overwritten
		// with an older timestamp.
		e.lastUse.CompareAndSwap(lastUse, now.UnixMicro())
	}
	return true
}

View File

@@ -1,329 +0,0 @@
package gomap
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// testIndex enumerates the indices used by the map cache tests.
type testIndex int

const (
	testIndexID testIndex = iota
	testIndexName
)

// testIndices lists every index the test caches are constructed with.
var testIndices = []testIndex{
	testIndexID,
	testIndexName,
}

// testObject is a cache entry addressable by one ID and multiple names.
type testObject struct {
	id    string
	names []string
}

// Keys implements cache.Entry: the single ID for the ID index, all names
// for the name index, nil for unknown indices.
func (o *testObject) Keys(index testIndex) []string {
	switch index {
	case testIndexID:
		return []string{o.id}
	case testIndexName:
		return o.names
	default:
		return nil
	}
}
// Test_mapCache_Get covers hits, misses and unknown-index lookups.
func Test_mapCache_Get(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)

	type args struct {
		index testIndex
		key   string
	}
	tests := []struct {
		name   string
		args   args
		want   *testObject
		wantOk bool
	}{
		{
			name: "ok",
			args: args{
				index: testIndexID,
				key:   "id",
			},
			want:   obj,
			wantOk: true,
		},
		{
			name: "miss",
			args: args{
				index: testIndexID,
				key:   "spanac",
			},
			want:   nil,
			wantOk: false,
		},
		{
			// 99 is not part of testIndices, so the lookup must fail.
			name: "unknown index",
			args: args{
				index: 99,
				key:   "id",
			},
			want:   nil,
			wantOk: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, ok := c.Get(context.Background(), tt.args.index, tt.args.key)
			assert.Equal(t, tt.want, got)
			assert.Equal(t, tt.wantOk, ok)
		})
	}
}
// Test_mapCache_Invalidate verifies that invalidating via one index hides
// the object from all indices.
func Test_mapCache_Invalidate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)
	err := c.Invalidate(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	// Invalidation is entry-wide: the ID lookup must miss as well.
	got, ok := c.Get(context.Background(), testIndexID, "id")
	assert.Nil(t, got)
	assert.False(t, ok)
}

// Test_mapCache_Delete verifies that deleting a key only removes that key,
// leaving the object reachable through its other keys and indices.
func Test_mapCache_Delete(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)
	err := c.Delete(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	// Shouldn't find object by deleted name
	got, ok := c.Get(context.Background(), testIndexName, "bar")
	assert.Nil(t, got)
	assert.False(t, ok)
	// Should find object by other name
	got, ok = c.Get(context.Background(), testIndexName, "foo")
	assert.Equal(t, obj, got)
	assert.True(t, ok)
	// Should find object by id
	got, ok = c.Get(context.Background(), testIndexID, "id")
	assert.Equal(t, obj, got)
	assert.True(t, ok)
}

// Test_mapCache_Prune verifies pruning drops invalidated entries while
// keeping valid ones.
func Test_mapCache_Prune(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	objects := []*testObject{
		{
			id:    "id1",
			names: []string{"foo", "bar"},
		},
		{
			id:    "id2",
			names: []string{"hello"},
		},
	}
	for _, obj := range objects {
		c.Set(context.Background(), obj)
	}
	// invalidate one entry
	err := c.Invalidate(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	err = c.(cache.Pruner).Prune(context.Background())
	require.NoError(t, err)
	// Other object should still be found
	got, ok := c.Get(context.Background(), testIndexID, "id2")
	assert.Equal(t, objects[1], got)
	assert.True(t, ok)
}

// Test_mapCache_Truncate verifies truncation empties every index map.
func Test_mapCache_Truncate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	objects := []*testObject{
		{
			id:    "id1",
			names: []string{"foo", "bar"},
		},
		{
			id:    "id2",
			names: []string{"hello"},
		},
	}
	for _, obj := range objects {
		c.Set(context.Background(), obj)
	}
	err := c.Truncate(context.Background())
	require.NoError(t, err)
	// Inspect the concrete type to assert all index maps are empty.
	mc := c.(*mapCache[testIndex, string, *testObject])
	for _, index := range mc.indexMap {
		index.mutex.RLock()
		assert.Len(t, index.entries, 0)
		index.mutex.RUnlock()
	}
}
// Test_entry_isValid is a table test covering the invalid flag, MaxAge and
// LastUseAge expiry, including each check being disabled (zero value).
func Test_entry_isValid(t *testing.T) {
	type fields struct {
		created time.Time
		invalid bool
		lastUse time.Time
	}
	tests := []struct {
		name   string
		fields fields
		config *cache.Config
		want   bool
	}{
		{
			name: "invalid",
			fields: fields{
				created: time.Now(),
				invalid: true,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "max age exceeded",
			fields: fields{
				created: time.Now().Add(-(time.Minute + time.Second)),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "max age disabled",
			fields: fields{
				created: time.Now().Add(-(time.Minute + time.Second)),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				LastUseAge: time.Second,
			},
			want: true,
		},
		{
			name: "last use age exceeded",
			fields: fields{
				created: time.Now().Add(-(time.Minute / 2)),
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "last use age disabled",
			fields: fields{
				created: time.Now().Add(-(time.Minute / 2)),
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.Config{
				MaxAge: time.Minute,
			},
			want: true,
		},
		{
			name: "valid",
			fields: fields{
				created: time.Now(),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			e := &entry[any]{
				created: tt.fields.created,
			}
			e.invalid.Store(tt.fields.invalid)
			e.lastUse.Store(tt.fields.lastUse.UnixMicro())
			got := e.isValid(tt.config)
			assert.Equal(t, tt.want, got)
		})
	}
}

View File

@@ -1,21 +0,0 @@
package noop
import (
"context"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// noop is a Cache implementation that stores nothing and never returns a
// hit; every operation succeeds and does nothing. Used when caching is disabled.
type noop[I, K comparable, V cache.Entry[I, K]] struct{}

// NewCache returns a cache that does nothing.
func NewCache[I, K comparable, V cache.Entry[I, K]]() cache.Cache[I, K, V] {
	return noop[I, K, V]{}
}

func (noop[I, K, V]) Set(context.Context, V)                          {}
func (noop[I, K, V]) Get(context.Context, I, K) (value V, ok bool)    { return }
func (noop[I, K, V]) Invalidate(context.Context, I, ...K) (err error) { return }
func (noop[I, K, V]) Delete(context.Context, I, ...K) (err error)     { return }
func (noop[I, K, V]) Prune(context.Context) (err error)               { return }
func (noop[I, K, V]) Truncate(context.Context) (err error)            { return }

View File

@@ -1,98 +0,0 @@
// Code generated by "enumer -type Connector -transform snake -trimprefix Connector -linecomment -text"; DO NOT EDIT.
package cache
import (
"fmt"
"strings"
)
const _ConnectorName = "memorypostgresredis"
var _ConnectorIndex = [...]uint8{0, 0, 6, 14, 19}
const _ConnectorLowerName = "memorypostgresredis"
func (i Connector) String() string {
if i < 0 || i >= Connector(len(_ConnectorIndex)-1) {
return fmt.Sprintf("Connector(%d)", i)
}
return _ConnectorName[_ConnectorIndex[i]:_ConnectorIndex[i+1]]
}
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _ConnectorNoOp() {
var x [1]struct{}
_ = x[ConnectorUnspecified-(0)]
_ = x[ConnectorMemory-(1)]
_ = x[ConnectorPostgres-(2)]
_ = x[ConnectorRedis-(3)]
}
var _ConnectorValues = []Connector{ConnectorUnspecified, ConnectorMemory, ConnectorPostgres, ConnectorRedis}
var _ConnectorNameToValueMap = map[string]Connector{
_ConnectorName[0:0]: ConnectorUnspecified,
_ConnectorLowerName[0:0]: ConnectorUnspecified,
_ConnectorName[0:6]: ConnectorMemory,
_ConnectorLowerName[0:6]: ConnectorMemory,
_ConnectorName[6:14]: ConnectorPostgres,
_ConnectorLowerName[6:14]: ConnectorPostgres,
_ConnectorName[14:19]: ConnectorRedis,
_ConnectorLowerName[14:19]: ConnectorRedis,
}
var _ConnectorNames = []string{
_ConnectorName[0:0],
_ConnectorName[0:6],
_ConnectorName[6:14],
_ConnectorName[14:19],
}
// ConnectorString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func ConnectorString(s string) (Connector, error) {
if val, ok := _ConnectorNameToValueMap[s]; ok {
return val, nil
}
if val, ok := _ConnectorNameToValueMap[strings.ToLower(s)]; ok {
return val, nil
}
return 0, fmt.Errorf("%s does not belong to Connector values", s)
}
// ConnectorValues returns all values of the enum
func ConnectorValues() []Connector {
return _ConnectorValues
}
// ConnectorStrings returns a slice of all String values of the enum
func ConnectorStrings() []string {
strs := make([]string, len(_ConnectorNames))
copy(strs, _ConnectorNames)
return strs
}
// IsAConnector returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Connector) IsAConnector() bool {
for _, v := range _ConnectorValues {
if i == v {
return true
}
}
return false
}
// MarshalText implements the encoding.TextMarshaler interface for Connector
func (i Connector) MarshalText() ([]byte, error) {
return []byte(i.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface for Connector
func (i *Connector) UnmarshalText(text []byte) error {
var err error
*i, err = ConnectorString(string(text))
return err
}

View File

@@ -1,29 +0,0 @@
package cache
import (
"errors"
"fmt"
)
// IndexUnknownError is returned when a cache operation refers to an index
// that was not defined when the cache was constructed.
type IndexUnknownError[I comparable] struct {
	index I
}

// NewIndexUnknownErr returns an IndexUnknownError for the given index.
func NewIndexUnknownErr[I comparable](index I) error {
	return IndexUnknownError[I]{index}
}

// Error implements the error interface.
// The receiver is named e consistently across methods (was i / a).
func (e IndexUnknownError[I]) Error() string {
	return fmt.Sprintf("index %v unknown", e.index)
}

// Is reports whether err is an IndexUnknownError for the same index,
// enabling comparison with errors.Is.
func (e IndexUnknownError[I]) Is(err error) bool {
	if other, ok := err.(IndexUnknownError[I]); ok {
		return e.index == other.index
	}
	return false
}

var (
	// ErrCacheMiss is returned when a key is not found in an index or the
	// cached object is no longer valid.
	ErrCacheMiss = errors.New("cache miss")
)

View File

@@ -1,76 +0,0 @@
package cache
import (
"context"
"math/rand"
"time"
"github.com/jonboulle/clockwork"
"github.com/zitadel/logging"
)
// Pruner is an optional [Cache] interface.
type Pruner interface {
	// Prune deletes all invalidated or expired objects.
	Prune(ctx context.Context) error
}

// PrunerCache is a Cache that also supports pruning.
type PrunerCache[I, K comparable, V Entry[I, K]] interface {
	Cache[I, K, V]
	Pruner
}

// AutoPruneConfig configures periodic background pruning of a cache.
type AutoPruneConfig struct {
	// Interval at which the cache is automatically pruned.
	// 0 or lower disables automatic pruning.
	Interval time.Duration
	// Timeout for an automatic prune.
	// It is recommended to keep the value shorter than AutoPruneInterval
	// 0 or lower disables automatic pruning.
	Timeout time.Duration
}

// StartAutoPrune starts a background prune loop using the real clock.
// The returned close function stops the loop.
func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, purpose Purpose) (close func()) {
	return c.startAutoPrune(background, pruner, purpose, clockwork.NewRealClock())
}
// startAutoPrune is the testable core of StartAutoPrune; it accepts a
// clock so tests can advance time deterministically.
func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, purpose Purpose, clock clockwork.Clock) (close func()) {
	if c.Interval <= 0 {
		return func() {}
	}
	background, cancel := context.WithCancel(background)
	// randomize the first interval so multiple caches do not prune in lockstep
	timer := clock.NewTimer(time.Duration(rand.Int63n(int64(c.Interval))))
	go c.pruneTimer(background, pruner, purpose, timer)
	return cancel
}
func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, purpose Purpose, timer clockwork.Timer) {
defer func() {
if !timer.Stop() {
<-timer.Chan()
}
}()
for {
select {
case <-background.Done():
return
case <-timer.Chan():
err := c.doPrune(background, pruner)
logging.OnError(err).WithField("purpose", purpose).Error("cache auto prune")
timer.Reset(c.Interval)
}
}
}
func (c *AutoPruneConfig) doPrune(background context.Context, pruner Pruner) error {
ctx, cancel := context.WithCancel(background)
defer cancel()
if c.Timeout > 0 {
ctx, cancel = context.WithTimeout(background, c.Timeout)
defer cancel()
}
return pruner.Prune(ctx)
}

View File

@@ -1,43 +0,0 @@
package cache
import (
"context"
"testing"
"time"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/assert"
)
// testPruner records Prune invocations by signaling on called.
type testPruner struct {
	called chan struct{}
}

// Prune implements [Pruner]; it signals the test and succeeds.
func (p *testPruner) Prune(context.Context) error {
	p.called <- struct{}{}
	return nil
}

// TestAutoPruneConfig_startAutoPrune verifies that a prune run is triggered
// once the first (randomized) interval elapses on the fake clock.
func TestAutoPruneConfig_startAutoPrune(t *testing.T) {
	c := AutoPruneConfig{
		Interval: time.Second,
		Timeout:  time.Millisecond,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	pruner := testPruner{
		called: make(chan struct{}),
	}
	clock := clockwork.NewFakeClock()
	close := c.startAutoPrune(ctx, &pruner, PurposeAuthzInstance, clock)
	defer close()
	// the first interval is rand.Int63n(Interval) < Interval, so advancing
	// by the full Interval guarantees the timer fires
	clock.Advance(time.Second)
	select {
	case _, ok := <-pruner.called:
		assert.True(t, ok)
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
}

View File

@@ -1,90 +0,0 @@
// Code generated by "enumer -type Purpose -transform snake -trimprefix Purpose"; DO NOT EDIT.
package cache
import (
"fmt"
"strings"
)
const _PurposeName = "unspecifiedauthz_instancemilestonesorganizationid_p_form_callback"
var _PurposeIndex = [...]uint8{0, 11, 25, 35, 47, 65}
const _PurposeLowerName = "unspecifiedauthz_instancemilestonesorganizationid_p_form_callback"
func (i Purpose) String() string {
if i < 0 || i >= Purpose(len(_PurposeIndex)-1) {
return fmt.Sprintf("Purpose(%d)", i)
}
return _PurposeName[_PurposeIndex[i]:_PurposeIndex[i+1]]
}
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _PurposeNoOp() {
var x [1]struct{}
_ = x[PurposeUnspecified-(0)]
_ = x[PurposeAuthzInstance-(1)]
_ = x[PurposeMilestones-(2)]
_ = x[PurposeOrganization-(3)]
_ = x[PurposeIdPFormCallback-(4)]
}
var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones, PurposeOrganization, PurposeIdPFormCallback}
var _PurposeNameToValueMap = map[string]Purpose{
_PurposeName[0:11]: PurposeUnspecified,
_PurposeLowerName[0:11]: PurposeUnspecified,
_PurposeName[11:25]: PurposeAuthzInstance,
_PurposeLowerName[11:25]: PurposeAuthzInstance,
_PurposeName[25:35]: PurposeMilestones,
_PurposeLowerName[25:35]: PurposeMilestones,
_PurposeName[35:47]: PurposeOrganization,
_PurposeLowerName[35:47]: PurposeOrganization,
_PurposeName[47:65]: PurposeIdPFormCallback,
_PurposeLowerName[47:65]: PurposeIdPFormCallback,
}
var _PurposeNames = []string{
_PurposeName[0:11],
_PurposeName[11:25],
_PurposeName[25:35],
_PurposeName[35:47],
_PurposeName[47:65],
}
// PurposeString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func PurposeString(s string) (Purpose, error) {
if val, ok := _PurposeNameToValueMap[s]; ok {
return val, nil
}
if val, ok := _PurposeNameToValueMap[strings.ToLower(s)]; ok {
return val, nil
}
return 0, fmt.Errorf("%s does not belong to Purpose values", s)
}
// PurposeValues returns all values of the enum
func PurposeValues() []Purpose {
return _PurposeValues
}
// PurposeStrings returns a slice of all String values of the enum
func PurposeStrings() []string {
strs := make([]string, len(_PurposeNames))
copy(strs, _PurposeNames)
return strs
}
// IsAPurpose returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Purpose) IsAPurpose() bool {
for _, v := range _PurposeValues {
if i == v {
return true
}
}
return false
}

View File

@@ -1,58 +0,0 @@
package db
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver"
"github.com/zitadel/zitadel/backend/storage/database"
)
// NewInstance returns a new instance receiver backed by the given client.
func NewInstance(client database.QueryExecutor) receiver.InstanceManipulator {
	return &instance{client: client}
}

// instance is the sql interface for instances.
type instance struct {
	client database.QueryExecutor
}

// ByID implements receiver.InstanceReader. Database errors (including the
// driver's "no rows" error for unknown ids) are returned unchanged.
func (i *instance) ByID(ctx context.Context, id string) (*receiver.Instance, error) {
	var instance receiver.Instance
	err := i.client.QueryRow(ctx, "SELECT id, name, state FROM instances WHERE id = $1", id).
		Scan(
			&instance.ID,
			&instance.Name,
			&instance.State,
		)
	if err != nil {
		return nil, err
	}
	return &instance, nil
}

// AddDomain implements [receiver.InstanceManipulator].
func (i *instance) AddDomain(ctx context.Context, instance *receiver.Instance, domain *receiver.Domain) error {
	return i.client.Exec(ctx, "INSERT INTO instance_domains (instance_id, domain, is_primary) VALUES ($1, $2, $3)", instance.ID, domain.Name, domain.IsPrimary)
}

// Create implements [receiver.InstanceManipulator].
func (i *instance) Create(ctx context.Context, instance *receiver.Instance) error {
	return i.client.Exec(ctx, "INSERT INTO instances (id, name, state) VALUES ($1, $2, $3)", instance.ID, instance.Name, instance.State)
}

// Delete implements [receiver.InstanceManipulator].
// NOTE(review): instance_domains rows are not deleted here — presumably a
// foreign key with ON DELETE CASCADE handles them; confirm against the schema.
func (i *instance) Delete(ctx context.Context, instance *receiver.Instance) error {
	return i.client.Exec(ctx, "DELETE FROM instances WHERE id = $1", instance.ID)
}

// SetPrimaryDomain implements [receiver.InstanceManipulator].
// The statement parses as "SET is_primary = (domain = $1)": all domain rows
// of the instance are updated — true for the matching domain, false for the
// rest — so exactly one primary remains.
// NOTE(review): consider explicit parentheses in the SQL for readability.
func (i *instance) SetPrimaryDomain(ctx context.Context, instance *receiver.Instance, domain *receiver.Domain) error {
	return i.client.Exec(ctx, "UPDATE instance_domains SET is_primary = domain = $1 WHERE instance_id = $2", domain.Name, instance.ID)
}

// compile-time interface assertions
var (
	_ receiver.InstanceManipulator = (*instance)(nil)
	_ receiver.InstanceReader      = (*instance)(nil)
)

View File

@@ -1,6 +0,0 @@
package receiver
// Domain is a domain name attached to an instance; at most one domain of an
// instance is expected to be primary.
type Domain struct {
	Name      string
	IsPrimary bool
}

View File

@@ -1,7 +0,0 @@
package receiver
// Email is a verifiable email address of a user.
type Email struct {
	Verifiable
	Address string
}

View File

@@ -1,57 +0,0 @@
package receiver
import (
"context"
"github.com/zitadel/zitadel/backend/command/receiver/cache"
)
// InstanceState enumerates the lifecycle states of an instance.
type InstanceState uint8

const (
	InstanceStateActive InstanceState = iota
	InstanceStateDeleted
)

// Instance describes a single ZITADEL instance and its domains.
type Instance struct {
	ID      string
	Name    string
	State   InstanceState
	Domains []*Domain
}

// InstanceIndex identifies a cache index maintained for instances.
type InstanceIndex uint8

// InstanceIndices lists all indices maintained for instances.
var InstanceIndices = []InstanceIndex{
	InstanceByID,
	InstanceByDomain,
}

const (
	InstanceByID InstanceIndex = iota
	InstanceByDomain
)

// compile-time assertion that Instance can be cached
var _ cache.Entry[InstanceIndex, string] = (*Instance)(nil)
// Keys implements [cache.Entry]. It returns the cache keys of the instance
// for the given index: the instance id for InstanceByID and every domain
// name for InstanceByDomain. Unknown indices yield nil.
func (i *Instance) Keys(index InstanceIndex) (key []string) {
	switch index {
	case InstanceByID:
		return []string{i.ID}
	case InstanceByDomain:
		// Fix: the domain index must be keyed by the instance's domains;
		// previously the instance display Name was returned, which is not
		// a domain and made domain lookups miss.
		keys := make([]string, len(i.Domains))
		for idx, domain := range i.Domains {
			keys[idx] = domain.Name
		}
		return keys
	}
	return nil
}
// InstanceManipulator writes instances and their domains to storage.
type InstanceManipulator interface {
	Create(ctx context.Context, instance *Instance) error
	Delete(ctx context.Context, instance *Instance) error
	AddDomain(ctx context.Context, instance *Instance, domain *Domain) error
	SetPrimaryDomain(ctx context.Context, instance *Instance, domain *Domain) error
}

// InstanceReader loads instances from storage.
type InstanceReader interface {
	ByID(ctx context.Context, id string) (*Instance, error)
}

View File

@@ -1,7 +0,0 @@
package receiver
// Phone is a verifiable phone number of a user.
type Phone struct {
	Verifiable
	Number string
}

View File

@@ -1,9 +0,0 @@
package receiver
// User is the receiver representation of a user account.
// Email and Phone are nil when not set.
type User struct {
	ID       string
	Username string
	Email    *Email
	Phone    *Phone
}

View File

@@ -1,8 +0,0 @@
package receiver
import "github.com/zitadel/zitadel/internal/crypto"
// Verifiable embeds verification state shared by contact methods.
type Verifiable struct {
	IsVerified bool
	// Code holds the encrypted verification code; presumably nil when no
	// verification is pending — TODO confirm with the writers of this field.
	Code *crypto.CryptoValue
}

View File

@@ -1,6 +0,0 @@
// Package api implements the protobuf stubs.
//
// It uses the chain-of-responsibility pattern to handle requests in a
// modular way and acts as the client (invoker) of the command pattern:
// it creates the concrete commands and sets their receivers.
package api

View File

@@ -1,35 +0,0 @@
package userv2
import (
"context"
"github.com/muhlemmer/gu"
"github.com/zitadel/zitadel/backend/command/v2/domain"
"github.com/zitadel/zitadel/pkg/grpc/user/v2"
)
// SetEmail sets the email address of the user identified in the request.
// The verification oneof maps onto the domain request: IsVerified marks the
// address verified immediately, SendCode requests a code message (with an
// optional URL template), and ReturnCode makes the generated code part of
// the response instead of sending it.
func (s *Server) SetEmail(ctx context.Context, req *user.SetEmailRequest) (resp *user.SetEmailResponse, err error) {
	request := &domain.SetUserEmail{
		UserID: req.GetUserId(),
		Email:  req.GetEmail(),
	}
	switch req.GetVerification().(type) {
	case *user.SetEmailRequest_IsVerified:
		request.IsVerified = gu.Ptr(req.GetIsVerified())
	case *user.SetEmailRequest_SendCode:
		request.SendCode = &domain.SendCode{
			URLTemplate: req.GetSendCode().UrlTemplate,
		}
	case *user.SetEmailRequest_ReturnCode:
		request.ReturnCode = new(domain.ReturnCode)
	}
	if err := s.domain.SetUserEmail(ctx, request); err != nil {
		return nil, err
	}
	response := new(user.SetEmailResponse)
	// only populated when the caller asked for the code to be returned
	if request.ReturnCode != nil {
		response.VerificationCode = &request.ReturnCode.Code
	}
	return response, nil
}

View File

@@ -1,12 +0,0 @@
package userv2
import (
"go.opentelemetry.io/otel/trace"
"github.com/zitadel/zitadel/backend/command/v2/domain"
)
// Server implements the user v2 gRPC service.
type Server struct {
	tracer trace.Tracer
	domain *domain.Domain
}

View File

@@ -1,41 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
"github.com/zitadel/zitadel/internal/crypto"
)
// generateCode is a command that runs a generator query and hands the
// resulting plain-text code to a callback.
type generateCode struct {
	// set receives the generated plain-text code.
	set func(code string)
	// generator supplies the crypto.Generator used to create the code.
	generator pattern.Query[crypto.Generator]
}

// GenerateCode returns a command that executes the generator query and
// passes the generated plain-text code to set.
func GenerateCode(set func(code string), generator pattern.Query[crypto.Generator]) *generateCode {
	return &generateCode{
		set:       set,
		generator: generator,
	}
}

var _ pattern.Command = (*generateCode)(nil)

// Execute implements [pattern.Command].
func (cmd *generateCode) Execute(ctx context.Context) error {
	if err := cmd.generator.Execute(ctx); err != nil {
		return err
	}
	value, code, err := cmd.generator.Result(), "", error(nil)
	value, code, err = crypto.NewCode(cmd.generator.Result())
	if err != nil {
		// fix: check the error before touching the results; previously the
		// values were consumed (discarded) ahead of the error check
		return err
	}
	// NOTE(review): value is the encrypted representation of the code and is
	// currently discarded, so the code cannot be verified later — confirm
	// whether it must be persisted alongside the plain-text code.
	_ = value
	cmd.set(code)
	return nil
}

// Name implements [pattern.Command].
func (*generateCode) Name() string {
	return "command.generate_code"
}

View File

@@ -1,41 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
)
var _ pattern.Command = (*sendEmailCode)(nil)

// sendEmailCode generates a verification code and sends it to the user's
// email address.
type sendEmailCode struct {
	UserID      string  `json:"userId"`
	Email       string  `json:"email"`
	URLTemplate *string `json:"urlTemplate"`
	// code is filled by GenerateCode via SetCode; excluded from serialization.
	code string `json:"-"`
}

// SendEmailCode builds the composite command for sending an email code.
// NOTE(review): `generateCode` below is the name of a type in this package,
// not a value of pattern.Query[crypto.Generator] — as written this cannot
// compile; the generator query needs to be constructed and passed in.
func SendEmailCode(userID, email string, urlTemplate *string) pattern.Command {
	cmd := &sendEmailCode{
		UserID:      userID,
		Email:       email,
		URLTemplate: urlTemplate,
	}
	return pattern.Batch(GenerateCode(cmd.SetCode, generateCode))
}

// Name implements [pattern.Command].
func (c *sendEmailCode) Name() string {
	return "user.v2.email.send_code"
}

// Execute implements [pattern.Command].
// NOTE(review): stub — no message is sent yet.
func (c *sendEmailCode) Execute(ctx context.Context) error {
	// Implementation of the command execution
	return nil
}

// SetCode stores the generated plain-text code on the command.
func (c *sendEmailCode) SetCode(code string) {
	c.code = code
}

View File

@@ -1,39 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/storage/eventstore"
)
var (
	_ eventstore.EventCommander = (*setEmail)(nil)
)

// setEmail is the event command that changes a user's email address.
type setEmail struct {
	UserID string `json:"userId"`
	Email  string `json:"email"`
}

// SetEmail returns the command setting the given user's email address.
func SetEmail(userID, email string) *setEmail {
	return &setEmail{
		UserID: userID,
		Email:  email,
	}
}

// Event implements [eventstore.EventCommander].
// NOTE(review): not implemented yet — calling it panics.
func (c *setEmail) Event() *eventstore.Event {
	panic("unimplemented")
}

// Name implements [pattern.Command].
func (c *setEmail) Name() string {
	return "user.v2.set_email"
}

// Execute implements [pattern.Command].
// NOTE(review): stub — performs no work yet.
func (c *setEmail) Execute(ctx context.Context) error {
	// Implementation of the command execution
	return nil
}

View File

@@ -1,32 +0,0 @@
package command
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
)
var _ pattern.Command = (*verifyEmail)(nil)

// verifyEmail marks the given user's email address as verified.
type verifyEmail struct {
	UserID string `json:"userId"`
	Email  string `json:"email"`
}

// VerifyEmail returns the command verifying the user's email address.
func VerifyEmail(userID, email string) *verifyEmail {
	return &verifyEmail{
		UserID: userID,
		Email:  email,
	}
}

// Name implements [pattern.Command].
func (c *verifyEmail) Name() string {
	return "user.v2.verify_email"
}

// Execute implements [pattern.Command].
// NOTE(review): stub — performs no work yet.
func (c *verifyEmail) Execute(ctx context.Context) error {
	// Implementation of the command execution
	return nil
}

View File

@@ -1,13 +0,0 @@
package domain
import (
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
"github.com/zitadel/zitadel/internal/crypto"
"go.opentelemetry.io/otel/trace"
)
// Domain bundles the dependencies of the v2 domain layer.
type Domain struct {
	pool        database.Pool
	tracer      trace.Tracer
	userCodeAlg crypto.EncryptionAlgorithm
}

View File

@@ -1,6 +0,0 @@
package domain
// Email is the domain representation of an email address and its
// verification state.
type Email struct {
	Address  string
	Verified bool
}

View File

@@ -1,42 +0,0 @@
package query
import (
"context"
"github.com/zitadel/zitadel/internal/crypto"
)
// encryptionConfigReceiver loads the generator configuration from storage.
type encryptionConfigReceiver interface {
	GetEncryptionConfig(ctx context.Context) (*crypto.GeneratorConfig, error)
}

// encryptionGenerator is a query producing a crypto.Generator built from the
// stored configuration and the given encryption algorithm.
type encryptionGenerator struct {
	receiver  encryptionConfigReceiver
	algorithm crypto.EncryptionAlgorithm
	res       crypto.Generator
}

// QueryEncryptionGenerator constructs the query; Execute must run before
// Result is meaningful.
func QueryEncryptionGenerator(receiver encryptionConfigReceiver, algorithm crypto.EncryptionAlgorithm) *encryptionGenerator {
	return &encryptionGenerator{
		receiver:  receiver,
		algorithm: algorithm,
	}
}

// Execute loads the config and builds the generator.
func (q *encryptionGenerator) Execute(ctx context.Context) error {
	config, err := q.receiver.GetEncryptionConfig(ctx)
	if err != nil {
		return err
	}
	q.res = crypto.NewEncryptionGenerator(*config, q.algorithm)
	return nil
}

// Name implements [pattern.Command].
func (q *encryptionGenerator) Name() string {
	return "query.encryption_generator"
}

// Result implements [pattern.Query]; only valid after a successful Execute.
func (q *encryptionGenerator) Result() crypto.Generator {
	return q.res
}

View File

@@ -1,38 +0,0 @@
package query
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
)
var _ pattern.Query[string] = (*returnEmailCode)(nil)

// returnEmailCode generates a verification code for the email address and
// returns it to the caller instead of sending a message.
type returnEmailCode struct {
	UserID string `json:"userId"`
	Email  string `json:"email"`
	// code holds the generated code; excluded from serialization.
	code string `json:"-"`
}

// ReturnEmailCode constructs the query for the given user and address.
func ReturnEmailCode(userID, email string) *returnEmailCode {
	return &returnEmailCode{
		UserID: userID,
		Email:  email,
	}
}

// Name implements [pattern.Command].
func (c *returnEmailCode) Name() string {
	return "user.v2.email.return_code"
}

// Execute implements [pattern.Command].
// NOTE(review): stub — code is never populated, so Result always returns "".
func (c *returnEmailCode) Execute(ctx context.Context) error {
	// Implementation of the command execution
	return nil
}

// Result implements [pattern.Query].
func (c *returnEmailCode) Result() string {
	return c.code
}

View File

@@ -1,38 +0,0 @@
package query
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/domain"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
// UserByIDQuery loads a single user by id.
type UserByIDQuery struct {
	querier database.Querier
	UserID  string `json:"userId"`
	// res caches the loaded user for Result.
	res *domain.User
}

var _ pattern.Query[*domain.User] = (*UserByIDQuery)(nil)

// Name implements [pattern.Command].
func (q *UserByIDQuery) Name() string {
	return "user.v2.by_id"
}
// Execute implements [pattern.Command]. It loads the user row and stores it
// for retrieval via Result.
func (q *UserByIDQuery) Execute(ctx context.Context) error {
	// Fix: allocate the result before scanning. The original declared a nil
	// *domain.User and scanned into its fields, which panics at runtime.
	res := new(domain.User)
	err := q.querier.QueryRow(ctx, "SELECT id, username, email FROM users WHERE id = $1", q.UserID).Scan(&res.ID, &res.Username, &res.Email.Address)
	if err != nil {
		return err
	}
	q.res = res
	return nil
}
// Result implements [pattern.Query]. It returns the user loaded by Execute,
// or nil if Execute has not yet run successfully.
func (q *UserByIDQuery) Result() *domain.User {
	return q.res
}

View File

@@ -1,77 +0,0 @@
package domain
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/domain/command"
"github.com/zitadel/zitadel/backend/command/v2/domain/query"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
"github.com/zitadel/zitadel/backend/command/v2/telemetry/tracing"
)
// User is the domain representation of a user account.
type User struct {
	ID       string
	Username string
	Email    Email
}

// SetUserEmail is the request to change a user's email address. At most one
// of IsVerified, ReturnCode or SendCode is expected to be set; when none is
// set a verification code is generated.
type SetUserEmail struct {
	UserID string
	Email  string
	IsVerified *bool
	ReturnCode *ReturnCode
	SendCode   *SendCode
	// code is filled by the code-generation command via SetCode.
	code   string
	client database.QueryExecutor
}

// SetCode stores the generated verification code on the request.
func (e *SetUserEmail) SetCode(code string) {
	e.code = code
}

// ReturnCode requests the verification code to be returned to the caller
// instead of being sent.
type ReturnCode struct {
	// Code is the code to be sent to the user
	Code string
}

// SendCode requests the verification code to be sent to the user.
type SendCode struct {
	// URLTemplate is the template for the URL that is rendered into the message
	URLTemplate *string
}
// SetUserEmail changes the user's email address. The change is always
// recorded (SetEmail); additionally, when no verification state is supplied
// a verification code is generated, otherwise the address is verified
// directly. All steps run as one traced batch.
//
// NOTE(review): IsVerified pointing at false still verifies the address —
// confirm whether *IsVerified == false should instead trigger a code.
func (d *Domain) SetUserEmail(ctx context.Context, req *SetUserEmail) error {
	batch := pattern.Batch(
		tracing.Trace(d.tracer, command.SetEmail(req.UserID, req.Email)),
	)
	if req.IsVerified == nil {
		// no verification state provided: generate a verification code
		batch.Append(command.GenerateCode(
			req.SetCode,
			query.QueryEncryptionGenerator(
				database.Query(d.pool),
				d.userCodeAlg,
			),
		))
	} else {
		batch.Append(command.VerifyEmail(req.UserID, req.Email))
	}
	// Fix: the original body ended without any return statement (a compile
	// error); executing the batch matches the commented-out intent it carried.
	return batch.Execute(ctx)
}

View File

@@ -1,100 +0,0 @@
package pattern
import (
"context"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
// Command implements the command pattern: it encapsulates a request as a
// stand-alone object carrying all information about the request, decoupling
// sender from receiver. Commands can be queued, logged, traced and composed
// (see Batch) before being executed by an invoker.
type Command interface {
	Execute(ctx context.Context) error
	Name() string
}

// Query is a Command that additionally produces a result, available via
// Result after a successful Execute.
type Query[T any] interface {
	Command
	Result() T
}

// Invoker executes commands; currently a placeholder.
type Invoker struct{}

// func bla() {
// 	sync.Pool{
// 		New: func() any {
// 			return new(Invoker)
// 		},
// 	}
// }
// Transaction decorates a command so that it runs inside a database
// transaction, committed on success and rolled back on error.
type Transaction struct {
	beginner database.Beginner
	cmd      Command
	opts     *database.TransactionOptions
}

// Execute implements [Command].
//
// Fix: the result must be a named return so the deferred End can observe the
// command's error (roll back instead of committing on failure) and so End's
// own error is actually returned. Previously the deferred assignment wrote
// to a dead local and End always received a nil error.
func (t *Transaction) Execute(ctx context.Context) (err error) {
	tx, err := t.beginner.Begin(ctx, t.opts)
	if err != nil {
		return err
	}
	// End commits when err is nil, rolls back otherwise.
	defer func() { err = tx.End(ctx, err) }()
	return t.cmd.Execute(ctx)
}

// Name implements [Command], delegating to the wrapped command.
func (t *Transaction) Name() string {
	return t.cmd.Name()
}
// batch groups multiple commands into one command that runs them in order.
type batch struct {
	Commands []Command
}

// Batch bundles the given commands into a single sequential command.
func Batch(cmds ...Command) *batch {
	return &batch{Commands: cmds}
}

// Execute runs every command in order and stops at the first error.
func (b *batch) Execute(ctx context.Context) error {
	for i := range b.Commands {
		if err := b.Commands[i].Execute(ctx); err != nil {
			return err
		}
	}
	return nil
}

// Name implements [Command].
func (b *batch) Name() string {
	return "batch"
}

// Append adds further commands to the end of the batch.
func (b *batch) Append(cmds ...Command) {
	b.Commands = append(b.Commands, cmds...)
}
// NoopCommand is a Command that does nothing; useful as a neutral element.
type NoopCommand struct{}

// Execute implements [Command] and always succeeds.
func (c *NoopCommand) Execute(_ context.Context) error {
	return nil
}

// Name implements [Command].
func (c *NoopCommand) Name() string {
	return "noop"
}

// NoopQuery is a Query that does nothing and yields the zero value of T.
type NoopQuery[T any] struct {
	NoopCommand
}

// Result implements [Query].
func (q *NoopQuery[T]) Result() T {
	var zero T
	return zero
}

View File

@@ -1,9 +0,0 @@
package database
import (
"context"
)
// Connector creates a connection pool from dialect-specific configuration.
type Connector interface {
	Connect(ctx context.Context) (Pool, error)
}

View File

@@ -1,54 +0,0 @@
package database
import (
"context"
)
var (
	// db is the process-wide database handle.
	// NOTE(review): db and the database struct below are unused within this
	// file — confirm whether they are still needed.
	db *database
)

type database struct {
	connector Connector
	pool      Pool
}

// Pool is a pool of database connections.
type Pool interface {
	Beginner
	QueryExecutor
	Acquire(ctx context.Context) (Client, error)
	Close(ctx context.Context) error
}

// Client is a single acquired connection; Release must be called after use.
type Client interface {
	Beginner
	QueryExecutor
	Release(ctx context.Context) error
}

// Querier runs read statements.
type Querier interface {
	Query(ctx context.Context, stmt string, args ...any) (Rows, error)
	QueryRow(ctx context.Context, stmt string, args ...any) Row
}

// Executor runs write statements.
type Executor interface {
	Exec(ctx context.Context, stmt string, args ...any) error
}

// Row scans a single result row.
type Row interface {
	Scan(dest ...any) error
}

// Rows iterates over a result set; Close must be called and Err checked
// after iteration.
type Rows interface {
	Row
	Next() bool
	Close() error
	Err() error
}

// QueryExecutor combines read and write access.
type QueryExecutor interface {
	Querier
	Executor
}

View File

@@ -1,92 +0,0 @@
package dialect
import (
"context"
"errors"
"reflect"
"github.com/mitchellh/mapstructure"
"github.com/spf13/viper"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/storage/database/dialect/postgres"
)
// Hook describes how a dialect is discovered and decoded from configuration.
type Hook struct {
	// Match reports whether the config key refers to this dialect.
	Match func(string) bool
	// Decode turns the raw config value into a Connector.
	Decode func(config any) (database.Connector, error)
	Name   string
	// Constructor returns an empty Connector of this dialect.
	Constructor func() database.Connector
}

// hooks lists all known dialects; the first matching hook wins.
var hooks = []Hook{
	{
		Match:       postgres.NameMatcher,
		Decode:      postgres.DecodeConfig,
		Name:        postgres.Name,
		Constructor: func() database.Connector { return new(postgres.Config) },
	},
	// {
	// 	Match: gosql.NameMatcher,
	// 	Decode: gosql.DecodeConfig,
	// 	Name: gosql.Name,
	// 	Constructor: func() database.Connector { return new(gosql.Config) },
	// },
}
// Config is the database configuration; exactly one dialect entry (e.g.
// "postgres") must be present in Dialects.
type Config struct {
	// Dialects collects all remaining configuration keys.
	Dialects map[string]any `mapstructure:",remain" yaml:",inline"`

	connector database.Connector
}

// Connect establishes the connection pool using the configured dialect.
func (c Config) Connect(ctx context.Context) (database.Pool, error) {
	if len(c.Dialects) != 1 {
		// fix: error strings are lowercase by Go convention
		return nil, errors.New("exactly one dialect must be configured")
	}
	if c.connector == nil {
		// guard: decodeDialect never ran or did not match; fail explicitly
		// instead of dereferencing a nil connector
		return nil, errors.New("dialect not decoded")
	}
	return c.connector.Connect(ctx)
}
// Hooks implements [configure.Unmarshaller].
func (c Config) Hooks() []viper.DecoderConfigOption {
	return []viper.DecoderConfigOption{
		viper.DecodeHook(decodeHook),
	}
}

// decodeHook converts raw viper values destined for Config into a *Config,
// resolving the configured dialect; other target types pass through untouched.
func decodeHook(from, to reflect.Value) (_ any, err error) {
	if to.Type() != reflect.TypeOf(Config{}) {
		return from.Interface(), nil
	}
	config := new(Config)
	if err = mapstructure.Decode(from.Interface(), config); err != nil {
		return nil, err
	}
	if err = config.decodeDialect(); err != nil {
		return nil, err
	}
	return config, nil
}

// decodeDialect resolves the configured dialect into c.connector.
// Hooks take precedence in their declared order over config-map order.
func (c *Config) decodeDialect() error {
	for _, hook := range hooks {
		for name, config := range c.Dialects {
			if !hook.Match(name) {
				continue
			}
			connector, err := hook.Decode(config)
			if err != nil {
				return err
			}
			c.connector = connector
			return nil
		}
	}
	return errors.New("no dialect found")
}

View File

@@ -1,80 +0,0 @@
package postgres
import (
"context"
"errors"
"slices"
"strings"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/mitchellh/mapstructure"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
var (
	_ database.Connector = (*Config)(nil)

	// Name is the canonical dialect name.
	// NOTE(review): never reassigned here — could be a const.
	Name = "postgres"
)

// Config holds the pgx pool configuration for the postgres dialect.
type Config struct {
	config *pgxpool.Config
	// Host string
	// Port int32
	// Database string
	// MaxOpenConns uint32
	// MaxIdleConns uint32
	// MaxConnLifetime time.Duration
	// MaxConnIdleTime time.Duration
	// User User
	// // Additional options to be appended as options=<Options>
	// // The value will be taken as is. Multiple options are space separated.
	// Options string

	// configuredFields tracks which fields were explicitly configured.
	configuredFields []string
}
// Connect implements [database.Connector]. It opens a pgx pool and verifies
// connectivity with a ping before handing the pool out.
func (c *Config) Connect(ctx context.Context) (database.Pool, error) {
	pool, err := pgxpool.NewWithConfig(ctx, c.config)
	if err != nil {
		return nil, err
	}
	if err = pool.Ping(ctx); err != nil {
		// fix: close the half-initialized pool; previously it leaked its
		// connections when the ping failed
		pool.Close()
		return nil, err
	}
	return &pgxPool{pool}, nil
}
// NameMatcher reports whether name refers to the postgres dialect.
// The comparison is case-insensitive and accepts the alias "pg".
func NameMatcher(name string) bool {
	switch strings.ToLower(name) {
	case "postgres", "pg":
		return true
	default:
		return false
	}
}
// DecodeConfig builds a Connector from either a DSN string or a config map.
//
// NOTE(review): in the map case the decoded `connector` is discarded and an
// empty Config is returned instead; moreover Config has only unexported
// fields, so the mapstructure decode cannot populate anything. This branch
// looks unfinished — confirm intended behavior.
func DecodeConfig(input any) (database.Connector, error) {
	switch c := input.(type) {
	case string:
		// a plain string is treated as a pgx connection string (DSN/URL)
		config, err := pgxpool.ParseConfig(c)
		if err != nil {
			return nil, err
		}
		return &Config{config: config}, nil
	case map[string]any:
		connector := new(Config)
		decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
			WeaklyTypedInput: true,
			Result:           connector,
		})
		if err != nil {
			return nil, err
		}
		if err = decoder.Decode(c); err != nil {
			return nil, err
		}
		return &Config{
			config: &pgxpool.Config{},
		}, nil
	}
	return nil, errors.New("invalid configuration")
}

View File

@@ -1,48 +0,0 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
// pgxConn adapts an acquired pgxpool.Conn to [database.Client].
type pgxConn struct{ *pgxpool.Conn }

var _ database.Client = (*pgxConn)(nil)

// Release implements [database.Client]; pgx's Release never fails.
func (c *pgxConn) Release(_ context.Context) error {
	c.Conn.Release()
	return nil
}

// Begin implements [database.Client].
func (c *pgxConn) Begin(ctx context.Context, opts *database.TransactionOptions) (database.Transaction, error) {
	tx, err := c.Conn.BeginTx(ctx, transactionOptionsToPgx(opts))
	if err != nil {
		return nil, err
	}
	return &pgxTx{tx}, nil
}

// Query implements sql.Client.
// Subtle: this method shadows the method (*Conn).Query of pgxConn.Conn.
func (c *pgxConn) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	rows, err := c.Conn.Query(ctx, sql, args...)
	return &Rows{rows}, err
}

// QueryRow implements sql.Client.
// Subtle: this method shadows the method (*Conn).QueryRow of pgxConn.Conn.
func (c *pgxConn) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return c.Conn.QueryRow(ctx, sql, args...)
}

// Exec implements [database.Pool], discarding pgx's command tag.
// Subtle: this method shadows the method (Pool).Exec of pgxPool.Pool.
func (c *pgxConn) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := c.Conn.Exec(ctx, sql, args...)
	return err
}

View File

@@ -1,57 +0,0 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
// pgxPool adapts a pgxpool.Pool to [database.Pool].
type pgxPool struct{ *pgxpool.Pool }

var _ database.Pool = (*pgxPool)(nil)

// Acquire implements [database.Pool].
func (c *pgxPool) Acquire(ctx context.Context) (database.Client, error) {
	conn, err := c.Pool.Acquire(ctx)
	if err != nil {
		return nil, err
	}
	return &pgxConn{conn}, nil
}

// Query implements [database.Pool].
// Subtle: this method shadows the method (Pool).Query of pgxPool.Pool.
func (c *pgxPool) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	rows, err := c.Pool.Query(ctx, sql, args...)
	return &Rows{rows}, err
}

// QueryRow implements [database.Pool].
// Subtle: this method shadows the method (Pool).QueryRow of pgxPool.Pool.
func (c *pgxPool) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return c.Pool.QueryRow(ctx, sql, args...)
}

// Exec implements [database.Pool], discarding pgx's command tag.
// Subtle: this method shadows the method (Pool).Exec of pgxPool.Pool.
func (c *pgxPool) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := c.Pool.Exec(ctx, sql, args...)
	return err
}

// Begin implements [database.Pool].
func (c *pgxPool) Begin(ctx context.Context, opts *database.TransactionOptions) (database.Transaction, error) {
	tx, err := c.Pool.BeginTx(ctx, transactionOptionsToPgx(opts))
	if err != nil {
		return nil, err
	}
	return &pgxTx{tx}, nil
}

// Close implements [database.Pool]; pgx's Close never fails.
func (c *pgxPool) Close(_ context.Context) error {
	c.Pool.Close()
	return nil
}

View File

@@ -1,18 +0,0 @@
package postgres
import (
"github.com/jackc/pgx/v5"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
var _ database.Rows = (*Rows)(nil)

// Rows adapts pgx.Rows to [database.Rows].
type Rows struct{ pgx.Rows }

// Close implements [database.Rows]; pgx's Close does not return an error.
// Subtle: this method shadows the method (Rows).Close of Rows.Rows.
func (r *Rows) Close() error {
	r.Rows.Close()
	return nil
}

View File

@@ -1,95 +0,0 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/zitadel/zitadel/backend/command/v2/storage/database"
)
// pgxTx adapts a pgx.Tx to [database.Transaction].
type pgxTx struct{ pgx.Tx }

var _ database.Transaction = (*pgxTx)(nil)

// Commit implements [database.Transaction].
func (tx *pgxTx) Commit(ctx context.Context) error {
	return tx.Tx.Commit(ctx)
}

// Rollback implements [database.Transaction].
func (tx *pgxTx) Rollback(ctx context.Context) error {
	return tx.Tx.Rollback(ctx)
}

// End implements [database.Transaction]: rollback when err is non-nil
// (returning the original err), commit otherwise.
// NOTE(review): a rollback failure is silently dropped in favor of err.
func (tx *pgxTx) End(ctx context.Context, err error) error {
	if err != nil {
		tx.Rollback(ctx)
		return err
	}
	return tx.Commit(ctx)
}

// Query implements [database.Transaction].
// Subtle: this method shadows the method (Tx).Query of pgxTx.Tx.
func (tx *pgxTx) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	rows, err := tx.Tx.Query(ctx, sql, args...)
	return &Rows{rows}, err
}

// QueryRow implements [database.Transaction].
// Subtle: this method shadows the method (Tx).QueryRow of pgxTx.Tx.
func (tx *pgxTx) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return tx.Tx.QueryRow(ctx, sql, args...)
}

// Exec implements [database.Transaction], discarding pgx's command tag.
// Subtle: this method shadows the method (Pool).Exec of pgxPool.Pool.
func (tx *pgxTx) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := tx.Tx.Exec(ctx, sql, args...)
	return err
}

// Begin implements [database.Transaction].
// As postgres does not support nested transactions we use savepoints to emulate them.
func (tx *pgxTx) Begin(ctx context.Context) (database.Transaction, error) {
	savepoint, err := tx.Tx.Begin(ctx)
	if err != nil {
		return nil, err
	}
	return &pgxTx{savepoint}, nil
}

// transactionOptionsToPgx maps generic transaction options to pgx options;
// nil yields pgx defaults.
func transactionOptionsToPgx(opts *database.TransactionOptions) pgx.TxOptions {
	if opts == nil {
		return pgx.TxOptions{}
	}
	return pgx.TxOptions{
		IsoLevel:   isolationToPgx(opts.IsolationLevel),
		AccessMode: accessModeToPgx(opts.AccessMode),
	}
}

// isolationToPgx maps the isolation level; serializable is the default.
func isolationToPgx(isolation database.IsolationLevel) pgx.TxIsoLevel {
	switch isolation {
	case database.IsolationLevelSerializable:
		return pgx.Serializable
	case database.IsolationLevelReadCommitted:
		return pgx.ReadCommitted
	default:
		return pgx.Serializable
	}
}

// accessModeToPgx maps the access mode; read-write is the default.
func accessModeToPgx(accessMode database.AccessMode) pgx.TxAccessMode {
	switch accessMode {
	case database.AccessModeReadWrite:
		return pgx.ReadWrite
	case database.AccessModeReadOnly:
		return pgx.ReadOnly
	default:
		return pgx.ReadWrite
	}
}

View File

@@ -1,39 +0,0 @@
package database
import (
"context"
"github.com/zitadel/zitadel/internal/crypto"
)
// query augments a Querier with typed read helpers.
type query struct{ Querier }

// Query wraps the given Querier.
func Query(querier Querier) *query {
	return &query{Querier: querier}
}

// getEncryptionConfigQuery loads the code-generator configuration.
// The table is expected to hold a single row; QueryRow takes the first.
const getEncryptionConfigQuery = "SELECT" +
	" length" +
	", expiry" +
	", should_include_lower_letters" +
	", should_include_upper_letters" +
	", should_include_digits" +
	", should_include_symbols" +
	" FROM encryption_config"

// GetEncryptionConfig reads the code-generator configuration from the database.
func (q query) GetEncryptionConfig(ctx context.Context) (*crypto.GeneratorConfig, error) {
	var config crypto.GeneratorConfig
	row := q.QueryRow(ctx, getEncryptionConfigQuery)
	err := row.Scan(
		&config.Length,
		&config.Expiry,
		&config.IncludeLowerLetters,
		&config.IncludeUpperLetters,
		&config.IncludeDigits,
		&config.IncludeSymbols,
	)
	if err != nil {
		return nil, err
	}
	return &config, nil
}

View File

@@ -1,36 +0,0 @@
package database
import "context"
// Transaction is an open database transaction; nesting is supported via Begin.
type Transaction interface {
	Commit(ctx context.Context) error
	Rollback(ctx context.Context) error
	// End finishes the transaction: rollback when err is non-nil, commit
	// otherwise.
	End(ctx context.Context, err error) error
	Begin(ctx context.Context) (Transaction, error)
	QueryExecutor
}

// Beginner can open a transaction.
type Beginner interface {
	Begin(ctx context.Context, opts *TransactionOptions) (Transaction, error)
}

// TransactionOptions configure isolation and access mode of a transaction.
type TransactionOptions struct {
	IsolationLevel IsolationLevel
	AccessMode     AccessMode
}

// IsolationLevel of a transaction; serializable is the zero value.
type IsolationLevel uint8

const (
	IsolationLevelSerializable IsolationLevel = iota
	IsolationLevelReadCommitted
)

// AccessMode of a transaction; read-write is the zero value.
type AccessMode uint8

const (
	AccessModeReadWrite AccessMode = iota
	AccessModeReadOnly
)

View File

@@ -1,13 +0,0 @@
package eventstore
import "github.com/zitadel/zitadel/backend/command/v2/pattern"
// Event is the minimal event payload written to the eventstore.
type Event struct {
	AggregateType string `json:"aggregateType"`
	AggregateID   string `json:"aggregateId"`
}

// EventCommander is a command that additionally yields the event it produces.
type EventCommander interface {
	pattern.Command
	Event() *Event
}

View File

@@ -1,55 +0,0 @@
package tracing
import (
"context"
"go.opentelemetry.io/otel/trace"
"github.com/zitadel/zitadel/backend/command/v2/pattern"
)
// command decorates a pattern.Command with an OpenTelemetry span per execution.
type command struct {
	trace.Tracer
	cmd pattern.Command
}

// Trace wraps cmd so every Execute runs inside a span named after the command.
func Trace(tracer trace.Tracer, cmd pattern.Command) pattern.Command {
	return &command{
		Tracer: tracer,
		cmd:    cmd,
	}
}

// Name implements [pattern.Command], delegating to the wrapped command.
func (cmd *command) Name() string {
	return cmd.cmd.Name()
}

// Execute implements [pattern.Command]: it starts a span, runs the wrapped
// command and records any error on the span before returning it unchanged.
func (cmd *command) Execute(ctx context.Context) error {
	ctx, span := cmd.Tracer.Start(ctx, cmd.Name())
	defer span.End()
	err := cmd.cmd.Execute(ctx)
	if err != nil {
		span.RecordError(err)
	}
	return err
}

// query decorates a pattern.Query the same way command decorates a Command.
type query[T any] struct {
	command
	query pattern.Query[T]
}

// Query wraps q so its execution is traced; Result is passed through.
func Query[T any](tracer trace.Tracer, q pattern.Query[T]) pattern.Query[T] {
	return &query[T]{
		command: command{
			Tracer: tracer,
			cmd:    q,
		},
		query: q,
	}
}

// Result implements [pattern.Query].
func (q *query[T]) Result() T {
	return q.query.Result()
}

View File

@@ -1,45 +0,0 @@
package domain
import (
"context"
"github.com/zitadel/zitadel/backend/storage/database"
)
// poolHandler carries the database client used by a chain of handlers.
// acquire/begin swap h.client and hand back a cleanup function that restores
// the previous state; because they mutate h.client, a poolHandler must not
// be shared between concurrently running chains.
type poolHandler[T any] struct {
	pool   database.Pool
	client database.QueryExecutor
}

// acquire takes a connection from the pool, makes it the active client, and
// returns a deferrable that releases the connection again. The input value
// is passed through unchanged.
func (h *poolHandler[T]) acquire(ctx context.Context, in T) (out T, _ func(context.Context, error) error, err error) {
	client, err := h.pool.Acquire(ctx)
	if err != nil {
		return in, nil, err
	}
	h.client = client
	return in, func(ctx context.Context, _ error) error { return client.Release(ctx) }, nil
}

// begin starts a transaction on the current client (or directly on the pool
// when no client is set), makes it the active client, and returns a
// deferrable that ends the transaction with the chain's final error and then
// restores the previous client.
func (h *poolHandler[T]) begin(ctx context.Context, in T) (out T, _ func(context.Context, error) error, err error) {
	var beginner database.Beginner = h.pool
	if h.client != nil {
		// NOTE(review): assumes the active client also implements
		// database.Beginner; the type assertion panics otherwise — confirm.
		beginner = h.client.(database.Beginner)
	}
	previousClient := h.client
	tx, err := beginner.Begin(ctx, nil)
	if err != nil {
		return in, nil, err
	}
	h.client = tx
	return in, func(ctx context.Context, err error) error {
		err = tx.End(ctx, err)
		if err != nil {
			return err
		}
		// Only restore the previous client when End succeeded.
		h.client = previousClient
		return nil
	}, nil
}

View File

@@ -1,20 +0,0 @@
package domain
import (
"context"
"github.com/zitadel/zitadel/backend/storage/database"
)
// defaults bundles dependencies shared by domain services.
type defaults struct {
	db database.Pool
}

// clientSetter is implemented by requests that want the active database
// client injected.
type clientSetter interface {
	setClient(database.QueryExecutor)
}

// acquire injects a database client into the setter.
//
// NOTE(review): the connection and error returned by db.Acquire are
// discarded, and the setter receives the pool itself rather than the
// acquired connection — this looks like unfinished code; confirm before
// relying on it.
func (d *defaults) acquire(ctx context.Context, setter clientSetter) {
	d.db.Acquire(ctx)
	setter.setClient(d.db)
}

View File

@@ -1,68 +0,0 @@
package domain
import (
"context"
"github.com/zitadel/zitadel/backend/repository"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/telemetry/logging"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// Instance is the domain service for instance operations, composing the
// instance and user repositories over a shared connection pool.
type Instance struct {
	db       database.Pool
	instance instanceRepository
	user     userRepository
}

// instanceRepository is the subset of the instance repository this service
// uses.
type instanceRepository interface {
	ByID(ctx context.Context, querier database.Querier, id string) (*repository.Instance, error)
	ByDomain(ctx context.Context, querier database.Querier, domain string) (*repository.Instance, error)
	Create(ctx context.Context, tx database.Transaction, instance *repository.Instance) (*repository.Instance, error)
}

// NewInstance wires the instance and user repositories with the given
// telemetry and returns the service.
func NewInstance(db database.Pool, tracer *tracing.Tracer, logger *logging.Logger) *Instance {
	b := &Instance{
		db: db,
		instance: repository.NewInstance(
			repository.WithLogger[repository.InstanceOptions](logger),
			repository.WithTracer[repository.InstanceOptions](tracer),
		),
		user: repository.NewUser(
			repository.WithLogger[repository.UserOptions](logger),
			repository.WithTracer[repository.UserOptions](tracer),
		),
	}
	return b
}

// ByID loads an instance by its ID, querying through the pool.
func (b *Instance) ByID(ctx context.Context, id string) (*repository.Instance, error) {
	return b.instance.ByID(ctx, b.db, id)
}

// ByDomain loads an instance by one of its domains, querying through the
// pool.
func (b *Instance) ByDomain(ctx context.Context, domain string) (*repository.Instance, error) {
	return b.instance.ByDomain(ctx, b.db, domain)
}

// SetUpInstance is the request to create an instance together with its
// initial user.
type SetUpInstance struct {
	Instance *repository.Instance
	User     *repository.User
}

// SetUp creates the instance and its first user in a single transaction;
// tx.End commits or rolls back based on the final value of err.
func (b *Instance) SetUp(ctx context.Context, request *SetUpInstance) (err error) {
	tx, err := b.db.Begin(ctx, nil)
	if err != nil {
		return err
	}
	// Re-evaluate err at return time so failures roll back.
	defer func() {
		err = tx.End(ctx, err)
	}()
	_, err = b.instance.Create(ctx, tx, request.Instance)
	if err != nil {
		return err
	}
	_, err = b.user.Create(ctx, tx, request.User)
	return err
}

View File

@@ -1,45 +0,0 @@
package domain
import (
"context"
"github.com/zitadel/zitadel/backend/repository"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/internal/crypto"
)
// User is the domain service for user operations.
type User struct {
	defaults
	// userCodeAlg encrypts verification codes (e.g. email codes).
	userCodeAlg crypto.EncryptionAlgorithm

	user            userRepository
	secretGenerator secretGeneratorRepository
}

// UserRepositoryConstructor abstracts construction of user repositories over
// either an executor or a querier.
type UserRepositoryConstructor interface {
	NewUserExecutor(database.Executor) userRepository
	NewUserQuerier(database.Querier) userRepository
}

// userRepository is the subset of the user repository this service uses.
type userRepository interface {
	Create(ctx context.Context, tx database.Executor, user *repository.User) (*repository.User, error)
	ByID(ctx context.Context, querier database.Querier, id string) (*repository.User, error)
	EmailVerificationCode(ctx context.Context, client database.Querier, userID string) (*repository.EmailVerificationCode, error)
	EmailVerificationFailed(ctx context.Context, client database.Executor, code *repository.EmailVerificationCode) error
	EmailVerificationSucceeded(ctx context.Context, client database.Executor, code *repository.EmailVerificationCode) error
}

// secretGeneratorRepository resolves secret-generator configurations by type.
type secretGeneratorRepository interface {
	GeneratorConfigByType(ctx context.Context, client database.Querier, typ repository.SecretGeneratorType) (*crypto.GeneratorConfig, error)
}
// NewUser constructs a User domain service backed by the given pool and the
// default user and secret-generator repositories.
//
// NOTE(review): userCodeAlg is left unset here; callers relying on email
// code encryption must ensure it is configured.
func NewUser(db database.Pool) *User {
	return &User{
		// db is a promoted field of the embedded defaults struct; promoted
		// field names cannot be used as keys in a composite literal, so the
		// embedded struct must be initialized explicitly. The previous
		// literal (User{db: db, ...}) did not compile.
		defaults:        defaults{db: db},
		user:            repository.NewUser(),
		secretGenerator: repository.NewSecretGenerator(),
	}
}

View File

@@ -1,250 +0,0 @@
package domain
import (
"context"
"text/template"
"github.com/zitadel/zitadel/backend/handler"
"github.com/zitadel/zitadel/backend/repository"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/internal/crypto"
"github.com/zitadel/zitadel/internal/domain"
)
// VerifyEmail is the request/state for verifying a user's email with a code.
type VerifyEmail struct {
	UserID string
	Code   string

	// client is the connection or transaction currently in use.
	client database.QueryExecutor
	// config and gen produce and check the encrypted verification code.
	config *crypto.GeneratorConfig
	gen    crypto.Generator
	// code is the stored verification code loaded from the repository.
	code *repository.EmailVerificationCode
	// verificationErr holds the code check result so the success and
	// failure branches can both run without aborting the chain.
	verificationErr error
}

// SetEmail is the request/state for changing a user's email address.
type SetEmail struct {
	*poolHandler[*SetEmail]

	UserID string
	Email  string
	// Verification decides how the new address is verified; when nil,
	// WithDefaultEmailVerification is used.
	Verification handler.Handle[*SetEmail, *SetEmail]

	// config *crypto.GeneratorConfig
	gen          crypto.Generator
	code         *crypto.CryptoValue
	plainCode    string
	currentEmail string
}

// WithEmailConfirmationURL verifies by generating a code and sending a
// confirmation link.
//
// NOTE(review): the url parameter is currently unused and the notification
// is still a TODO, so this is presently identical to
// WithDefaultEmailVerification.
func (u *User) WithEmailConfirmationURL(url template.Template) handler.Handle[*SetEmail, *SetEmail] {
	return handler.Chain(
		u.WithEmailReturnCode(),
		func(ctx context.Context, in *SetEmail) (out *SetEmail, err error) {
			// TODO: queue notification
			return in, nil
		},
	)
}

// WithEmailReturnCode generates an encrypted verification code and stores it
// for the user; the plain code is kept on the request so it can be returned
// to the caller.
func (u *User) WithEmailReturnCode() handler.Handle[*SetEmail, *SetEmail] {
	return handler.Chains(
		handler.ErrFuncToHandle(
			func(ctx context.Context, in *SetEmail) (err error) {
				in.code, in.plainCode, err = crypto.NewCode(in.gen)
				return err
			},
		),
		handler.ErrFuncToHandle(
			func(ctx context.Context, in *SetEmail) (err error) {
				return u.user.SetEmailVerificationCode(ctx, in.poolHandler.client, in.UserID, in.code)
			},
		),
	)
}

// WithEmailVerified stores the code and immediately marks verification as
// succeeded.
//
// NOTE(review): the first step calls the package-level
// repository.SetEmailVerificationCode instead of u.user like the other
// flows — confirm this is intentional.
func (u *User) WithEmailVerified() handler.Handle[*SetEmail, *SetEmail] {
	return handler.Chain(
		handler.ErrFuncToHandle(
			func(ctx context.Context, in *SetEmail) (err error) {
				return repository.SetEmailVerificationCode(ctx, in.poolHandler.client, in.UserID, in.code)
			},
		),
		handler.ErrFuncToHandle(
			func(ctx context.Context, in *SetEmail) (err error) {
				return u.user.EmailVerificationSucceeded(ctx, in.poolHandler.client, &repository.EmailVerificationCode{
					Code: in.code,
				})
			},
		),
	)
}

// WithDefaultEmailVerification generates and stores a code; sending the
// notification is still a TODO.
func (u *User) WithDefaultEmailVerification() handler.Handle[*SetEmail, *SetEmail] {
	return handler.Chain(
		u.WithEmailReturnCode(),
		func(ctx context.Context, in *SetEmail) (out *SetEmail, err error) {
			// TODO: queue notification
			return in, nil
		},
	)
}
// SetEmailDifferent changes the user's email only when it differs from the
// stored one: it acquires a connection, prepares the code generator, then
// compares and runs the verification handler inside a transaction.
func (u *User) SetEmailDifferent(ctx context.Context, in *SetEmail) (err error) {
	if in.Verification == nil {
		in.Verification = u.WithDefaultEmailVerification()
	}
	client, err := u.db.Acquire(ctx)
	if err != nil {
		return err
	}
	defer client.Release(ctx)
	config, err := u.secretGenerator.GeneratorConfigByType(ctx, client, domain.SecretGeneratorTypeVerifyEmailCode)
	if err != nil {
		return err
	}
	in.gen = crypto.NewEncryptionGenerator(*config, u.userCodeAlg)
	tx, err := client.Begin(ctx, nil)
	if err != nil {
		return err
	}
	// Finish the transaction with the *final* value of err. The previous
	// `defer tx.End(ctx, err)` evaluated err at defer time (always nil), so
	// later failures never triggered a rollback and End's own error was
	// silently dropped.
	defer func() {
		err = tx.End(ctx, err)
	}()
	user, err := u.user.ByID(ctx, tx, in.UserID)
	if err != nil {
		return err
	}
	if user.Email == in.Email {
		return nil
	}
	_, err = in.Verification(ctx, in)
	return err
}
// SetEmail changes the user's email address: it acquires a connection, loads
// the verification-code generator config, then inside a transaction compares
// the stored email with the requested one; when they differ, the (possibly
// defaulted) Verification handler runs. The email write itself is still a
// TODO.
func (u *User) SetEmail(ctx context.Context, in *SetEmail) error {
	_, err := handler.Chain(
		handler.HandleIf(
			func(in *SetEmail) bool {
				return in.Verification == nil
			},
			func(ctx context.Context, in *SetEmail) (*SetEmail, error) {
				in.Verification = u.WithDefaultEmailVerification()
				return in, nil
			},
		),
		handler.Deferrable(
			in.poolHandler.acquire,
			handler.Chains(
				func(ctx context.Context, in *SetEmail) (_ *SetEmail, err error) {
					config, err := u.secretGenerator.GeneratorConfigByType(ctx, in.poolHandler.client, domain.SecretGeneratorTypeVerifyEmailCode)
					if err != nil {
						return nil, err
					}
					in.gen = crypto.NewEncryptionGenerator(*config, u.userCodeAlg)
					return in, nil
				},
				handler.Deferrable(
					in.poolHandler.begin,
					handler.Chains(
						func(ctx context.Context, in *SetEmail) (*SetEmail, error) {
							// TODO: repository.EmailByUserID
							user, err := u.user.ByID(ctx, in.poolHandler.client, in.UserID)
							if err != nil {
								return nil, err
							}
							in.currentEmail = user.Email
							return in, nil
						},
						// No-op when the email is unchanged.
						handler.SkipIf(
							func(in *SetEmail) bool {
								return in.currentEmail == in.Email
							},
							handler.Chains(
								func(ctx context.Context, in *SetEmail) (*SetEmail, error) {
									// TODO: repository.SetEmail
									return in, nil
								},
								in.Verification,
							),
						),
					),
				),
			),
		),
	)(ctx, in)
	return err
}

// VerifyEmail checks the given code against the stored email verification
// code inside a transaction and records success or failure on the user.
//
// NOTE(review): a failed code check is persisted via
// EmailVerificationFailed but verificationErr itself is not returned to the
// caller — confirm that is intended.
func (u *User) VerifyEmail(ctx context.Context, in *VerifyEmail) error {
	_, err := handler.Deferrable(
		// Acquire a connection and release it when the chain finishes.
		func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, _ func(context.Context, error) error, err error) {
			client, err := u.db.Acquire(ctx)
			if err != nil {
				return nil, nil, err
			}
			in.client = client
			return in, func(ctx context.Context, _ error) error { return client.Release(ctx) }, err
		},
		handler.Chains(
			func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, err error) {
				in.config, err = u.secretGenerator.GeneratorConfigByType(ctx, in.client, domain.SecretGeneratorTypeVerifyEmailCode)
				return in, err
			},
			func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, err error) {
				in.gen = crypto.NewEncryptionGenerator(*in.config, u.userCodeAlg)
				return in, nil
			},
			handler.Deferrable(
				// Begin a transaction on the acquired client; the deferrable
				// ends it with the chain's error and restores the plain
				// client afterwards.
				func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, _ func(context.Context, error) error, err error) {
					client := in.client
					tx, err := in.client.(database.Client).Begin(ctx, nil)
					if err != nil {
						return nil, nil, err
					}
					in.client = tx
					return in, func(ctx context.Context, err error) error {
						err = tx.End(ctx, err)
						if err != nil {
							return err
						}
						in.client = client
						return nil
					}, err
				},
				handler.Chains(
					func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, err error) {
						in.code, err = u.user.EmailVerificationCode(ctx, in.client, in.UserID)
						return in, err
					},
					// The check result is stored instead of returned so the
					// failure branch below can still persist the attempt.
					func(ctx context.Context, in *VerifyEmail) (*VerifyEmail, error) {
						in.verificationErr = crypto.VerifyCode(in.code.CreatedAt, in.code.Expiry, in.code.Code, in.Code, in.gen.Alg())
						return in, nil
					},
					handler.HandleIf(
						func(in *VerifyEmail) bool {
							return in.verificationErr == nil
						},
						func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, err error) {
							return in, u.user.EmailVerificationSucceeded(ctx, in.client, in.code)
						},
					),
					handler.HandleIf(
						func(in *VerifyEmail) bool {
							return in.verificationErr != nil
						},
						func(ctx context.Context, in *VerifyEmail) (_ *VerifyEmail, err error) {
							return in, u.user.EmailVerificationFailed(ctx, in.client, in.code)
						},
					),
				),
			),
		),
	)(ctx, in)
	return err
}

View File

@@ -1,162 +0,0 @@
package handler
import (
"context"
)
// Parameter pairs a previous and a current value when a handler needs both.
type Parameter[P, C any] struct {
	Previous P
	Current  C
}

// Handle is a function that handles the in.
type Handle[In, Out any] func(ctx context.Context, in In) (out Out, err error)

// DeferrableHandle additionally returns a cleanup function that must run
// after the downstream handlers have finished.
type DeferrableHandle[In, Out any] func(ctx context.Context, in In) (out Out, deferrable func(context.Context, error) error, err error)

// Defer is the shape of a combinator that composes a deferrable handle with
// the next handler.
type Defer[In, Out, NextOut any] func(handle DeferrableHandle[In, Out], next Handle[Out, NextOut]) Handle[In, NextOut]

// HandleNoReturn handles the in without producing an output.
type HandleNoReturn[In any] func(ctx context.Context, in In) error

// Middleware is a function that decorates the handle function.
// It must call the handle function, but it is up to the middleware to decide
// when and how.
type Middleware[In, Out any] func(ctx context.Context, in In, handle Handle[In, Out]) (out Out, err error)

// Deferrable runs handle, then next, and finally the cleanup returned by
// handle, feeding it the chain's error.
//
// NOTE(review): the deferred `err = deferrable(ctx, err)` replaces next's
// error with whatever the cleanup returns — a cleanup that returns nil
// (e.g. a plain connection Release) silently swallows a failure from next.
// Confirm that every deferrable incorporates the error passed to it.
func Deferrable[In, Out, NextOut any](handle DeferrableHandle[In, Out], next Handle[Out, NextOut]) Handle[In, NextOut] {
	return func(ctx context.Context, in In) (nextOut NextOut, err error) {
		out, deferrable, err := handle(ctx, in)
		if err != nil {
			return nextOut, err
		}
		defer func() {
			err = deferrable(ctx, err)
		}()
		return next(ctx, out)
	}
}

// Chain chains the handle function with the next handler.
// The next handler is called after the handle function; on error the zero
// output is returned.
func Chain[In, Out, NextOut any](handle Handle[In, Out], next Handle[Out, NextOut]) Handle[In, NextOut] {
	return func(ctx context.Context, in In) (nextOut NextOut, err error) {
		out, err := handle(ctx, in)
		if err != nil {
			return nextOut, err
		}
		return next(ctx, out)
	}
}
// Chains chains the handle function with the next handlers.
// The next handlers are called after the handle function.
// The order of the handlers is preserved.
func Chains[In, Out any](handle Handle[In, Out], chain ...Handle[Out, Out]) Handle[In, Out] {
	// Compose the chain exactly once. The previous implementation
	// reassigned the *captured* handle variable inside the returned
	// closure, so every invocation of the composed handler wrapped the
	// chain again, running each chained handler one extra time per prior
	// call.
	composed := handle
	for _, next := range chain {
		composed = Chain(composed, next)
	}
	return composed
}
// Decorate decorates the handle function with the decorate function.
// The decorate function is called before the handle function and controls
// when (and whether) handle actually runs.
func Decorate[In, Out any](handle Handle[In, Out], decorate Middleware[In, Out]) Handle[In, Out] {
	return func(ctx context.Context, in In) (out Out, err error) {
		return decorate(ctx, in, handle)
	}
}
// Decorates decorates the handle function with the decorate functions.
// The first decorator is the outermost one; decorators run before the
// handle function.
func Decorates[In, Out any](handle Handle[In, Out], decorates ...Middleware[In, Out]) Handle[In, Out] {
	// Wrap exactly once, outside any returned closure. The previous
	// implementation rewrapped the captured handle on every invocation,
	// stacking the decorators again with each call.
	for i := len(decorates) - 1; i >= 0; i-- {
		handle = Decorate(handle, decorates[i])
	}
	return handle
}
// SkipNext skips the next handler if the handle function returns a non-empty
// output or an error.
//
// NOTE(review): the emptiness test compares interface values: it panics at
// runtime when Out is a non-comparable type (slice, map, func) and compares
// pointers by identity rather than pointed-to value. Confirm Out is always
// a comparable type at every call site.
func SkipNext[In, Out any](handle Handle[In, Out], next Handle[In, Out]) Handle[In, Out] {
	return func(ctx context.Context, in In) (out Out, err error) {
		var empty Out
		out, err = handle(ctx, in)
		// TODO: does this work?
		if any(out) != any(empty) || err != nil {
			return out, err
		}
		return next(ctx, in)
	}
}
// HandleIf runs handle only when cond holds for the input; otherwise the
// input is passed through unchanged.
func HandleIf[In any](cond func(In) bool, handle Handle[In, In]) Handle[In, In] {
	return func(ctx context.Context, in In) (In, error) {
		if cond(in) {
			return handle(ctx, in)
		}
		return in, nil
	}
}
// SkipIf runs handle only when cond does NOT hold for the input; otherwise
// the input is passed through unchanged. It is the complement of HandleIf.
func SkipIf[In any](cond func(In) bool, handle Handle[In, In]) Handle[In, In] {
	return HandleIf(func(in In) bool { return !cond(in) }, handle)
}
// SkipNilHandler skips the handle function if the handler is nil.
// If handler is nil, an empty (zero) output is returned — unlike
// SkipReturnPreviousHandler, which passes the input through.
// The function is safe to call with nil handler.
func SkipNilHandler[O, In, Out any](handler *O, handle Handle[In, Out]) Handle[In, Out] {
	return func(ctx context.Context, in In) (out Out, err error) {
		if handler == nil {
			return out, nil
		}
		return handle(ctx, in)
	}
}

// SkipReturnPreviousHandler skips the handle function if the handler is nil
// and returns the input unchanged.
// The function is safe to call with nil handler.
func SkipReturnPreviousHandler[O, In any](handler *O, handle Handle[In, In]) Handle[In, In] {
	return func(ctx context.Context, in In) (out In, err error) {
		if handler == nil {
			return in, nil
		}
		return handle(ctx, in)
	}
}

// CtxFuncToHandle adapts a context-only function to a Handle with an empty
// struct input.
func CtxFuncToHandle[Out any](fn func(context.Context) (Out, error)) Handle[struct{}, Out] {
	return func(ctx context.Context, in struct{}) (out Out, err error) {
		return fn(ctx)
	}
}

// ResFuncToHandle adapts an error-free function to a Handle that never
// fails.
func ResFuncToHandle[In any, Out any](fn func(context.Context, In) Out) Handle[In, Out] {
	return func(ctx context.Context, in In) (out Out, err error) {
		return fn(ctx, in), nil
	}
}

// ErrFuncToHandle adapts an error-only function to a Handle that passes the
// input through on success and returns the zero value on failure.
func ErrFuncToHandle[In any](fn func(context.Context, In) error) Handle[In, In] {
	return func(ctx context.Context, in In) (out In, err error) {
		err = fn(ctx, in)
		if err != nil {
			return out, err
		}
		return in, nil
	}
}

// NoReturnToHandle adapts a side-effect-only function to a Handle that
// always succeeds and passes the input through.
func NoReturnToHandle[In any](fn func(context.Context, In)) Handle[In, In] {
	return func(ctx context.Context, in In) (out In, err error) {
		fn(ctx, in)
		return in, nil
	}
}

View File

@@ -1,19 +0,0 @@
package repository
import "github.com/zitadel/zitadel/backend/storage/database"
// executor wraps a database.Executor for write statements.
type executor struct {
	client database.Executor
}

// execute returns an executor bound to the given client.
func execute(client database.Executor) *executor {
	return &executor{client: client}
}

// querier wraps a database.Querier for read statements.
type querier struct {
	client database.Querier
}

// query returns a querier bound to the given client.
func query(client database.Querier) *querier {
	return &querier{client: client}
}

View File

@@ -1,16 +0,0 @@
package repository
import (
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/storage/eventstore"
)
// eventStore wraps an eventstore bound to a database client.
type eventStore struct {
	es *eventstore.Eventstore
}

// events returns an eventStore that pushes events through the given client.
func events(client database.Executor) *eventStore {
	return &eventStore{
		es: eventstore.New(client),
	}
}

View File

@@ -1,115 +0,0 @@
package repository
import (
"context"
"github.com/zitadel/zitadel/backend/handler"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/telemetry/logging"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// Instance is the read model of an instance row.
type Instance struct {
	ID   string
	Name string
}

// InstanceOptions are the instance-repository specific options.
type InstanceOptions struct {
	cache *InstanceCache
}

// instance is the instance repository; telemetry and cache are optional.
type instance struct {
	options[InstanceOptions]
	*InstanceOptions
}

// NewInstance constructs the repository and applies the given options.
func NewInstance(opts ...Option[InstanceOptions]) *instance {
	i := new(instance)
	// Point the embedded *InstanceOptions at the custom section of options
	// so promoted fields (cache) read the configured values.
	i.InstanceOptions = &i.options.custom
	for _, opt := range opts {
		opt.apply(&i.options)
	}
	return i
}

// WithInstanceCache configures the cache used by lookups and writes.
func WithInstanceCache(c *InstanceCache) Option[InstanceOptions] {
	return func(opts *options[InstanceOptions]) {
		opts.custom.cache = c
	}
}

// Create writes the instance to the database, appends the creation event,
// and (when a cache is configured) stores the result in the cache; each
// step is traced and logged.
func (i *instance) Create(ctx context.Context, tx database.Transaction, instance *Instance) (*Instance, error) {
	return tracing.Wrap(i.tracer, "instance.SetUp",
		handler.Chains(
			handler.Decorates(
				execute(tx).CreateInstance,
				tracing.Decorate[*Instance, *Instance](i.tracer, tracing.WithSpanName("instance.sql.SetUp")),
				logging.Decorate[*Instance, *Instance](i.logger, "instance.sql.SetUp"),
			),
			handler.Decorates(
				events(tx).CreateInstance,
				tracing.Decorate[*Instance, *Instance](i.tracer, tracing.WithSpanName("instance.event.SetUp")),
				logging.Decorate[*Instance, *Instance](i.logger, "instance.event.SetUp"),
			),
			// SkipReturnPreviousHandler guards the cache write when no
			// cache is configured; the method value i.cache.Set is safe to
			// take on a nil receiver because it is only called when the
			// cache is non-nil.
			handler.SkipReturnPreviousHandler(i.cache,
				handler.Decorates(
					handler.NoReturnToHandle(i.cache.Set),
					tracing.Decorate[*Instance, *Instance](i.tracer, tracing.WithSpanName("instance.cache.SetUp")),
					logging.Decorate[*Instance, *Instance](i.logger, "instance.cache.SetUp"),
				),
			),
		),
	)(ctx, instance)
}

// ByID resolves an instance by ID, preferring the cache and falling back to
// the database; a database hit is written back to the cache when one is
// configured.
func (i *instance) ByID(ctx context.Context, querier database.Querier, id string) (*Instance, error) {
	return tracing.Wrap(i.tracer, "instance.byID",
		handler.SkipNext(
			handler.SkipNilHandler(i.cache,
				handler.ResFuncToHandle(i.cache.ByID),
			),
			handler.Chain(
				handler.Decorates(
					query(querier).InstanceByID,
					tracing.Decorate[string, *Instance](i.tracer, tracing.WithSpanName("instance.sql.ByID")),
					logging.Decorate[string, *Instance](i.logger, "instance.sql.ByID"),
				),
				handler.SkipNilHandler(i.cache, handler.NoReturnToHandle(i.cache.Set)),
			),
		),
	)(ctx, id)
}
// ByDomain resolves an instance by one of its domains, preferring the cache
// and falling back to the database; a database hit is written back to the
// cache when one is configured. The SQL step is traced and logged the same
// way as in Create and ByID (the previous version used a bare Decorate and
// omitted the logging decorator, inconsistently with its siblings).
func (i *instance) ByDomain(ctx context.Context, querier database.Querier, domain string) (*Instance, error) {
	return tracing.Wrap(i.tracer, "instance.byDomain",
		handler.SkipNext(
			handler.SkipNilHandler(i.cache,
				handler.ResFuncToHandle(i.cache.ByDomain),
			),
			handler.Chain(
				handler.Decorates(
					query(querier).InstanceByDomain,
					tracing.Decorate[string, *Instance](i.tracer, tracing.WithSpanName("instance.sql.ByDomain")),
					logging.Decorate[string, *Instance](i.logger, "instance.sql.ByDomain"),
				),
				handler.SkipNilHandler(i.cache, handler.NoReturnToHandle(i.cache.Set)),
			),
		),
	)(ctx, domain)
}
// ListRequest filters instance listings.
type ListRequest struct {
	Limit uint16
}

// List returns instances matching the request.
//
// NOTE(review): Limit is not applied by the underlying SQL query, and the
// single handler wrapped in Chains could be called directly.
func (i *instance) List(ctx context.Context, querier database.Querier, request *ListRequest) ([]*Instance, error) {
	return tracing.Wrap(i.tracer, "instance.list",
		handler.Chains(
			handler.Decorates(
				query(querier).ListInstances,
				tracing.Decorate[*ListRequest, []*Instance](i.tracer, tracing.WithSpanName("instance.sql.List")),
				logging.Decorate[*ListRequest, []*Instance](i.logger, "instance.sql.List"),
			),
		),
	)(ctx, request)
}

View File

@@ -1,58 +0,0 @@
package repository
import (
"context"
"log"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// InstanceCache caches instances indexed by ID and by domain.
type InstanceCache struct {
	cache.Cache[InstanceIndex, string, *Instance]
}

// InstanceIndex enumerates the cache indexes of an instance.
type InstanceIndex uint8

// InstanceIndices lists every index a cached instance is stored under.
var InstanceIndices = []InstanceIndex{
	InstanceByID,
	InstanceByDomain,
}

const (
	InstanceByID InstanceIndex = iota
	InstanceByDomain
)

var _ cache.Entry[InstanceIndex, string] = (*Instance)(nil)

// Keys implements [cache.Entry].
//
// NOTE(review): the InstanceByDomain index uses i.Name as the key, while
// lookups elsewhere query by actual domain — this looks like a placeholder;
// confirm before relying on domain-indexed cache hits.
func (i *Instance) Keys(index InstanceIndex) (key []string) {
	switch index {
	case InstanceByID:
		return []string{i.ID}
	case InstanceByDomain:
		return []string{i.Name}
	}
	return nil
}

// NewInstanceCache wraps the given cache implementation.
func NewInstanceCache(c cache.Cache[InstanceIndex, string, *Instance]) *InstanceCache {
	return &InstanceCache{c}
}

// ByID returns the cached instance with the given ID, or nil on a miss.
func (i *InstanceCache) ByID(ctx context.Context, id string) *Instance {
	log.Println("cached.instance.byID")
	instance, _ := i.Cache.Get(ctx, InstanceByID, id)
	return instance
}

// ByDomain returns the cached instance for the given domain, or nil on a
// miss.
func (i *InstanceCache) ByDomain(ctx context.Context, domain string) *Instance {
	log.Println("cached.instance.byDomain")
	instance, _ := i.Cache.Get(ctx, InstanceByDomain, domain)
	return instance
}

// Set stores the instance under all of its indexes.
func (i *InstanceCache) Set(ctx context.Context, instance *Instance) {
	log.Println("cached.instance.set")
	i.Cache.Set(ctx, instance)
}

View File

@@ -1,61 +0,0 @@
package repository
import (
"context"
"log"
)
// InstanceByIDStmt selects a single instance by its ID.
const InstanceByIDStmt = `SELECT id, name FROM instances WHERE id = $1`

// InstanceByID loads one instance by its ID.
func (q *querier) InstanceByID(ctx context.Context, id string) (*Instance, error) {
	log.Println("sql.instance.byID")
	row := q.client.QueryRow(ctx, InstanceByIDStmt, id)
	var instance Instance
	if err := row.Scan(&instance.ID, &instance.Name); err != nil {
		return nil, err
	}
	return &instance, nil
}

// instanceByDomainQuery resolves an instance through its domains table.
const instanceByDomainQuery = `SELECT i.id, i.name FROM instances i JOIN instance_domains id ON i.id = id.instance_id WHERE id.domain = $1`

// InstanceByDomain loads one instance by one of its domains.
func (q *querier) InstanceByDomain(ctx context.Context, domain string) (*Instance, error) {
	log.Println("sql.instance.byDomain")
	row := q.client.QueryRow(ctx, instanceByDomainQuery, domain)
	var instance Instance
	if err := row.Scan(&instance.ID, &instance.Name); err != nil {
		return nil, err
	}
	return &instance, nil
}

// ListInstances returns all instances.
//
// NOTE(review): request.Limit is ignored; the query always returns every
// row.
func (q *querier) ListInstances(ctx context.Context, request *ListRequest) (res []*Instance, err error) {
	log.Println("sql.instance.list")
	rows, err := q.client.Query(ctx, "SELECT id, name FROM instances")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var instance Instance
		if err = rows.Scan(&instance.ID, &instance.Name); err != nil {
			return nil, err
		}
		res = append(res, &instance)
	}
	// Surface any iteration error that terminated the loop.
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

// InstanceCreateStmt inserts a new instance row.
const InstanceCreateStmt = `INSERT INTO instances (id, name) VALUES ($1, $2)`

// CreateInstance inserts the instance and returns it unchanged.
func (e *executor) CreateInstance(ctx context.Context, instance *Instance) (*Instance, error) {
	log.Println("sql.instance.create")
	err := e.client.Exec(ctx, InstanceCreateStmt, instance.ID, instance.Name)
	if err != nil {
		return nil, err
	}
	return instance, nil
}

View File

@@ -1,15 +0,0 @@
package repository
import (
"context"
"log"
)
// CreateInstance pushes the instance-created event to the event store and
// returns the instance unchanged.
func (s *eventStore) CreateInstance(ctx context.Context, instance *Instance) (*Instance, error) {
	log.Println("event.instance.create")
	err := s.es.Push(ctx, instance)
	if err != nil {
		return nil, err
	}
	return instance, nil
}

View File

@@ -1,258 +0,0 @@
package repository_test
import (
"context"
"fmt"
"log/slog"
"os"
"reflect"
"testing"
"github.com/zitadel/zitadel/backend/repository"
"github.com/zitadel/zitadel/backend/storage/cache"
"github.com/zitadel/zitadel/backend/storage/cache/connector/gomap"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/storage/database/mock"
"github.com/zitadel/zitadel/backend/telemetry/logging"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// Test_instance_Create exercises instance.Create with progressively fewer
// configured dependencies (cache, tracer, logger) to verify that every
// decorator is optional.
func Test_instance_Create(t *testing.T) {
	type args struct {
		ctx      context.Context
		tx       database.Transaction
		instance *repository.Instance
	}
	tests := []struct {
		name    string
		opts    []repository.Option[repository.InstanceOptions]
		args    args
		want    *repository.Instance
		wantErr bool
	}{
		{
			name: "simple",
			opts: []repository.Option[repository.InstanceOptions]{
				repository.WithTracer[repository.InstanceOptions](tracing.NewTracer("test")),
				repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
				repository.WithInstanceCache(
					repository.NewInstanceCache(gomap.NewCache[repository.InstanceIndex, string, *repository.Instance](context.Background(), repository.InstanceIndices, cache.Config{})),
				),
			},
			args: args{
				ctx: context.Background(),
				tx:  mock.NewTransaction(t, mock.ExpectExec(repository.InstanceCreateStmt, "ID", "Name")),
				instance: &repository.Instance{
					ID:   "ID",
					Name: "Name",
				},
			},
			want: &repository.Instance{
				ID:   "ID",
				Name: "Name",
			},
			wantErr: false,
		},
		{
			name: "without cache",
			opts: []repository.Option[repository.InstanceOptions]{
				repository.WithTracer[repository.InstanceOptions](tracing.NewTracer("test")),
				repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
			},
			args: args{
				ctx: context.Background(),
				tx:  mock.NewTransaction(t, mock.ExpectExec(repository.InstanceCreateStmt, "ID", "Name")),
				instance: &repository.Instance{
					ID:   "ID",
					Name: "Name",
				},
			},
			want: &repository.Instance{
				ID:   "ID",
				Name: "Name",
			},
			wantErr: false,
		},
		{
			name: "without cache, tracer",
			opts: []repository.Option[repository.InstanceOptions]{
				repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
			},
			args: args{
				ctx: context.Background(),
				tx:  mock.NewTransaction(t, mock.ExpectExec(repository.InstanceCreateStmt, "ID", "Name")),
				instance: &repository.Instance{
					ID:   "ID",
					Name: "Name",
				},
			},
			want: &repository.Instance{
				ID:   "ID",
				Name: "Name",
			},
			wantErr: false,
		},
		{
			name: "without cache, tracer, logger",
			args: args{
				ctx: context.Background(),
				tx:  mock.NewTransaction(t, mock.ExpectExec(repository.InstanceCreateStmt, "ID", "Name")),
				instance: &repository.Instance{
					ID:   "ID",
					Name: "Name",
				},
			},
			want: &repository.Instance{
				ID:   "ID",
				Name: "Name",
			},
			wantErr: false,
		},
		{
			name: "without cache, tracer, logger, eventStore",
			args: args{
				ctx: context.Background(),
				tx:  mock.NewTransaction(t, mock.ExpectExec(repository.InstanceCreateStmt, "ID", "Name")),
				instance: &repository.Instance{
					ID:   "ID",
					Name: "Name",
				},
			},
			want: &repository.Instance{
				ID:   "ID",
				Name: "Name",
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Visual separator between cases; the decorators log to stdout.
			fmt.Printf("------------------------ %s ------------------------\n", tt.name)
			i := repository.NewInstance(tt.opts...)
			got, err := i.Create(tt.args.ctx, tt.args.tx, tt.args.instance)
			if (err != nil) != tt.wantErr {
				t.Errorf("instance.Create() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("instance.Create() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_instance_ByID verifies the cache-first lookup: a cold cache falls
// through to the mocked SQL row, a warm cache must return the entry without
// needing the database.
func Test_instance_ByID(t *testing.T) {
	type args struct {
		ctx context.Context
		tx  database.Transaction
		id  string
	}
	tests := []struct {
		name    string
		opts    []repository.Option[repository.InstanceOptions]
		args    args
		want    *repository.Instance
		wantErr bool
	}{
		{
			name: "simple, not cached",
			opts: []repository.Option[repository.InstanceOptions]{
				repository.WithTracer[repository.InstanceOptions](tracing.NewTracer("test")),
				repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
				repository.WithInstanceCache(
					repository.NewInstanceCache(gomap.NewCache[repository.InstanceIndex, string, *repository.Instance](context.Background(), repository.InstanceIndices, cache.Config{})),
				),
			},
			args: args{
				ctx: context.Background(),
				tx: mock.NewTransaction(t,
					mock.ExpectQueryRow(mock.NewRow(t, "id", "Name"), repository.InstanceByIDStmt, "id"),
				),
				id: "id",
			},
			want: &repository.Instance{
				ID:   "id",
				Name: "Name",
			},
			wantErr: false,
		},
		{
			name: "simple, cached",
			opts: []repository.Option[repository.InstanceOptions]{
				repository.WithTracer[repository.InstanceOptions](tracing.NewTracer("test")),
				repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
				repository.WithInstanceCache(
					// Pre-warm the cache so the SQL fallback should not run.
					func() *repository.InstanceCache {
						c := repository.NewInstanceCache(gomap.NewCache[repository.InstanceIndex, string, *repository.Instance](context.Background(), repository.InstanceIndices, cache.Config{}))
						c.Set(context.Background(), &repository.Instance{
							ID:   "id",
							Name: "Name",
						})
						return c
					}(),
				),
			},
			args: args{
				ctx: context.Background(),
				tx: mock.NewTransaction(t,
					mock.ExpectQueryRow(mock.NewRow(t, "id", "Name"), repository.InstanceByIDStmt, "id"),
				),
				id: "id",
			},
			want: &repository.Instance{
				ID:   "id",
				Name: "Name",
			},
			wantErr: false,
		},
		// {
		// 	name: "without cache, tracer",
		// 	opts: []repository.Option[repository.InstanceOptions]{
		// 		repository.WithLogger[repository.InstanceOptions](logging.New(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))),
		// 	},
		// 	args: args{
		// 		ctx: context.Background(),
		// 		tx:  mock.NewTransaction(),
		// 		id: &repository.Instance{
		// 			ID:   "ID",
		// 			Name: "Name",
		// 		},
		// 	},
		// 	want: &repository.Instance{
		// 		ID:   "ID",
		// 		Name: "Name",
		// 	},
		// 	wantErr: false,
		// },
		// {
		// 	name: "without cache, tracer, logger",
		// 	args: args{
		// 		ctx: context.Background(),
		// 		tx:  mock.NewTransaction(),
		// 		id: &repository.Instance{
		// 			ID:   "ID",
		// 			Name: "Name",
		// 		},
		// 	},
		// 	want: &repository.Instance{
		// 		ID:   "ID",
		// 		Name: "Name",
		// 	},
		// 	wantErr: false,
		// },
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Visual separator between cases; the decorators log to stdout.
			fmt.Printf("------------------------ %s ------------------------\n", tt.name)
			i := repository.NewInstance(tt.opts...)
			got, err := i.ByID(tt.args.ctx, tt.args.tx, tt.args.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("instance.ByID() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("instance.ByID() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@@ -1,35 +0,0 @@
package repository
import (
"github.com/zitadel/zitadel/backend/telemetry/logging"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
)
// options are the default options for orchestrators.
type options[T any] struct {
	// custom holds the repository-specific options.
	custom T
	defaultOptions
}

// defaultOptions are shared by all repositories.
type defaultOptions struct {
	tracer *tracing.Tracer
	logger *logging.Logger
}

// Option mutates an options value during construction.
type Option[T any] func(*options[T])

// WithTracer configures the tracer used by the repository.
func WithTracer[T any](tracer *tracing.Tracer) Option[T] {
	return func(o *options[T]) {
		o.tracer = tracer
	}
}

// WithLogger configures the logger used by the repository.
func WithLogger[T any](logger *logging.Logger) Option[T] {
	return func(o *options[T]) {
		o.logger = logger
	}
}

// apply applies the option to opts.
func (o Option[T]) apply(opts *options[T]) {
	o(opts)
}

View File

@@ -1,33 +0,0 @@
package repository
import (
"context"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
"github.com/zitadel/zitadel/internal/crypto"
"github.com/zitadel/zitadel/internal/domain"
)
// SecretGeneratorOptions are the secret-generator repository options.
type SecretGeneratorOptions struct{}

// SecretGenerator is the repository for secret-generator configurations.
type SecretGenerator struct {
	options[SecretGeneratorOptions]
}

// NewSecretGenerator constructs the repository and applies the options.
func NewSecretGenerator(opts ...Option[SecretGeneratorOptions]) *SecretGenerator {
	i := new(SecretGenerator)
	for _, opt := range opts {
		opt.apply(&i.options)
	}
	return i
}

// SecretGeneratorType aliases the domain type for convenience.
type SecretGeneratorType = domain.SecretGeneratorType

// GeneratorConfigByType loads the generator configuration for the given
// type, traced as "secretGenerator.GeneratorConfigByType".
func (sg *SecretGenerator) GeneratorConfigByType(ctx context.Context, client database.Querier, typ SecretGeneratorType) (*crypto.GeneratorConfig, error) {
	return tracing.Wrap(sg.tracer, "secretGenerator.GeneratorConfigByType",
		query(client).SecretGeneratorConfigByType,
	)(ctx, typ)
}

View File

@@ -1,25 +0,0 @@
package repository
import (
"context"
"github.com/zitadel/zitadel/internal/api/authz"
"github.com/zitadel/zitadel/internal/crypto"
)
const secretGeneratorByTypeStmt = `SELECT * FROM secret_generators WHERE instance_id = $1 AND type = $2`

// SecretGeneratorConfigByType loads the secret-generator configuration of the
// given type for the instance resolved from ctx. The database error of the
// underlying client is returned unchanged.
//
// NOTE(review): the statement selects * but scans exactly six columns — this
// only works if the table has exactly these columns in this order; an explicit
// column list would be safer. Verify against the schema.
func (q querier) SecretGeneratorConfigByType(ctx context.Context, typ SecretGeneratorType) (config *crypto.GeneratorConfig, err error) {
	// config must point at allocated memory before Scan dereferences its
	// fields; the original scanned into fields of a nil pointer and panicked.
	config = new(crypto.GeneratorConfig)
	err = q.client.QueryRow(ctx, secretGeneratorByTypeStmt, authz.GetInstance(ctx).InstanceID, typ).Scan(
		&config.Length,
		&config.Expiry,
		&config.IncludeLowerLetters,
		&config.IncludeUpperLetters,
		&config.IncludeDigits,
		&config.IncludeSymbols,
	)
	if err != nil {
		return nil, err
	}
	return config, nil
}

View File

@@ -1,132 +0,0 @@
package repository
import (
"context"
"time"
"github.com/zitadel/zitadel/backend/handler"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/telemetry/tracing"
"github.com/zitadel/zitadel/internal/crypto"
)
// User is the read/write model of a user handled by this repository.
type User struct {
	ID       string
	Username string
	Email    string
}

// UserOptions are the repository-specific options of the user repository.
type UserOptions struct {
	// cache is optional; when nil, all reads go straight to the database.
	cache *UserCache
}

// user is the user repository. It embeds the generic options and additionally
// a pointer to the custom options for convenient direct access (both fields
// are wired to the same data in NewUser).
type user struct {
	options[UserOptions]
	*UserOptions
}
// NewUser constructs the user repository and applies the given options.
// The embedded *UserOptions pointer is wired to the generic options' custom
// field so both access paths (u.cache and u.custom.cache) see the same data.
func NewUser(opts ...Option[UserOptions]) *user {
	i := new(user)
	i.UserOptions = &i.options.custom
	for _, opt := range opts {
		// apply is used for consistency with the other repository
		// constructors (e.g. NewSecretGenerator).
		opt.apply(&i.options)
	}
	return i
}
// WithUserCache configures a cache for the user repository.
func WithUserCache(c *UserCache) Option[UserOptions] {
	return func(opts *options[UserOptions]) { opts.custom.cache = c }
}
// Create persists the user in the database and then publishes the
// corresponding event. The two steps are chained; each is wrapped in its own
// tracing span, and the outer span covers the whole operation.
func (u *user) Create(ctx context.Context, client database.Executor, user *User) (*User, error) {
	return tracing.Wrap(u.tracer, "user.Create",
		handler.Chain(
			handler.Decorate(
				execute(client).CreateUser,
				tracing.Decorate[*User, *User](u.tracer, tracing.WithSpanName("user.sql.Create")),
			),
			handler.Decorate(
				events(client).CreateUser,
				tracing.Decorate[*User, *User](u.tracer, tracing.WithSpanName("user.event.Create")),
			),
		),
	)(ctx, user)
}
// ByID returns the user with the given id.
// Lookup order: the cache is consulted first (skipped when no cache is
// configured); on a miss the database is queried and the result is written
// back to the cache.
// NOTE(review): the method mixes u.cache and u.custom.cache — both resolve to
// the same field via the embedded *UserOptions, but one spelling should be
// chosen for readability. Also note u.cache.ByID is taken as a method value
// while u.cache may be nil; presumably SkipNilHandler guarantees it is never
// invoked in that case — confirm.
func (u *user) ByID(ctx context.Context, client database.Querier, id string) (*User, error) {
	return handler.SkipNext(
		handler.SkipNilHandler(u.cache,
			handler.ResFuncToHandle(u.cache.ByID),
		),
		handler.Chain(
			handler.Decorate(
				query(client).UserByID,
				tracing.Decorate[string, *User](u.tracer, tracing.WithSpanName("user.sql.ByID")),
			),
			handler.SkipNilHandler(u.custom.cache, handler.NoReturnToHandle(u.cache.Set)),
		),
	)(ctx, id)
}
// ChangeEmail describes an email change of a user.
type ChangeEmail struct {
	UserID string
	Email  string
	// Opt *ChangeEmailOption
}

// The option types below are sketches of possible email-change behaviors
// (verified / return code / send code); they are not implemented yet.
// type ChangeEmailOption struct {
// 	returnCode bool
// 	isVerified bool
// 	sendCode bool
// }
// type ChangeEmailVerifiedOption struct {
// 	isVerified bool
// }
// type ChangeEmailReturnCodeOption struct {
// 	alg crypto.EncryptionAlgorithm
// }
// type ChangeEmailSendCodeOption struct {
// 	alg crypto.EncryptionAlgorithm
// 	urlTemplate string
// }

// ChangeEmail applies the given email change.
// TODO: not implemented — the body is intentionally empty.
func (u *user) ChangeEmail(ctx context.Context, client database.Executor, change *ChangeEmail) {
}
// EmailVerificationCode is the encrypted verification code sent to a user's
// email address, together with its creation time and lifetime.
type EmailVerificationCode struct {
	Code      *crypto.CryptoValue
	CreatedAt time.Time
	Expiry    time.Duration
}

// EmailVerificationCode loads the email verification code of the given user
// from the database, wrapped in tracing spans.
func (u *user) EmailVerificationCode(ctx context.Context, client database.Querier, userID string) (*EmailVerificationCode, error) {
	return tracing.Wrap(u.tracer, "user.EmailVerificationCode",
		handler.Decorate(
			query(client).EmailVerificationCode,
			tracing.Decorate[string, *EmailVerificationCode](u.tracer, tracing.WithSpanName("user.sql.EmailVerificationCode")),
		),
	)(ctx, userID)
}
// EmailVerificationFailed records a failed verification attempt for the given
// code. The underlying executor currently returns "not implemented".
func (u *user) EmailVerificationFailed(ctx context.Context, client database.Executor, code *EmailVerificationCode) error {
	_, err := tracing.Wrap(u.tracer, "user.EmailVerificationFailed",
		handler.ErrFuncToHandle(execute(client).EmailVerificationFailed),
	)(ctx, code)
	return err
}

// EmailVerificationSucceeded records a successful verification of the given
// code. The underlying executor currently returns "not implemented".
func (u *user) EmailVerificationSucceeded(ctx context.Context, client database.Executor, code *EmailVerificationCode) error {
	_, err := tracing.Wrap(u.tracer, "user.EmailVerificationSucceeded",
		handler.ErrFuncToHandle(execute(client).EmailVerificationSucceeded),
	)(ctx, code)
	return err
}

View File

@@ -1,52 +0,0 @@
package repository
import (
"context"
"log"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// UserCache caches users, indexed by id and by username.
type UserCache struct {
	cache.Cache[UserIndex, string, *User]
}

// UserIndex enumerates the lookup keys under which users are cached.
type UserIndex uint8

// UserIndices lists every index a user is stored under.
var UserIndices = []UserIndex{
	UserByIDIndex,
	UserByUsernameIndex,
}

const (
	UserByIDIndex UserIndex = iota
	UserByUsernameIndex
)
var _ cache.Entry[UserIndex, string] = (*User)(nil)

// Keys implements [cache.Entry]. It returns the cache keys of the user for
// the requested index, or nil for an unknown index.
func (u *User) Keys(index UserIndex) []string {
	switch index {
	case UserByIDIndex:
		return []string{u.ID}
	case UserByUsernameIndex:
		return []string{u.Username}
	default:
		return nil
	}
}
// NewUserCache wraps the given cache implementation.
func NewUserCache(c cache.Cache[UserIndex, string, *User]) *UserCache {
	return &UserCache{Cache: c}
}
// ByID returns the cached user with the given id, or nil on a cache miss.
func (c *UserCache) ByID(ctx context.Context, id string) *User {
	log.Println("cached.user.byID")
	cached, _ := c.Cache.Get(ctx, UserByIDIndex, id)
	return cached
}
// Set stores the user in the cache under all of its indices.
func (c *UserCache) Set(ctx context.Context, u *User) {
	log.Println("cached.user.set")
	c.Cache.Set(ctx, u)
}

View File

@@ -1,58 +0,0 @@
package repository
import (
"context"
"errors"
"log"
)
const userByIDQuery = `SELECT id, username FROM users WHERE id = $1`

// UserByID reads a single user by its id directly from the database.
func (q *querier) UserByID(ctx context.Context, id string) (res *User, err error) {
	log.Println("sql.user.byID")
	u := new(User)
	if err = q.client.QueryRow(ctx, userByIDQuery, id).Scan(&u.ID, &u.Username); err != nil {
		return nil, err
	}
	return u, nil
}
const emailVerificationCodeStmt = `SELECT created_at, expiry,code FROM email_verification_codes WHERE user_id = $1`

// EmailVerificationCode reads the verification code of the given user from
// the database.
func (q *querier) EmailVerificationCode(ctx context.Context, userID string) (res *EmailVerificationCode, err error) {
	log.Println("sql.user.emailVerificationCode")
	code := new(EmailVerificationCode)
	row := q.client.QueryRow(ctx, emailVerificationCodeStmt, userID)
	if err = row.Scan(&code.CreatedAt, &code.Expiry, &code.Code); err != nil {
		return nil, err
	}
	return code, nil
}
// CreateUser inserts the user into the users table and returns it unchanged.
func (e *executor) CreateUser(ctx context.Context, user *User) (res *User, err error) {
	log.Println("sql.user.create")
	if err = e.client.Exec(ctx, "INSERT INTO users (id, username) VALUES ($1, $2)", user.ID, user.Username); err != nil {
		return nil, err
	}
	return user, nil
}
// EmailVerificationFailed is not implemented yet.
func (e *executor) EmailVerificationFailed(ctx context.Context, code *EmailVerificationCode) error {
	return errors.New("not implemented")
}

// EmailVerificationSucceeded is not implemented yet.
func (e *executor) EmailVerificationSucceeded(ctx context.Context, code *EmailVerificationCode) error {
	return errors.New("not implemented")
}

// SetEmail is not implemented yet.
func (e *executor) SetEmail(ctx context.Context, userID, email string) error {
	return errors.New("not implemented")
}

View File

@@ -1,15 +0,0 @@
package repository
import (
"context"
"log"
)
// CreateUser pushes the user-created event to the event store and returns the
// user unchanged.
func (s *eventStore) CreateUser(ctx context.Context, user *User) (*User, error) {
	log.Println("event.user.create")
	if err := s.es.Push(ctx, user); err != nil {
		return nil, err
	}
	return user, nil
}

View File

@@ -1,225 +0,0 @@
-- Experimental object-graph schema: generic objects, parent/child edges and
-- key/value properties. Destructive: drops and reseeds everything.
DROP TABLE IF EXISTS properties;
DROP TABLE IF EXISTS parents;
DROP TABLE IF EXISTS objects;
-- objects is the registry of every entity, keyed by (type, id).
CREATE TABLE IF NOT EXISTS objects (
	type TEXT NOT NULL
	, id TEXT NOT NULL
	, PRIMARY KEY (type, id)
);
TRUNCATE objects CASCADE;
-- Seed data: two instances with organizations, users, projects, applications
-- and org domains.
INSERT INTO objects VALUES
	('instance', 'i1')
	, ('organization', 'o1')
	, ('user', 'u1')
	, ('user', 'u2')
	, ('organization', 'o2')
	, ('user', 'u3')
	, ('project', 'p3')
	, ('instance', 'i2')
	, ('organization', 'o3')
	, ('user', 'u4')
	, ('project', 'p1')
	, ('project', 'p2')
	, ('application', 'a1')
	, ('application', 'a2')
	, ('org_domain', 'od1')
	, ('org_domain', 'od2')
;
-- parents stores the edges of the object graph; removing either endpoint
-- removes the edge via the cascading foreign keys.
CREATE TABLE IF NOT EXISTS parents (
	parent_type TEXT NOT NULL
	, parent_id TEXT NOT NULL
	, child_type TEXT NOT NULL
	, child_id TEXT NOT NULL
	, PRIMARY KEY (parent_type, parent_id, child_type, child_id)
	, FOREIGN KEY (parent_type, parent_id) REFERENCES objects(type, id) ON DELETE CASCADE
	, FOREIGN KEY (child_type, child_id) REFERENCES objects(type, id) ON DELETE CASCADE
);
INSERT INTO parents VALUES
	('instance', 'i1', 'organization', 'o1')
	, ('organization', 'o1', 'user', 'u1')
	, ('organization', 'o1', 'user', 'u2')
	, ('instance', 'i1', 'organization', 'o2')
	, ('organization', 'o2', 'user', 'u3')
	, ('organization', 'o2', 'project', 'p3')
	, ('instance', 'i2', 'organization', 'o3')
	, ('organization', 'o3', 'user', 'u4')
	, ('organization', 'o3', 'project', 'p1')
	, ('organization', 'o3', 'project', 'p2')
	, ('project', 'p1', 'application', 'a1')
	, ('project', 'p2', 'application', 'a2')
	, ('organization', 'o3', 'org_domain', 'od1')
	, ('organization', 'o3', 'org_domain', 'od2')
;
-- properties holds one row per (object, key); value is an arbitrary JSONB
-- scalar and should_index marks rows covered by the partial indexes below.
CREATE TABLE properties (
	object_type TEXT NOT NULL
	, object_id TEXT NOT NULL
	, key TEXT NOT NULL
	, value JSONB NOT NULL
	, should_index BOOLEAN NOT NULL DEFAULT FALSE
	, PRIMARY KEY (object_type, object_id, key)
	, FOREIGN KEY (object_type, object_id) REFERENCES objects(type, id) ON DELETE CASCADE
);
-- Partial indexes: only rows flagged should_index are indexed.
CREATE INDEX properties_object_indexed ON properties (object_type, object_id) INCLUDE (value) WHERE should_index;
CREATE INDEX properties_value_indexed ON properties (object_type, key, value) WHERE should_index;
TRUNCATE properties;
INSERT INTO properties VALUES
	('instance', 'i1', 'name', '"Instance 1"', TRUE)
	, ('instance', 'i1', 'description', '"Instance 1 description"', FALSE)
	, ('instance', 'i2', 'name', '"Instance 2"', TRUE)
	, ('organization', 'o1', 'name', '"Organization 1"', TRUE)
	, ('org_domain', 'od1', 'domain', '"example.com"', TRUE)
	, ('org_domain', 'od1', 'is_primary', 'true', TRUE)
	, ('org_domain', 'od1', 'is_verified', 'true', FALSE)
	, ('org_domain', 'od2', 'domain', '"example.org"', TRUE)
	, ('org_domain', 'od2', 'is_primary', 'false', TRUE)
	, ('org_domain', 'od2', 'is_verified', 'false', FALSE)
;
-- events is the append-only log; global_sequence defaults to the transaction
-- id so events of one transaction share a position.
-- NOTE(review): sequence_order is NOT NULL without a default — every writer
-- must supply it explicitly (see push below).
CREATE TABLE events (
	id UUID PRIMARY KEY DEFAULT gen_random_uuid()
	, type TEXT NOT NULL
	, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
	, revision SMALLINT NOT NULL
	, creator TEXT NOT NULL
	, payload JSONB
	, global_sequence NUMERIC NOT NULL DEFAULT pg_current_xact_id()::TEXT::NUMERIC
	, sequence_order SMALLINT NOT NULL CHECK (sequence_order >= 0)
	-- , object_type TEXT NOT NULL
	-- , object_id TEXT NOT NULL
	-- , FOREIGN KEY (object_type, object_id) REFERENCES objects(type, id)
);
-- Composite types for the push API.
-- Fix: CREATE TYPE requires the AS keyword before the attribute list.
-- property is a single key/value pair of an object.
CREATE TYPE property AS (
	-- key must be a json path
	key TEXT
	-- value should be a primitive type
	, value JSONB
	-- indicates whether the property should be indexed
	, should_index BOOLEAN
);
CREATE TYPE parent AS (
	parent_type TEXT
	, parent_id TEXT
);
CREATE TYPE object AS (
	type TEXT
	, id TEXT
	, properties property[]
	-- an object automatically inherits the parents of its parent
	, parents parent[]
);
CREATE TYPE command AS (
	type TEXT
	, revision SMALLINT
	, creator TEXT
	, payload JSONB
	-- if properties is null the objects and all its child objects get deleted
	-- if the value of a property is null the property and all sub fields get deleted
	-- for example if the key is 'a.b' and the value is null the property 'a.b.c' will be deleted as well
	, objects object[]
);
-- update_object upserts the given object's properties.
-- A NULL property value deletes the property and every sub-property whose key
-- starts with the given key.
-- Fixes: FOREACH (not FOR ... IN ARRAY) iterates array elements in PL/pgSQL,
-- and the procedure body was missing its closing $$ LANGUAGE plpgsql;.
CREATE OR REPLACE PROCEDURE update_object(_object object)
AS $$
DECLARE
	_property property;
BEGIN
	FOREACH _property IN ARRAY _object.properties LOOP
		IF _property.value IS NULL THEN
			DELETE FROM properties
			WHERE object_type = _object.type
			AND object_id = _object.id
			AND key LIKE CONCAT(_property.key, '%');
		ELSE
			INSERT INTO properties (object_type, object_id, key, value, should_index)
			VALUES (_object.type, _object.id, _property.key, _property.value, _property.should_index)
			ON CONFLICT (object_type, object_id, key) DO UPDATE SET (value, should_index) = (EXCLUDED.value, EXCLUDED.should_index);
		END IF;
	END LOOP;
END;
$$ LANGUAGE plpgsql;
-- delete_object removes the object and, transitively, all of its children.
-- Fixes: parameters need types, the recursive CTE's column names must match
-- how they are referenced in the join (o.type / o.id), the DELETE needs a
-- terminating semicolon, and the body its $$ LANGUAGE plpgsql; terminator.
CREATE OR REPLACE PROCEDURE delete_object(_type TEXT, _id TEXT)
AS $$
BEGIN
	WITH RECURSIVE objects_to_delete (type, id) AS (
		SELECT _type, _id
		UNION
		SELECT p.child_type, p.child_id
		FROM parents p
		JOIN objects_to_delete o ON p.parent_type = o.type AND p.parent_id = o.id
	)
	DELETE FROM objects
	WHERE (type, id) IN (SELECT * FROM objects_to_delete);
END;
$$ LANGUAGE plpgsql;
-- push appends the given commands as events and applies their object changes.
-- It returns the transaction position (xact id) shared by all pushed events.
-- Fixes: NUMMERIC typo, missing END LOOP for the outer FOR (the RETURN fired
-- after the first command), CALL instead of PERFORM (update_object and
-- delete_object are procedures), and sequence_order must be supplied because
-- events.sequence_order is NOT NULL without a default.
CREATE OR REPLACE FUNCTION push(_commands command[])
RETURNS NUMERIC AS $$
DECLARE
	_command command;
	_index INT;
	_object object;
BEGIN
	FOR _index IN 1..array_length(_commands, 1) LOOP
		_command := _commands[_index];
		-- sequence_order disambiguates events created in the same transaction
		INSERT INTO events (type, revision, creator, payload, sequence_order)
		VALUES (_command.type, _command.revision, _command.creator, _command.payload, _index - 1);
		FOREACH _object IN ARRAY _command.objects LOOP
			IF _object.properties IS NULL THEN
				CALL delete_object(_object.type, _object.id);
			ELSE
				CALL update_object(_object);
			END IF;
		END LOOP;
	END LOOP;
	-- return once, after all commands have been applied
	RETURN pg_current_xact_id()::TEXT::NUMERIC;
END;
$$ LANGUAGE plpgsql;
-- Scratch area left over from manual testing.
-- NOTE(review): the BEGIN / RETURNING * / rollback fragment is incomplete —
-- RETURNING is not a statement on its own; presumably part of a deleted
-- INSERT ... RETURNING experiment.
BEGIN;
RETURNING *
;
rollback;
-- Example lookup: all properties of objects whose indexed 'name' property
-- equals "Instance 1".
SELECT
	*
FROM
	properties
WHERE
	(object_type, object_id) IN (
		SELECT
			object_type
			, object_id
		FROM
			properties
		where
			object_type = 'instance'
			and key = 'name'
			and value = '"Instance 1"'
			and should_index
	)
;

View File

@@ -1,310 +0,0 @@
-- postgres
-- Experiment v2: objects store a full JSONB payload; selected paths (declared
-- per model revision) are mirrored into indexed_properties with typed columns.
DROP TABLE IF EXISTS properties;
DROP TABLE IF EXISTS parents CASCADE;
DROP TABLE IF EXISTS objects CASCADE;
DROP TABLE IF EXISTS indexed_properties;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS models;
DROP TYPE IF EXISTS object CASCADe;
DROP TYPE IF EXISTS model CASCADE;
-- model references an object by model name and id (used for parent links).
CREATE TYPE model AS (
	name TEXT
	, id TEXT
);
-- object is the input value for set_object(s).
CREATE TYPE object AS (
	model TEXT
	, model_revision SMALLINT
	, id TEXT
	, payload JSONB
	, parents model[]
);
-- models declares, per (name, revision), which payload paths get indexed.
CREATE TABLE models (
	name TEXT
	, revision SMALLINT NOT NULL CONSTRAINT positive_revision CHECK (revision > 0)
	, indexed_paths TEXT[]
	, PRIMARY KEY (name, revision)
);
CREATE TABLE objects (
	model TEXT NOT NULL
	, model_revision SMALLINT NOT NULL
	, id TEXT NOT NULL
	, payload JSONB
	, PRIMARY KEY (model, id)
	, FOREIGN KEY (model, model_revision) REFERENCES models(name, revision) ON DELETE RESTRICT
);
-- indexed_properties mirrors indexed payload paths; scalar values are moved
-- into the typed columns by the ip_value_converter trigger below.
CREATE TABLE indexed_properties (
	model TEXT NOT NULL
	, model_revision SMALLINT NOT NULL
	, object_id TEXT NOT NULL
	, path TEXT NOT NULL
	, value JSONB
	, text_value TEXT
	, number_value NUMERIC
	, boolean_value BOOLEAN
	, PRIMARY KEY (model, object_id, path)
	, FOREIGN KEY (model, object_id) REFERENCES objects(model, id) ON DELETE CASCADE
	, FOREIGN KEY (model, model_revision) REFERENCES models(name, revision) ON DELETE RESTRICT
);
-- ip_value_converter moves a scalar JSONB value into the matching typed
-- column (boolean_value / number_value / text_value) so it can be indexed
-- natively; non-scalar values (objects, arrays, json null) stay in value.
-- Fix: a PL/pgSQL CASE branch must contain at least one statement — the empty
-- ELSE was a syntax error; NULL; is the explicit no-op.
CREATE OR REPLACE FUNCTION ip_value_converter()
RETURNS TRIGGER AS $$
BEGIN
	CASE jsonb_typeof(NEW.value)
	WHEN 'boolean' THEN
		NEW.boolean_value := NEW.value::BOOLEAN;
		NEW.value := NULL;
	WHEN 'number' THEN
		NEW.number_value := NEW.value::NUMERIC;
		NEW.value := NULL;
	WHEN 'string' THEN
		NEW.text_value := (NEW.value#>>'{}')::TEXT;
		NEW.value := NULL;
	ELSE
		-- do nothing
		NULL;
	END CASE;
	RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Run the converter on every write to indexed_properties.
CREATE TRIGGER ip_value_converter_before_insert
BEFORE INSERT
ON indexed_properties
FOR EACH ROW
EXECUTE FUNCTION ip_value_converter();
CREATE TRIGGER ip_value_converter_before_update
BEFORE UPDATE
ON indexed_properties
FOR EACH ROW
EXECUTE FUNCTION ip_value_converter();
-- One partial index per typed column, with and without the model revision.
CREATE INDEX ip_search_model_object ON indexed_properties (model, path, value) WHERE value IS NOT NULL;
CREATE INDEX ip_search_model_rev_object ON indexed_properties (model, model_revision, path, value) WHERE value IS NOT NULL;
CREATE INDEX ip_search_model_text ON indexed_properties (model, path, text_value) WHERE text_value IS NOT NULL;
CREATE INDEX ip_search_model_rev_text ON indexed_properties (model, model_revision, path, text_value) WHERE text_value IS NOT NULL;
CREATE INDEX ip_search_model_number ON indexed_properties (model, path, number_value) WHERE number_value IS NOT NULL;
CREATE INDEX ip_search_model_rev_number ON indexed_properties (model, model_revision, path, number_value) WHERE number_value IS NOT NULL;
CREATE INDEX ip_search_model_boolean ON indexed_properties (model, path, boolean_value) WHERE boolean_value IS NOT NULL;
CREATE INDEX ip_search_model_rev_boolean ON indexed_properties (model, model_revision, path, boolean_value) WHERE boolean_value IS NOT NULL;
-- parents stores the edges of the object graph.
CREATE TABLE IF NOT EXISTS parents (
	parent_model TEXT NOT NULL
	, parent_id TEXT NOT NULL
	, child_model TEXT NOT NULL
	, child_id TEXT NOT NULL
	, PRIMARY KEY (parent_model, parent_id, child_model, child_id)
	, FOREIGN KEY (parent_model, parent_id) REFERENCES objects(model, id) ON DELETE CASCADE
	, FOREIGN KEY (child_model, child_id) REFERENCES objects(model, id) ON DELETE CASCADE
);
-- Seed the model catalog.
INSERT INTO models VALUES
	('instance', 1, ARRAY['name', 'domain.name'])
	, ('organization', 1, ARRAY['name'])
	, ('user', 1, ARRAY['username', 'email', 'firstname', 'lastname'])
;
-- jsonb_to_rows flattens a JSONB document into (path, value) rows.
-- Nested objects are recursed into; additionally a marker row
-- (parent_path, '{}') is emitted for each nested object so callers can tell
-- object nodes from missing nodes. JSON null becomes SQL NULL.
CREATE OR REPLACE FUNCTION jsonb_to_rows(j jsonb, _path text[] DEFAULT ARRAY[]::text[])
RETURNS TABLE (path text[], value jsonb)
LANGUAGE plpgsql
AS $$
DECLARE
	k text;
	v jsonb;
BEGIN
	FOR k, v IN SELECT * FROM jsonb_each(j) LOOP
		IF jsonb_typeof(v) = 'object' THEN
			-- Recursive call for nested objects, appending the key to the path
			RETURN QUERY SELECT * FROM jsonb_to_rows(v, _path || k)
				UNION VALUES (_path, '{}'::JSONB);
		ELSE
			-- Base case: return the key path and value
			CASE WHEN jsonb_typeof(v) = 'null' THEN
				RETURN QUERY SELECT _path || k, NULL::jsonb;
			ELSE
				RETURN QUERY SELECT _path || k, v;
			END CASE;
		END IF;
	END LOOP;
END;
$$;
-- merge_payload deep-merges _new into _old:
--   * scalar values from _new overwrite values in _old,
--   * JSON null in _new deletes the key ('delete_key' mode of jsonb_set_lax),
--   * nested objects are merged key-by-key (existing objects are kept).
-- Fix: a subquery in FROM requires an alias in PostgreSQL — the cursor query
-- was invalid without "AS fields".
CREATE OR REPLACE FUNCTION merge_payload(_old JSONB, _new JSONB)
RETURNS JSONB
LANGUAGE plpgsql
AS $$
DECLARE
	_fields CURSOR FOR SELECT DISTINCT ON (path)
		path
		, last_value(value) over (partition by path) as value
	FROM (
		SELECT path, value FROM jsonb_to_rows(_old)
		UNION ALL
		SELECT path, value FROM jsonb_to_rows(_new)
	) AS fields;
	_path text[];
	_value jsonb;
BEGIN
	OPEN _fields;
	LOOP
		FETCH _fields INTO _path, _value;
		EXIT WHEN NOT FOUND;
		IF jsonb_typeof(_value) = 'object' THEN
			-- object marker row: only create the empty object if the path is new
			IF _old #> _path IS NOT NULL THEN
				CONTINUE;
			END IF;
			_old = jsonb_set_lax(_old, _path, '{}'::jsonb, TRUE);
			CONTINUE;
		END IF;
		_old = jsonb_set_lax(_old, _path, _value, TRUE, 'delete_key');
	END LOOP;
	RETURN _old;
END;
$$;
-- set_object upserts an object (merging payloads on conflict), refreshes its
-- indexed properties and maintains parent links: the object is linked to
-- itself, to each given parent, and to every ancestor of those parents.
CREATE OR REPLACE FUNCTION set_object(_object object)
RETURNS VOID AS $$
DECLARE
	_parent model;
BEGIN
	INSERT INTO objects (model, model_revision, id, payload)
	VALUES (_object.model, _object.model_revision, _object.id, _object.payload)
	ON CONFLICT (model, id) DO UPDATE
	SET
		payload = merge_payload(objects.payload, EXCLUDED.payload)
		, model_revision = EXCLUDED.model_revision;
	-- Mirror the indexed paths of the model into indexed_properties.
	-- NOTE(review): UNNEST in the select list combined with GROUP BY is
	-- fragile; a LATERAL unnest would be clearer — verify this produces one
	-- row per indexed path as intended.
	INSERT INTO indexed_properties (model, model_revision, object_id, path, value)
	SELECT
		*
	FROM (
		SELECT
			_object.model
			, _object.model_revision
			, _object.id
			, UNNEST(m.indexed_paths) AS "path"
			, _object.payload #> string_to_array(UNNEST(m.indexed_paths), '.') AS "value"
		FROM
			models m
		WHERE
			m.name = _object.model
			AND m.revision = _object.model_revision
		GROUP BY
			m.name
			, m.revision
	)
	WHERE
		"value" IS NOT NULL
	ON CONFLICT (model, object_id, path) DO UPDATE
	SET
		value = EXCLUDED.value
		, text_value = EXCLUDED.text_value
		, number_value = EXCLUDED.number_value
		, boolean_value = EXCLUDED.boolean_value
	;
	-- self link so ancestor propagation below also covers the object itself
	INSERT INTO parents (parent_model, parent_id, child_model, child_id)
	VALUES
		(_object.model, _object.id, _object.model, _object.id)
	ON CONFLICT (parent_model, parent_id, child_model, child_id) DO NOTHING;
	IF _object.parents IS NULL THEN
		RETURN;
	END IF;
	FOREACH _parent IN ARRAY _object.parents
	LOOP
		-- inherit all ancestors of the parent
		INSERT INTO parents (parent_model, parent_id, child_model, child_id)
		SELECT
			p.parent_model
			, p.parent_id
			, _object.model
			, _object.id
		FROM parents p
		WHERE
			p.child_model = _parent.name
			AND p.child_id = _parent.id
		ON CONFLICT (parent_model, parent_id, child_model, child_id) DO NOTHING
		;
		-- direct link to the parent itself
		INSERT INTO parents (parent_model, parent_id, child_model, child_id)
		VALUES
			(_parent.name, _parent.id, _object.model, _object.id)
		ON CONFLICT (parent_model, parent_id, child_model, child_id) DO NOTHING;
	END LOOP;
END;
$$ LANGUAGE plpgsql;
-- set_objects applies set_object to each element in order.
CREATE OR REPLACE FUNCTION set_objects(_objects object[])
RETURNS VOID AS $$
DECLARE
	_object object;
BEGIN
	FOREACH _object IN ARRAY _objects
	LOOP
		PERFORM set_object(_object);
	END LOOP;
END;
$$ LANGUAGE plpgsql;
-- CREATE OR REPLACE FUNCTION set_objects(VARIADIC _objects object[])
-- RETURNS VOID AS $$
-- BEGIN
-- PERFORM set_objectS(_objects);
-- END;
-- $$ LANGUAGE plpgsql;
-- Manual smoke test: seed an instance, an organization and a user.
SELECT set_objects(
	ARRAY[
		ROW('instance', 1::smallint, 'i1', '{"name": "i1", "domain": {"name": "example2.com", "isVerified": false}}', NULL)::object
		, ROW('organization', 1::smallint, 'o1', '{"name": "o1", "description": "something useful"}', ARRAY[
			ROW('instance', 'i1')::model
		])::object
		, ROW('user', 1::smallint, 'u1', '{"username": "u1", "description": "something useful", "firstname": "Silvan"}', ARRAY[
			ROW('instance', 'i1')::model
			, ROW('organization', 'o1')::model
		])::object
	]
);
-- Partial update: only domain.isVerified changes, the rest must be preserved.
SELECT set_objects(
	ARRAY[
		ROW('instance', 1::smallint, 'i1', '{"domain": {"isVerified": true}}', NULL)::object
	]
);
-- Look up instances by indexed name.
SELECT
	o.*
FROM
	indexed_properties ip
JOIN
	objects o
ON
	ip.model = o.model
	AND ip.object_id = o.id
WHERE
	ip.model = 'instance'
	AND ip.path = 'name'
	AND ip.text_value = 'i1';
;
-- merge_payload demo: nested merge plus deletion of key "a" via JSON null.
select * from merge_payload(
	'{"a": "asdf", "b": {"c":{"d": 1, "g": {"h": [4,5,6]}}}, "f": [1,2,3]}'::jsonb
	, '{"b": {"c":{"d": 1, "g": {"i": [4,5,6]}}}, "a": null}'::jsonb
);

View File

@@ -1,272 +0,0 @@
-- postgres
-- Experiment v3: like v2, but indexed_properties.path is a text[] and index
-- maintenance is done by triggers on the objects table instead of set_object.
DROP TABLE IF EXISTS properties;
DROP TABLE IF EXISTS parents CASCADE;
DROP TABLE IF EXISTS objects CASCADE;
DROP TABLE IF EXISTS indexed_properties;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS models;
DROP TYPE IF EXISTS object CASCADe;
DROP TYPE IF EXISTS model CASCADE;
-- model references an object by model name and id (used for parent links).
CREATE TYPE model AS (
	name TEXT
	, id TEXT
);
CREATE TYPE object AS (
	model TEXT
	, model_revision SMALLINT
	, id TEXT
	, payload JSONB
	, parents model[]
);
-- models declares, per (name, revision), which payload paths get indexed.
CREATE TABLE models (
	name TEXT
	, revision SMALLINT NOT NULL CONSTRAINT positive_revision CHECK (revision > 0)
	, indexed_paths TEXT[]
	, PRIMARY KEY (name, revision)
);
CREATE TABLE objects (
	model TEXT NOT NULL
	, model_revision SMALLINT NOT NULL
	, id TEXT NOT NULL
	, payload JSONB
	, PRIMARY KEY (model, id)
	, FOREIGN KEY (model, model_revision) REFERENCES models(name, revision) ON DELETE RESTRICT
);
-- path is an array here (v2 used a dotted string).
CREATE TABLE indexed_properties (
	model TEXT NOT NULL
	, model_revision SMALLINT NOT NULL
	, object_id TEXT NOT NULL
	, path TEXT[] NOT NULL
	, value JSONB
	, text_value TEXT
	, number_value NUMERIC
	, boolean_value BOOLEAN
	, PRIMARY KEY (model, object_id, path)
	, FOREIGN KEY (model, object_id) REFERENCES objects(model, id) ON DELETE CASCADE
	, FOREIGN KEY (model, model_revision) REFERENCES models(name, revision) ON DELETE RESTRICT
);
CREATE TABLE IF NOT EXISTS parents (
	parent_model TEXT NOT NULL
	, parent_id TEXT NOT NULL
	, child_model TEXT NOT NULL
	, child_id TEXT NOT NULL
	, PRIMARY KEY (parent_model, parent_id, child_model, child_id)
	, FOREIGN KEY (parent_model, parent_id) REFERENCES objects(model, id) ON DELETE CASCADE
	, FOREIGN KEY (child_model, child_id) REFERENCES objects(model, id) ON DELETE CASCADE
);
-- jsonb_to_rows flattens a JSONB document into (path, value) rows.
-- Unlike the v2 variant, no marker row is emitted for nested objects; only
-- leaf values are returned. JSON null becomes SQL NULL.
CREATE OR REPLACE FUNCTION jsonb_to_rows(j jsonb, _path text[] DEFAULT ARRAY[]::text[])
RETURNS TABLE (path text[], value jsonb)
LANGUAGE plpgsql
AS $$
DECLARE
	k text;
	v jsonb;
BEGIN
	FOR k, v IN SELECT * FROM jsonb_each(j) LOOP
		IF jsonb_typeof(v) = 'object' THEN
			-- Recursive call for nested objects, appending the key to the path
			RETURN QUERY SELECT * FROM jsonb_to_rows(v, _path || k);
		ELSE
			-- Base case: return the key path and value
			CASE WHEN jsonb_typeof(v) = 'null' THEN
				RETURN QUERY SELECT _path || k, NULL::jsonb;
			ELSE
				RETURN QUERY SELECT _path || k, v;
			END CASE;
		END IF;
	END LOOP;
END;
$$;
-- after insert trigger which is called after the object was inserted and then inserts the properties
-- Only payload paths listed in the model's indexed_paths are mirrored into
-- indexed_properties.
CREATE OR REPLACE FUNCTION set_ip_from_object_insert()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
	_property RECORD;
	_model models;
BEGIN
	SELECT * INTO _model FROM models WHERE name = NEW.model AND revision = NEW.model_revision;
	FOR _property IN SELECT * FROM jsonb_to_rows(NEW.payload) LOOP
		-- indexed_paths stores dotted strings; compare against the joined path
		IF ARRAY_TO_STRING(_property.path, '.') = ANY(_model.indexed_paths) THEN
			INSERT INTO indexed_properties (model, model_revision, object_id, path, value)
			VALUES (NEW.model, NEW.model_revision, NEW.id, _property.path, _property.value);
		END IF;
	END LOOP;
	RETURN NULL;
END;
$$;
CREATE TRIGGER set_ip_from_object_insert
AFTER INSERT ON objects
FOR EACH ROW
EXECUTE FUNCTION set_ip_from_object_insert();
-- before update trigger which is called before an object is updated
-- it updates the properties table first
-- and computes the correct payload for the object
-- partial update of the object is allowed
-- if the value of a property is set to null the properties and all its children are deleted
-- NOTE(review): the two EXIT statements below terminate the enclosing loops
-- entirely — for the property loop this means only the first directly-set
-- property is processed; CONTINUE looks like the intended behavior. Confirm
-- before reuse.
CREATE OR REPLACE FUNCTION set_ip_from_object_update()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
	_property RECORD;
	_payload JSONB;
	_model models;
	_path_index INT;
BEGIN
	_payload := OLD.payload;
	SELECT * INTO _model FROM models WHERE name = NEW.model AND revision = NEW.model_revision;
	FOR _property IN SELECT * FROM jsonb_to_rows(NEW.payload) ORDER BY array_length(path, 1) LOOP
		-- set the properties
		CASE WHEN _property.value IS NULL THEN
			RAISE NOTICE 'DELETE PROPERTY: %', _property;
			-- delete the property and all of its children
			DELETE FROM indexed_properties
			WHERE model = NEW.model
			AND model_revision = NEW.model_revision
			AND object_id = NEW.id
			AND path[:ARRAY_LENGTH(_property.path, 1)] = _property.path;
		ELSE
			RAISE NOTICE 'UPSERT PROPERTY: %', _property;
			-- drop entries that are ancestors or descendants of the new path
			-- (a path cannot be both a leaf and an object at the same time)
			DELETE FROM indexed_properties
			WHERE
				model = NEW.model
				AND model_revision = NEW.model_revision
				AND object_id = NEW.id
				AND (
					_property.path[:array_length(path, 1)] = path
					OR path[:array_length(_property.path, 1)] = _property.path
				)
				AND array_length(path, 1) <> array_length(_property.path, 1);
			-- insert property if should be indexed
			IF ARRAY_TO_STRING(_property.path, '.') = ANY(_model.indexed_paths) THEN
				RAISE NOTICE 'path should be indexed: %', _property.path;
				INSERT INTO indexed_properties (model, model_revision, object_id, path, value)
				VALUES (NEW.model, NEW.model_revision, NEW.id, _property.path, _property.value)
				ON CONFLICT (model, object_id, path) DO UPDATE
				SET value = EXCLUDED.value;
			END IF;
		END CASE;
		-- if property is updated we can set it directly
		IF _payload #> _property.path IS NOT NULL THEN
			_payload = jsonb_set_lax(COALESCE(_payload, '{}'::JSONB), _property.path, _property.value, TRUE);
			EXIT;
		END IF;
		-- ensure parent object exists
		FOR _path_index IN 1..(array_length(_property.path, 1)-1) LOOP
			IF _payload #> _property.path[:_path_index] IS NOT NULL AND jsonb_typeof(_payload #> _property.path[:_path_index]) = 'object' THEN
				CONTINUE;
			END IF;
			_payload = jsonb_set(_payload, _property.path[:_path_index], '{}'::JSONB, TRUE);
			EXIT;
		END LOOP;
		_payload = jsonb_set_lax(_payload, _property.path, _property.value, TRUE, 'delete_key');
	END LOOP;
	-- update the payload
	NEW.payload = _payload;
	RETURN NEW;
END;
$$;
CREATE OR REPLACE TRIGGER set_ip_from_object_update
BEFORE UPDATE ON objects
FOR EACH ROW
EXECUTE FUNCTION set_ip_from_object_update();
-- set_object upserts an object (index maintenance happens via the triggers
-- above) and records the direct parent links.
CREATE OR REPLACE FUNCTION set_object(_object object)
RETURNS VOID
LANGUAGE plpgsql
AS $$
BEGIN
	INSERT INTO objects (model, model_revision, id, payload)
	VALUES (_object.model, _object.model_revision, _object.id, _object.payload)
	ON CONFLICT (model, id) DO UPDATE
	SET
		payload = EXCLUDED.payload
		, model_revision = EXCLUDED.model_revision
	;
	-- UNNEST of a composite array exposes the model's fields as columns
	INSERT INTO parents (parent_model, parent_id, child_model, child_id)
	SELECT
		p.name
		, p.id
		, _object.model
		, _object.id
	FROM UNNEST(_object.parents) AS p
	ON CONFLICT DO NOTHING;
END;
$$;
-- set_objects applies set_object to each element in order.
CREATE OR REPLACE FUNCTION set_objects(_objects object[])
RETURNS VOID
LANGUAGE plpgsql
AS $$
DECLARE
	_object object;
BEGIN
	FOREACH _object IN ARRAY _objects LOOP
		PERFORM set_object(_object);
	END LOOP;
END;
$$;
-- Seed the model catalog.
INSERT INTO models VALUES
	('instance', 1, ARRAY['name', 'domain.name'])
	, ('organization', 1, ARRAY['name'])
	, ('user', 1, ARRAY['username', 'email', 'firstname', 'lastname'])
;
-- Direct inserts exercise the AFTER INSERT trigger.
INSERT INTO objects VALUES
	('instance', 1, 'i2', '{"name": "i2", "domain": {"name": "example2.com", "isVerified": false}}')
	, ('instance', 1, 'i3', '{"name": "i3", "domain": {"name": "example3.com", "isVerified": false}}')
	, ('instance', 1, 'i4', '{"name": "i4", "domain": {"name": "example4.com", "isVerified": false}}')
;
-- Manual test of the BEFORE UPDATE trigger (rolled back).
begin;
UPDATE objects SET payload = '{"domain": {"isVerified": true}}' WHERE model = 'instance';
rollback;
-- Smoke test of set_objects with parent links.
SELECT set_objects(
	ARRAY[
		ROW('instance', 1::smallint, 'i1', '{"name": "i1", "domain": {"name": "example2.com", "isVerified": false}}', NULL)::object
		, ROW('organization', 1::smallint, 'o1', '{"name": "o1", "description": "something useful"}', ARRAY[
			ROW('instance', 'i1')::model
		])::object
		, ROW('user', 1::smallint, 'u1', '{"username": "u1", "description": "something useful", "firstname": "Silvan"}', ARRAY[
			ROW('instance', 'i1')::model
			, ROW('organization', 'o1')::model
		])::object
	]
);

View File

@@ -1,280 +0,0 @@
-- postgres
-- Experiment v4: objects are modified through explicit manipulation commands
-- (create/set/delete/add) instead of payload merging.
DROP TABLE IF EXISTS properties;
DROP TABLE IF EXISTS parents CASCADE;
DROP TABLE IF EXISTS objects CASCADE;
DROP TABLE IF EXISTS indexed_properties;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS models;
DROP TYPE IF EXISTS object CASCADe;
DROP TYPE IF EXISTS model CASCADE;
CREATE TABLE models (
	name TEXT
	, revision SMALLINT NOT NULL CONSTRAINT positive_revision CHECK (revision > 0)
	, indexed_paths TEXT[]
	, PRIMARY KEY (name, revision)
);
CREATE TABLE objects (
	model TEXT NOT NULL
	, model_revision SMALLINT NOT NULL
	, id TEXT NOT NULL
	, payload JSONB
	, PRIMARY KEY (model, id)
	, FOREIGN KEY (model, model_revision) REFERENCES models(name, revision) ON DELETE RESTRICT
);
CREATE TYPE operation_type AS ENUM (
	-- inserts a new object, if the object already exists the operation will fail
	-- path is ignored
	'create'
	-- if path is null an upsert is performed and the payload is overwritten
	-- if path is not null the value is set at the given path
	, 'set'
	-- drops an object if path is null
	-- if path is set but no value, the field at the given path is dropped
	-- if path and value are set and the field is an array the value is removed from the array
	, 'delete'
	-- adds a value to an array
	-- or a field if it does not exist, if the field exists the operation will fail
	, 'add'
);
CREATE TYPE object_manipulation AS (
	path TEXT[]
	, operation operation_type
	, value JSONB
);
-- NOTE(review): unlike the other experiments, parents here has a surrogate
-- UUID key and NO unique constraint over the four link columns — ON CONFLICT
-- with an explicit column target therefore cannot be used on this table.
CREATE TABLE IF NOT EXISTS parents (
	id UUID PRIMARY KEY DEFAULT gen_random_uuid()
	, parent_model TEXT NOT NULL
	, parent_id TEXT NOT NULL
	, child_model TEXT NOT NULL
	, child_id TEXT NOT NULL
	, FOREIGN KEY (parent_model, parent_id) REFERENCES objects(model, id) ON DELETE CASCADE
	, FOREIGN KEY (child_model, child_id) REFERENCES objects(model, id) ON DELETE CASCADE
);
CREATE TYPE parent_operation AS ENUM (
	'add'
	, 'remove'
);
CREATE TYPE parent_manipulation AS (
	model TEXT
	, id TEXT
	, operation parent_operation
);
-- jsonb_set_ensure_path sets _value at _path, creating any missing
-- intermediate objects on the way.
-- Fix: the original built a nested object bottom-up and merged it with the
-- top-level || operator, which replaced (instead of extended) an existing
-- object at the first existing prefix — e.g. setting {a,b,c} on
-- {"a":{"x":1}} dropped "x". Creating each missing prefix top-down and then
-- delegating to JSONB_SET preserves existing siblings.
CREATE OR REPLACE FUNCTION jsonb_set_ensure_path(_jsonb JSONB, _path TEXT[], _value JSONB)
RETURNS JSONB
LANGUAGE plpgsql
AS $$
DECLARE
	i INT;
BEGIN
	FOR i IN 1..COALESCE(ARRAY_LENGTH(_path, 1), 0) - 1 LOOP
		IF _jsonb #> _path[:i] IS NULL THEN
			_jsonb := JSONB_SET(_jsonb, _path[:i], '{}'::JSONB);
		END IF;
	END LOOP;
	RETURN JSONB_SET(_jsonb, _path, _value);
END;
$$;
-- current: {}
-- '{"a": {"b": {"c": {"d": {"e": 1}}}}}'::JSONB #> '{a,b,c}' = 1
-- scratch: drop previous iterations of the manipulation function
drop function manipulate_object;
drop function object_set;
-- object_set applies a list of payload manipulations and parent-link
-- manipulations to the object identified by (_model, _model_revision, _id).
-- Manipulation semantics are documented on operation_type / parent_operation.
-- Fixes over the previous draft:
--   * _parent was used but never declared,
--   * the 'set' upsert targeted ON CONFLICT (model, model_revision, id)
--     although the primary key is (model, id),
--   * the parents table has no unique constraint over its four link columns,
--     so ON CONFLICT must be the bare DO NOTHING form,
--   * the 'add' parent branch selected a single column into a four-column
--     insert; it now inserts the direct link plus all inherited ancestors,
--   * the 'remove' branch was left half-written (dangling WHERE); it now
--     deletes every link between the parent (and its ancestors) and the
--     object (and its descendants), per the original comment's intent.
CREATE OR REPLACE FUNCTION object_set(
	_model TEXT
	, _model_revision SMALLINT
	, _id TEXT
	, _manipulations object_manipulation[]
	, _parents parent_manipulation[]
)
RETURNS objects
LANGUAGE plpgsql
AS $$
DECLARE
	_manipulation object_manipulation;
	_parent parent_manipulation;
BEGIN
	FOREACH _manipulation IN ARRAY _manipulations LOOP
		CASE _manipulation.operation
		WHEN 'create' THEN
			INSERT INTO objects (model, model_revision, id, payload)
			VALUES (_model, _model_revision, _id, _manipulation.value);
		WHEN 'delete' THEN
			CASE
			WHEN _manipulation.path IS NULL THEN
				-- drop the whole object
				DELETE FROM objects
				WHERE
					model = _model
					AND model_revision = _model_revision
					AND id = _id;
			WHEN _manipulation.value IS NULL THEN
				-- drop the field at path
				UPDATE
					objects
				SET
					payload = payload #- _manipulation.path
				WHERE
					model = _model
					AND model_revision = _model_revision
					AND id = _id;
			ELSE
				-- remove a single value from the array at path
				UPDATE
					objects
				SET
					payload = jsonb_set(payload, _manipulation.path, (SELECT JSONB_AGG(v) FROM JSONB_PATH_QUERY(payload, ('$.' || ARRAY_TO_STRING(_manipulation.path, '.') || '[*]')::jsonpath) AS v WHERE v <> _manipulation.value))
				WHERE
					model = _model
					AND model_revision = _model_revision
					AND id = _id;
			END CASE;
		WHEN 'set' THEN
			IF _manipulation.path IS NULL THEN
				INSERT INTO objects (model, model_revision, id, payload)
				VALUES (_model, _model_revision, _id, _manipulation.value)
				ON CONFLICT (model, id)
				DO UPDATE SET payload = _manipulation.value;
			ELSE
				UPDATE
					objects
				SET
					payload = jsonb_set_ensure_path(payload, _manipulation.path, _manipulation.value)
				WHERE
					model = _model
					AND model_revision = _model_revision
					AND id = _id;
			END IF;
		WHEN 'add' THEN
			UPDATE
				objects
			SET
				-- NULL result (neither missing nor array) leaves payload NULL;
				-- a stricter version would RAISE EXCEPTION here instead
				payload = CASE
					WHEN jsonb_typeof(payload #> _manipulation.path) IS NULL THEN
						jsonb_set_ensure_path(payload, _manipulation.path, _manipulation.value)
					WHEN jsonb_typeof(payload #> _manipulation.path) = 'array' THEN
						jsonb_set(payload, _manipulation.path, COALESCE(payload #> _manipulation.path, '[]'::JSONB) || _manipulation.value)
				END
			WHERE
				model = _model
				AND model_revision = _model_revision
				AND id = _id;
		END CASE;
	END LOOP;
	IF _parents IS NOT NULL THEN
		FOREACH _parent IN ARRAY _parents LOOP
			CASE _parent.operation
			WHEN 'add' THEN
				-- insert the new parent link and inherit all of its ancestors
				INSERT INTO parents (parent_model, parent_id, child_model, child_id)
				SELECT
					p.parent_model
					, p.parent_id
					, _model
					, _id
				FROM parents p
				WHERE
					p.child_model = _parent.model
					AND p.child_id = _parent.id
				UNION
				SELECT
					_parent.model
					, _parent.id
					, _model
					, _id
				ON CONFLICT DO NOTHING;
			WHEN 'remove' THEN
				-- remove every link between the parent side (the given parent
				-- and its ancestors) and the child side (this object and its
				-- descendants)
				DELETE FROM parents
				WHERE id IN (
					WITH RECURSIVE ancestors (model, id) AS (
						SELECT _parent.model, _parent.id
						UNION
						SELECT p.parent_model, p.parent_id
						FROM parents p
						JOIN ancestors a ON p.child_model = a.model AND p.child_id = a.id
					), descendants (model, id) AS (
						SELECT _model, _id
						UNION
						SELECT p.child_model, p.child_id
						FROM parents p
						JOIN descendants d ON p.parent_model = d.model AND p.parent_id = d.id
					)
					SELECT p.id
					FROM parents p
					JOIN ancestors a ON p.parent_model = a.model AND p.parent_id = a.id
					JOIN descendants c ON p.child_model = c.model AND p.child_id = c.id
				);
			END CASE;
		END LOOP;
	END IF;
	RETURN NULL;
END;
$$;
-- Seed the model catalog.
INSERT INTO models VALUES
	('instance', 1, ARRAY['name', 'domain.name'])
	, ('organization', 1, ARRAY['name'])
	, ('user', 1, ARRAY['username', 'email', 'firstname', 'lastname'])
;
-- Manual tests, each rolled back.
-- NOTE(review): these call manipulate_object, but the function defined above
-- is named object_set — the calls fail as written.
rollback;
BEGIN;
SELECT * FROM manipulate_object(
	'instance'
	, 1::SMALLINT
	, 'i1'
	, ARRAY[
		ROW(NULL, 'create', '{"name": "i1"}'::JSONB)::object_manipulation
		, ROW(ARRAY['domain'], 'set', '{"name": "example.com", "isVerified": false}'::JSONB)::object_manipulation
		, ROW(ARRAY['domain', 'isVerified'], 'set', 'true'::JSONB)::object_manipulation
		, ROW(ARRAY['domain', 'name'], 'delete', NULL)::object_manipulation
		, ROW(ARRAY['domain', 'name'], 'add', '"i1.com"')::object_manipulation
		, ROW(ARRAY['managers'], 'set', '[]'::JSONB)::object_manipulation
		, ROW(ARRAY['managers', 'objects'], 'add', '[{"a": "asdf"}, {"a": "qewr"}]'::JSONB)::object_manipulation
		, ROW(ARRAY['managers', 'objects'], 'delete', '{"a": "asdf"}'::JSONB)::object_manipulation
		, ROW(ARRAY['some', 'objects'], 'set', '{"a": "asdf"}'::JSONB)::object_manipulation
		-- , ROW(NULL, 'delete', NULL)::object_manipulation
	]
);
select * from objects;
ROLLBACK;
BEGIN;
SELECT * FROM manipulate_object(
	'instance'
	, 1::SMALLINT
	, 'i1'
	, ARRAY[
		ROW(NULL, 'create', '{"name": "i1"}'::JSONB)::object_manipulation
		, ROW(ARRAY['domain', 'name'], 'set', '"example.com"'::JSONB)::object_manipulation
	]
);
select * from objects;
ROLLBACK;
-- jsonpath demo: filter an array by a bound parameter
select jsonb_path_query_array('{"a": [12, 13, 14, 15]}'::JSONB, ('$.a ? (@ != $val)')::jsonpath, jsonb_build_object('val', '12'));

View File

@@ -1,62 +0,0 @@
-- aggregates tracks the current sequence per (instance, type, id) tuple and
-- is the referential anchor for events.
CREATE TABLE IF NOT EXISTS aggregates(
    id TEXT NOT NULL
    , type TEXT NOT NULL
    , instance_id TEXT NOT NULL
    , current_sequence INT NOT NULL DEFAULT 0
    , PRIMARY KEY (instance_id, type, id)
);
-- events is the append-only event store; rows are removed only when their
-- aggregate is deleted (ON DELETE CASCADE).
CREATE TABLE IF NOT EXISTS events (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid()
    -- object type that the event is related to
    , aggregate TEXT NOT NULL
    -- id of the object that the event is related to
    , aggregate_id TEXT NOT NULL
    , instance_id TEXT NOT NULL
    -- time the event was created
    , created_at TIMESTAMPTZ NOT NULL DEFAULT now()
    -- user that created the event
    , creator TEXT
    -- type of the event
    , type TEXT NOT NULL
    -- version of the event
    , revision SMALLINT NOT NULL
    -- changed fields or NULL
    , payload JSONB
    -- global ordering derived from the writing transaction's id
    , position NUMERIC NOT NULL DEFAULT pg_current_xact_id()::TEXT::NUMERIC
    -- orders events written within the same transaction (same position)
    , in_position_order INT2 NOT NULL
    , FOREIGN KEY (instance_id, aggregate, aggregate_id) REFERENCES aggregates(instance_id, type, id) ON DELETE CASCADE
);
-- instances holds per-instance state.
CREATE TABLE IF NOT EXISTS instances(
    id TEXT
    , name TEXT NOT NULL
    , created_at TIMESTAMPTZ NOT NULL
    , updated_at TIMESTAMPTZ NOT NULL
    , default_org_id TEXT
    , iam_project_id TEXT
    , console_client_id TEXT
    , console_app_id TEXT
    , default_language VARCHAR(10)
    , PRIMARY KEY (id)
);
-- instance_domains stores the domains of an instance; removed with the instance.
CREATE TABLE IF NOT EXISTS instance_domains(
    instance_id TEXT NOT NULL
    , domain TEXT NOT NULL
    , is_primary BOOLEAN NOT NULL DEFAULT FALSE
    , is_verified BOOLEAN NOT NULL DEFAULT FALSE
    , PRIMARY KEY (instance_id, domain)
    , FOREIGN KEY (instance_id) REFERENCES instances(id) ON DELETE CASCADE
);
-- reverse lookup: domain -> instance
CREATE INDEX IF NOT EXISTS instance_domain_search_idx ON instance_domains (domain);

View File

@@ -1,112 +0,0 @@
// Package cache provides abstraction of cache implementations that can be used by zitadel.
package cache
import (
"context"
"time"
"github.com/zitadel/logging"
)
// Purpose describes which object types are stored by a cache.
type Purpose int

//go:generate enumer -type Purpose -transform snake -trimprefix Purpose
const (
	PurposeUnspecified Purpose = iota
	PurposeAuthzInstance
	PurposeMilestones
	PurposeOrganization
	PurposeIdPFormCallback
)

// Cache stores objects with a value of type `V`.
// Objects may be referred to by one or more indices.
// Implementations may encode the value for storage.
// This means non-exported fields may be lost and objects
// with function values may fail to encode.
// See https://pkg.go.dev/encoding/json#Marshal for example.
//
// `I` is the type by which indices are identified,
// typically an enum for type-safe access.
// Indices are defined when calling the constructor of an implementation of this interface.
// It is illegal to refer to an index not defined during construction.
//
// `K` is the type used as key in each index.
// Due to the limitations in type constraints, all indices use the same key type.
//
// Implementations are free to use stricter type constraints or fixed typing.
type Cache[I, K comparable, V Entry[I, K]] interface {
	// Get an object through specified index.
	// An [IndexUnknownError] may be returned if the index is unknown.
	// [ErrCacheMiss] is returned if the key was not found in the index,
	// or the object is not valid.
	Get(ctx context.Context, index I, key K) (V, bool)

	// Set an object.
	// Keys are created on each index based on the [Entry.Keys] method.
	// If any key maps to an existing object, the object is invalidated,
	// regardless if the object has other keys defined in the new entry.
	// This to prevent ghost objects when an entry reduces the amount of keys
	// for a given index.
	Set(ctx context.Context, value V)

	// Invalidate an object through specified index.
	// Implementations may choose to instantly delete the object,
	// defer until prune or a separate cleanup routine.
	// Invalidated objects are no longer returned from Get.
	// It is safe to call Invalidate multiple times or on non-existing entries.
	Invalidate(ctx context.Context, index I, key ...K) error

	// Delete one or more keys from a specific index.
	// An [IndexUnknownError] may be returned if the index is unknown.
	// The referred object is not invalidated and may still be accessible though
	// other indices and keys.
	// It is safe to call Delete multiple times or on non-existing entries.
	Delete(ctx context.Context, index I, key ...K) error

	// Truncate deletes all cached objects.
	Truncate(ctx context.Context) error
}

// Entry contains a value of type `V` to be cached.
//
// `I` is the type by which indices are identified,
// typically an enum for type-safe access.
//
// `K` is the type used as key in an index.
// Due to the limitations in type constraints, all indices use the same key type.
type Entry[I, K comparable] interface {
	// Keys returns which keys map to the object in a specified index.
	// May return nil if the index is unknown or when there are no keys.
	Keys(index I) (key []K)
}

// Connector identifies the storage backend of a cache.
type Connector int

//go:generate enumer -type Connector -transform snake -trimprefix Connector -linecomment -text
const (
	// Empty line comment ensures empty string for unspecified value
	ConnectorUnspecified Connector = iota //
	ConnectorMemory
	ConnectorPostgres
	ConnectorRedis
)

// Config is the cache configuration shared by all connectors.
type Config struct {
	Connector Connector

	// Age since an object was added to the cache,
	// after which the object is considered invalid.
	// 0 disables max age checks.
	MaxAge time.Duration

	// Age since last use (Get) of an object,
	// after which the object is considered invalid.
	// 0 disables last use age checks.
	LastUseAge time.Duration

	// Log allows logging of the specific cache.
	// By default only errors are logged to stderr.
	Log *logging.Config
}

View File

@@ -1,49 +0,0 @@
// Package connector provides glue between the [cache.Cache] interface and implementations from the connector sub-packages.
package connector
import (
"context"
"fmt"
"github.com/zitadel/zitadel/backend/storage/cache"
"github.com/zitadel/zitadel/backend/storage/cache/connector/gomap"
"github.com/zitadel/zitadel/backend/storage/cache/connector/noop"
)
// CachesConfig holds the connector configurations plus one optional cache
// configuration per cache purpose. A nil purpose config disables that cache.
type CachesConfig struct {
	Connectors struct {
		Memory gomap.Config
	}
	Instance         *cache.Config
	Milestones       *cache.Config
	Organization     *cache.Config
	IdPFormCallbacks *cache.Config
}

// Connectors bundles all started cache connectors together with the
// configuration they were started from.
type Connectors struct {
	Config CachesConfig
	Memory *gomap.Connector
}

// StartConnectors initializes all configured connectors.
// A nil config yields zero-valued (disabled) connectors.
func StartConnectors(conf *CachesConfig) (Connectors, error) {
	if conf == nil {
		return Connectors{}, nil
	}
	return Connectors{
		Config: *conf,
		// NewConnector returns nil when the memory connector is disabled.
		Memory: gomap.NewConnector(conf.Connectors.Memory),
	}, nil
}

// StartCache returns a cache for the given purpose.
// Without configuration (or with an unspecified connector) a no-op cache is
// returned; otherwise the requested connector must be enabled or an error
// is returned.
func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Context, indices []I, purpose cache.Purpose, conf *cache.Config, connectors Connectors) (cache.Cache[I, K, V], error) {
	if conf == nil || conf.Connector == cache.ConnectorUnspecified {
		return noop.NewCache[I, K, V](), nil
	}
	if conf.Connector == cache.ConnectorMemory && connectors.Memory != nil {
		c := gomap.NewCache[I, K, V](background, indices, *conf)
		// The auto-prune routine lives as long as the background context.
		connectors.Memory.Config.StartAutoPrune(background, c, purpose)
		return c, nil
	}
	return nil, fmt.Errorf("cache connector %q not enabled", conf.Connector)
}

View File

@@ -1,23 +0,0 @@
package gomap
import (
"github.com/zitadel/zitadel/backend/storage/cache"
)
// Config configures the in-memory (go map) cache connector.
type Config struct {
	// Enabled toggles the connector; when false NewConnector returns nil.
	Enabled   bool
	AutoPrune cache.AutoPruneConfig
}

// Connector carries the shared auto-prune settings for gomap caches.
type Connector struct {
	Config cache.AutoPruneConfig
}

// NewConnector returns the memory connector, or nil when it is disabled.
func NewConnector(config Config) *Connector {
	if !config.Enabled {
		return nil
	}
	return &Connector{
		Config: config.AutoPrune,
	}
}

View File

@@ -1,200 +0,0 @@
package gomap
import (
"context"
"errors"
"log/slog"
"maps"
"os"
"sync"
"sync/atomic"
"time"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// mapCache is the in-memory implementation of [cache.PrunerCache],
// backed by one go map per configured index.
type mapCache[I, K comparable, V cache.Entry[I, K]] struct {
	config   *cache.Config
	indexMap map[I]*index[K, V]
	logger   *slog.Logger
}

// NewCache returns an in-memory Cache implementation based on the builtin go map type.
// Object values are stored as-is and there is no encoding or decoding involved.
func NewCache[I, K comparable, V cache.Entry[I, K]](background context.Context, indices []I, config cache.Config) cache.PrunerCache[I, K, V] {
	m := &mapCache[I, K, V]{
		config:   &config,
		indexMap: make(map[I]*index[K, V], len(indices)),
		// default logger: errors only, to stderr, with source locations
		logger: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
			AddSource: true,
			Level:     slog.LevelError,
		})),
	}
	// An explicit log config overrides the default logger.
	if config.Log != nil {
		m.logger = config.Log.Slog()
	}
	m.logger.InfoContext(background, "map cache logging enabled")
	// One lookup table per index; all tables share the same config.
	for _, name := range indices {
		m.indexMap[name] = &index[K, V]{
			config:  m.config,
			entries: make(map[K]*entry[V]),
		}
	}
	return m
}
// Get implements [cache.Cache.Get].
// It looks up key in the requested index and returns the cached value if the
// entry exists and is still valid. Unknown indices, misses and unexpected
// errors are logged and reported as a miss (ok == false).
func (c *mapCache[I, K, V]) Get(ctx context.Context, index I, key K) (value V, ok bool) {
	i, ok := c.indexMap[index]
	if !ok {
		c.logger.ErrorContext(ctx, "map cache get", "err", cache.NewIndexUnknownErr(index), "index", index, "key", key)
		return value, false
	}
	entry, err := i.Get(key)
	if err == nil {
		c.logger.DebugContext(ctx, "map cache get", "index", index, "key", key)
		return entry.value, true
	}
	if errors.Is(err, cache.ErrCacheMiss) {
		c.logger.InfoContext(ctx, "map cache get", "err", err, "index", index, "key", key)
		return value, false
	}
	// Log the error the index actually returned; previously this branch
	// fabricated an IndexUnknownError even though the index was known.
	c.logger.ErrorContext(ctx, "map cache get", "err", err, "index", index, "key", key)
	return value, false
}
// Set implements [cache.Cache.Set].
// The value is stored in every index under all keys the entry reports for
// that index; existing entries under those keys are replaced.
func (c *mapCache[I, K, V]) Set(ctx context.Context, value V) {
	now := time.Now()
	entry := &entry[V]{
		value:   value,
		created: now,
	}
	entry.lastUse.Store(now.UnixMicro())
	// The same entry pointer is shared across all indices, so invalidating
	// it through one index makes it invalid everywhere.
	for name, i := range c.indexMap {
		keys := value.Keys(name)
		i.Set(keys, entry)
		c.logger.DebugContext(ctx, "map cache set", "index", name, "keys", keys)
	}
}

// Invalidate implements [cache.Cache.Invalidate].
func (c *mapCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) error {
	i, ok := c.indexMap[index]
	if !ok {
		return cache.NewIndexUnknownErr(index)
	}
	i.Invalidate(keys)
	c.logger.DebugContext(ctx, "map cache invalidate", "index", index, "keys", keys)
	return nil
}

// Delete implements [cache.Cache.Delete].
// Only the given keys are removed; the referenced entries remain reachable
// through other keys and indices.
func (c *mapCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) error {
	i, ok := c.indexMap[index]
	if !ok {
		return cache.NewIndexUnknownErr(index)
	}
	i.Delete(keys)
	c.logger.DebugContext(ctx, "map cache delete", "index", index, "keys", keys)
	return nil
}

// Prune implements [cache.Pruner] by dropping expired and invalidated
// entries from every index.
func (c *mapCache[I, K, V]) Prune(ctx context.Context) error {
	for name, index := range c.indexMap {
		index.Prune()
		c.logger.DebugContext(ctx, "map cache prune", "index", name)
	}
	return nil
}

// Truncate implements [cache.Cache.Truncate] by clearing every index.
func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {
	for name, index := range c.indexMap {
		index.Truncate()
		c.logger.DebugContext(ctx, "map cache truncate", "index", name)
	}
	return nil
}
// index is a single named lookup table of the map cache.
// Access to entries is guarded by mutex.
type index[K comparable, V any] struct {
	mutex   sync.RWMutex
	config  *cache.Config
	entries map[K]*entry[V]
}

// Get returns the entry stored under key.
// [cache.ErrCacheMiss] is returned when the key is absent or the entry is no
// longer valid according to the cache config.
func (i *index[K, V]) Get(key K) (*entry[V], error) {
	i.mutex.RLock()
	entry, ok := i.entries[key]
	i.mutex.RUnlock()
	if ok && entry.isValid(i.config) {
		return entry, nil
	}
	return nil, cache.ErrCacheMiss
}

// Set stores the same entry under every provided key.
// Receiver renamed from `c` to `i` for consistency with the other methods
// of this type.
func (i *index[K, V]) Set(keys []K, entry *entry[V]) {
	i.mutex.Lock()
	for _, key := range keys {
		i.entries[key] = entry
	}
	i.mutex.Unlock()
}

// Invalidate marks the entries of all given keys as invalid.
// A read lock suffices: the map itself is not mutated and the invalid flag
// is atomic.
func (i *index[K, V]) Invalidate(keys []K) {
	i.mutex.RLock()
	for _, key := range keys {
		if entry, ok := i.entries[key]; ok {
			entry.invalid.Store(true)
		}
	}
	i.mutex.RUnlock()
}

// Delete removes the given keys; entries remain reachable through other keys.
func (i *index[K, V]) Delete(keys []K) {
	i.mutex.Lock()
	for _, key := range keys {
		delete(i.entries, key)
	}
	i.mutex.Unlock()
}

// Prune removes all entries that are no longer valid.
func (i *index[K, V]) Prune() {
	i.mutex.Lock()
	maps.DeleteFunc(i.entries, func(_ K, entry *entry[V]) bool {
		return !entry.isValid(i.config)
	})
	i.mutex.Unlock()
}

// Truncate drops all entries of the index.
func (i *index[K, V]) Truncate() {
	i.mutex.Lock()
	i.entries = make(map[K]*entry[V])
	i.mutex.Unlock()
}
// entry wraps a cached value with its bookkeeping metadata.
// invalid and lastUse are atomics so they can be updated while holding only
// the index's read lock.
type entry[V any] struct {
	value   V
	created time.Time
	invalid atomic.Bool
	lastUse atomic.Int64 // UnixMicro time
}

// isValid reports whether the entry may still be served.
// It checks the invalid flag, MaxAge (time since creation) and LastUseAge
// (time since the last Get). Side effects: an expired entry is marked
// invalid, and a still-valid entry has its lastUse refreshed when
// LastUseAge checks are enabled.
func (e *entry[V]) isValid(c *cache.Config) bool {
	if e.invalid.Load() {
		return false
	}
	now := time.Now()
	if c.MaxAge > 0 {
		if e.created.Add(c.MaxAge).Before(now) {
			e.invalid.Store(true)
			return false
		}
	}
	if c.LastUseAge > 0 {
		lastUse := e.lastUse.Load()
		if time.UnixMicro(lastUse).Add(c.LastUseAge).Before(now) {
			e.invalid.Store(true)
			return false
		}
		// CompareAndSwap avoids clobbering a newer timestamp written by a
		// concurrent Get.
		e.lastUse.CompareAndSwap(lastUse, now.UnixMicro())
	}
	return true
}

View File

@@ -1,329 +0,0 @@
package gomap
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// testIndex enumerates the indices the caches under test are built with.
type testIndex int

const (
	testIndexID testIndex = iota
	testIndexName
)

var testIndices = []testIndex{
	testIndexID,
	testIndexName,
}

// testObject is a minimal cache entry: addressable by a single id and by
// any number of names.
type testObject struct {
	id    string
	names []string
}

// Keys implements [cache.Entry].
func (o *testObject) Keys(index testIndex) []string {
	switch index {
	case testIndexID:
		return []string{o.id}
	case testIndexName:
		return o.names
	default:
		return nil
	}
}

// Test_mapCache_Get covers a hit, a key miss and an unknown-index lookup.
func Test_mapCache_Get(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)
	type args struct {
		index testIndex
		key   string
	}
	tests := []struct {
		name   string
		args   args
		want   *testObject
		wantOk bool
	}{
		{
			name: "ok",
			args: args{
				index: testIndexID,
				key:   "id",
			},
			want:   obj,
			wantOk: true,
		},
		{
			name: "miss",
			args: args{
				index: testIndexID,
				key:   "spanac",
			},
			want:   nil,
			wantOk: false,
		},
		{
			// 99 is not part of testIndices
			name: "unknown index",
			args: args{
				index: 99,
				key:   "id",
			},
			want:   nil,
			wantOk: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, ok := c.Get(context.Background(), tt.args.index, tt.args.key)
			assert.Equal(t, tt.want, got)
			assert.Equal(t, tt.wantOk, ok)
		})
	}
}
// Test_mapCache_Invalidate checks that invalidating an entry through one
// index makes it unreachable through every index, because all indices share
// the same entry.
func Test_mapCache_Invalidate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)
	err := c.Invalidate(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	// invalidated via the name index, so the id lookup must miss too
	got, ok := c.Get(context.Background(), testIndexID, "id")
	assert.Nil(t, got)
	assert.False(t, ok)
}

// Test_mapCache_Delete checks that deleting one key removes only that key;
// the entry stays reachable through its remaining keys and indices.
func Test_mapCache_Delete(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	obj := &testObject{
		id:    "id",
		names: []string{"foo", "bar"},
	}
	c.Set(context.Background(), obj)
	err := c.Delete(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	// Shouldn't find object by deleted name
	got, ok := c.Get(context.Background(), testIndexName, "bar")
	assert.Nil(t, got)
	assert.False(t, ok)
	// Should find object by other name
	got, ok = c.Get(context.Background(), testIndexName, "foo")
	assert.Equal(t, obj, got)
	assert.True(t, ok)
	// Should find object by id
	got, ok = c.Get(context.Background(), testIndexID, "id")
	assert.Equal(t, obj, got)
	assert.True(t, ok)
}

// Test_mapCache_Prune checks that pruning drops invalidated entries while
// valid entries stay reachable.
func Test_mapCache_Prune(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	objects := []*testObject{
		{
			id:    "id1",
			names: []string{"foo", "bar"},
		},
		{
			id:    "id2",
			names: []string{"hello"},
		},
	}
	for _, obj := range objects {
		c.Set(context.Background(), obj)
	}
	// invalidate one entry
	err := c.Invalidate(context.Background(), testIndexName, "bar")
	require.NoError(t, err)
	err = c.(cache.Pruner).Prune(context.Background())
	require.NoError(t, err)
	// Other object should still be found
	got, ok := c.Get(context.Background(), testIndexID, "id2")
	assert.Equal(t, objects[1], got)
	assert.True(t, ok)
}
// Test_mapCache_Truncate checks that truncation empties every index.
func Test_mapCache_Truncate(t *testing.T) {
	c := NewCache[testIndex, string, *testObject](context.Background(), testIndices, cache.Config{
		MaxAge:     time.Second,
		LastUseAge: time.Second / 4,
		Log: &logging.Config{
			Level:     "debug",
			AddSource: true,
		},
	})
	objects := []*testObject{
		{
			id:    "id1",
			names: []string{"foo", "bar"},
		},
		{
			id:    "id2",
			names: []string{"hello"},
		},
	}
	for _, obj := range objects {
		c.Set(context.Background(), obj)
	}
	err := c.Truncate(context.Background())
	require.NoError(t, err)
	// inspect the internal maps directly: every index must be empty
	mc := c.(*mapCache[testIndex, string, *testObject])
	for _, index := range mc.indexMap {
		index.mutex.RLock()
		assert.Len(t, index.entries, 0)
		index.mutex.RUnlock()
	}
}

// Test_entry_isValid covers the invalid flag, MaxAge and LastUseAge checks,
// including the cases where either age check is disabled (zero value).
func Test_entry_isValid(t *testing.T) {
	type fields struct {
		created time.Time
		invalid bool
		lastUse time.Time
	}
	tests := []struct {
		name   string
		fields fields
		config *cache.Config
		want   bool
	}{
		{
			name: "invalid",
			fields: fields{
				created: time.Now(),
				invalid: true,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "max age exceeded",
			fields: fields{
				created: time.Now().Add(-(time.Minute + time.Second)),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "max age disabled",
			fields: fields{
				created: time.Now().Add(-(time.Minute + time.Second)),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				LastUseAge: time.Second,
			},
			want: true,
		},
		{
			name: "last use age exceeded",
			fields: fields{
				created: time.Now().Add(-(time.Minute / 2)),
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: false,
		},
		{
			name: "last use age disabled",
			fields: fields{
				created: time.Now().Add(-(time.Minute / 2)),
				invalid: false,
				lastUse: time.Now().Add(-(time.Second * 2)),
			},
			config: &cache.Config{
				MaxAge: time.Minute,
			},
			want: true,
		},
		{
			name: "valid",
			fields: fields{
				created: time.Now(),
				invalid: false,
				lastUse: time.Now(),
			},
			config: &cache.Config{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			e := &entry[any]{
				created: tt.fields.created,
			}
			e.invalid.Store(tt.fields.invalid)
			e.lastUse.Store(tt.fields.lastUse.UnixMicro())
			got := e.isValid(tt.config)
			assert.Equal(t, tt.want, got)
		})
	}
}

View File

@@ -1,21 +0,0 @@
package noop
import (
"context"
"github.com/zitadel/zitadel/backend/storage/cache"
)
// noop implements [cache.Cache] without storing anything:
// Get always misses and every mutation succeeds silently.
type noop[I, K comparable, V cache.Entry[I, K]] struct{}

// NewCache returns a cache that does nothing
func NewCache[I, K comparable, V cache.Entry[I, K]]() cache.Cache[I, K, V] {
	return noop[I, K, V]{}
}

func (noop[I, K, V]) Set(context.Context, V)                          {}
func (noop[I, K, V]) Get(context.Context, I, K) (value V, ok bool)    { return }
func (noop[I, K, V]) Invalidate(context.Context, I, ...K) (err error) { return }
func (noop[I, K, V]) Delete(context.Context, I, ...K) (err error)     { return }
func (noop[I, K, V]) Prune(context.Context) (err error)               { return }
func (noop[I, K, V]) Truncate(context.Context) (err error)            { return }

View File

@@ -1,98 +0,0 @@
// Code generated by "enumer -type Connector -transform snake -trimprefix Connector -linecomment -text"; DO NOT EDIT.
package cache
import (
"fmt"
"strings"
)
const _ConnectorName = "memorypostgresredis"
var _ConnectorIndex = [...]uint8{0, 0, 6, 14, 19}
const _ConnectorLowerName = "memorypostgresredis"
func (i Connector) String() string {
if i < 0 || i >= Connector(len(_ConnectorIndex)-1) {
return fmt.Sprintf("Connector(%d)", i)
}
return _ConnectorName[_ConnectorIndex[i]:_ConnectorIndex[i+1]]
}
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _ConnectorNoOp() {
var x [1]struct{}
_ = x[ConnectorUnspecified-(0)]
_ = x[ConnectorMemory-(1)]
_ = x[ConnectorPostgres-(2)]
_ = x[ConnectorRedis-(3)]
}
var _ConnectorValues = []Connector{ConnectorUnspecified, ConnectorMemory, ConnectorPostgres, ConnectorRedis}
var _ConnectorNameToValueMap = map[string]Connector{
_ConnectorName[0:0]: ConnectorUnspecified,
_ConnectorLowerName[0:0]: ConnectorUnspecified,
_ConnectorName[0:6]: ConnectorMemory,
_ConnectorLowerName[0:6]: ConnectorMemory,
_ConnectorName[6:14]: ConnectorPostgres,
_ConnectorLowerName[6:14]: ConnectorPostgres,
_ConnectorName[14:19]: ConnectorRedis,
_ConnectorLowerName[14:19]: ConnectorRedis,
}
var _ConnectorNames = []string{
_ConnectorName[0:0],
_ConnectorName[0:6],
_ConnectorName[6:14],
_ConnectorName[14:19],
}
// ConnectorString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func ConnectorString(s string) (Connector, error) {
if val, ok := _ConnectorNameToValueMap[s]; ok {
return val, nil
}
if val, ok := _ConnectorNameToValueMap[strings.ToLower(s)]; ok {
return val, nil
}
return 0, fmt.Errorf("%s does not belong to Connector values", s)
}
// ConnectorValues returns all values of the enum
func ConnectorValues() []Connector {
return _ConnectorValues
}
// ConnectorStrings returns a slice of all String values of the enum
func ConnectorStrings() []string {
strs := make([]string, len(_ConnectorNames))
copy(strs, _ConnectorNames)
return strs
}
// IsAConnector returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Connector) IsAConnector() bool {
for _, v := range _ConnectorValues {
if i == v {
return true
}
}
return false
}
// MarshalText implements the encoding.TextMarshaler interface for Connector
func (i Connector) MarshalText() ([]byte, error) {
return []byte(i.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface for Connector
func (i *Connector) UnmarshalText(text []byte) error {
var err error
*i, err = ConnectorString(string(text))
return err
}

View File

@@ -1,29 +0,0 @@
package cache
import (
"errors"
"fmt"
)
// IndexUnknownError is returned when a cache operation refers to an index
// that was not defined when the cache was constructed.
type IndexUnknownError[I comparable] struct {
	index I
}

// NewIndexUnknownErr returns an [IndexUnknownError] for the given index.
func NewIndexUnknownErr[I comparable](index I) error {
	return IndexUnknownError[I]{index}
}

// Error implements the error interface.
// Receivers unified to `e` (previously `i` and `a` were mixed).
func (e IndexUnknownError[I]) Error() string {
	return fmt.Sprintf("index %v unknown", e.index)
}

// Is reports whether err is an IndexUnknownError for the same index,
// making the type work with [errors.Is].
func (e IndexUnknownError[I]) Is(err error) bool {
	if other, ok := err.(IndexUnknownError[I]); ok {
		return e.index == other.index
	}
	return false
}

var (
	// ErrCacheMiss is returned when a key was not found in an index,
	// or the cached object is no longer valid.
	ErrCacheMiss = errors.New("cache miss")
)

View File

@@ -1,76 +0,0 @@
package cache
import (
"context"
"math/rand"
"time"
"github.com/jonboulle/clockwork"
"github.com/zitadel/logging"
)
// Pruner is an optional [Cache] interface.
type Pruner interface {
// Prune deletes all invalidated or expired objects.
Prune(ctx context.Context) error
}
type PrunerCache[I, K comparable, V Entry[I, K]] interface {
Cache[I, K, V]
Pruner
}
type AutoPruneConfig struct {
// Interval at which the cache is automatically pruned.
// 0 or lower disables automatic pruning.
Interval time.Duration
// Timeout for an automatic prune.
// It is recommended to keep the value shorter than AutoPruneInterval
// 0 or lower disables automatic pruning.
Timeout time.Duration
}
// StartAutoPrune starts a background routine that periodically prunes the
// given pruner. The returned close func stops the routine.
func (c AutoPruneConfig) StartAutoPrune(background context.Context, pruner Pruner, purpose Purpose) (close func()) {
	return c.startAutoPrune(background, pruner, purpose, clockwork.NewRealClock())
}

// startAutoPrune is the clock-injectable implementation used by tests.
func (c *AutoPruneConfig) startAutoPrune(background context.Context, pruner Pruner, purpose Purpose, clock clockwork.Clock) (close func()) {
	if c.Interval <= 0 {
		return func() {}
	}
	background, cancel := context.WithCancel(background)
	// randomize the first interval
	// (spreads prune load when many caches start at the same time)
	timer := clock.NewTimer(time.Duration(rand.Int63n(int64(c.Interval))))
	go c.pruneTimer(background, pruner, purpose, timer)
	return cancel
}

// pruneTimer loops until the context is done, pruning each time the timer
// fires and re-arming it with the configured interval.
// Prune errors are logged, not propagated: a failed prune must not stop
// the loop.
func (c *AutoPruneConfig) pruneTimer(background context.Context, pruner Pruner, purpose Purpose, timer clockwork.Timer) {
	// Drain the timer on exit so a pending value is not leaked.
	defer func() {
		if !timer.Stop() {
			<-timer.Chan()
		}
	}()
	for {
		select {
		case <-background.Done():
			return
		case <-timer.Chan():
			err := c.doPrune(background, pruner)
			logging.OnError(err).WithField("purpose", purpose).Error("cache auto prune")
			timer.Reset(c.Interval)
		}
	}
}
// doPrune runs a single prune, bounded by the configured Timeout when one
// is set. The derived context is always cancelled on return so resources
// attached to it are released promptly.
func (c *AutoPruneConfig) doPrune(background context.Context, pruner Pruner) error {
	// Previously a cancellable context was created unconditionally and then
	// immediately replaced by the timeout context; create only the one that
	// is actually used.
	if c.Timeout > 0 {
		ctx, cancel := context.WithTimeout(background, c.Timeout)
		defer cancel()
		return pruner.Prune(ctx)
	}
	ctx, cancel := context.WithCancel(background)
	defer cancel()
	return pruner.Prune(ctx)
}

View File

@@ -1,43 +0,0 @@
package cache
import (
"context"
"testing"
"time"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/assert"
)
// testPruner signals on called each time Prune runs.
type testPruner struct {
	called chan struct{}
}

// Prune implements [Pruner]; it blocks until the test consumes the signal.
func (p *testPruner) Prune(context.Context) error {
	p.called <- struct{}{}
	return nil
}
// TestAutoPruneConfig_startAutoPrune verifies that the automatic prune
// routine fires once the (fake) clock advances past the interval.
func TestAutoPruneConfig_startAutoPrune(t *testing.T) {
	c := AutoPruneConfig{
		Interval: time.Second,
		Timeout:  time.Millisecond,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	pruner := testPruner{
		called: make(chan struct{}),
	}
	clock := clockwork.NewFakeClock()
	// renamed from `close` to `stop` to avoid shadowing the builtin close
	stop := c.startAutoPrune(ctx, &pruner, PurposeAuthzInstance, clock)
	defer stop()
	clock.Advance(time.Second)
	select {
	case _, ok := <-pruner.called:
		assert.True(t, ok)
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
}

View File

@@ -1,90 +0,0 @@
// Code generated by "enumer -type Purpose -transform snake -trimprefix Purpose"; DO NOT EDIT.
package cache
import (
"fmt"
"strings"
)
const _PurposeName = "unspecifiedauthz_instancemilestonesorganizationid_p_form_callback"
var _PurposeIndex = [...]uint8{0, 11, 25, 35, 47, 65}
const _PurposeLowerName = "unspecifiedauthz_instancemilestonesorganizationid_p_form_callback"
func (i Purpose) String() string {
if i < 0 || i >= Purpose(len(_PurposeIndex)-1) {
return fmt.Sprintf("Purpose(%d)", i)
}
return _PurposeName[_PurposeIndex[i]:_PurposeIndex[i+1]]
}
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _PurposeNoOp() {
var x [1]struct{}
_ = x[PurposeUnspecified-(0)]
_ = x[PurposeAuthzInstance-(1)]
_ = x[PurposeMilestones-(2)]
_ = x[PurposeOrganization-(3)]
_ = x[PurposeIdPFormCallback-(4)]
}
var _PurposeValues = []Purpose{PurposeUnspecified, PurposeAuthzInstance, PurposeMilestones, PurposeOrganization, PurposeIdPFormCallback}
var _PurposeNameToValueMap = map[string]Purpose{
_PurposeName[0:11]: PurposeUnspecified,
_PurposeLowerName[0:11]: PurposeUnspecified,
_PurposeName[11:25]: PurposeAuthzInstance,
_PurposeLowerName[11:25]: PurposeAuthzInstance,
_PurposeName[25:35]: PurposeMilestones,
_PurposeLowerName[25:35]: PurposeMilestones,
_PurposeName[35:47]: PurposeOrganization,
_PurposeLowerName[35:47]: PurposeOrganization,
_PurposeName[47:65]: PurposeIdPFormCallback,
_PurposeLowerName[47:65]: PurposeIdPFormCallback,
}
var _PurposeNames = []string{
_PurposeName[0:11],
_PurposeName[11:25],
_PurposeName[25:35],
_PurposeName[35:47],
_PurposeName[47:65],
}
// PurposeString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func PurposeString(s string) (Purpose, error) {
if val, ok := _PurposeNameToValueMap[s]; ok {
return val, nil
}
if val, ok := _PurposeNameToValueMap[strings.ToLower(s)]; ok {
return val, nil
}
return 0, fmt.Errorf("%s does not belong to Purpose values", s)
}
// PurposeValues returns all values of the enum
func PurposeValues() []Purpose {
return _PurposeValues
}
// PurposeStrings returns a slice of all String values of the enum
func PurposeStrings() []string {
strs := make([]string, len(_PurposeNames))
copy(strs, _PurposeNames)
return strs
}
// IsAPurpose returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Purpose) IsAPurpose() bool {
for _, v := range _PurposeValues {
if i == v {
return true
}
}
return false
}

View File

@@ -1,10 +0,0 @@
package database
import (
"context"
)
// Connector connects to a database and returns a ready-to-use pool.
type Connector interface {
	Connect(ctx context.Context) (Pool, error)
	// bla4.Configurer
}

View File

@@ -1,93 +0,0 @@
package database
import (
"context"
"io/fs"
)
// Row is a single result row that can be scanned into destination values.
type Row interface {
	Scan(dest ...any) error
}

// Rows is an iterable result set.
type Rows interface {
	Row
	Next() bool
	Close() error
	Err() error
}

// Transaction is a database transaction that can be nested via Begin.
type Transaction interface {
	Commit(ctx context.Context) error
	Rollback(ctx context.Context) error
	// End finishes the transaction based on err.
	// NOTE(review): presumably commits on nil err and rolls back otherwise —
	// confirm in the implementation.
	End(ctx context.Context, err error) error
	Begin(ctx context.Context) (Transaction, error)
	QueryExecutor
}

// Client is a single acquired connection.
type Client interface {
	Beginner
	QueryExecutor
	// Release returns the connection to its pool.
	Release(ctx context.Context) error
}

// Pool is a connection pool.
type Pool interface {
	Beginner
	QueryExecutor
	Acquire(ctx context.Context) (Client, error)
	Close(ctx context.Context) error
}

// TransactionOptions configure isolation level and access mode of a
// transaction.
type TransactionOptions struct {
	IsolationLevel IsolationLevel
	AccessMode     AccessMode
}

// IsolationLevel selects the transaction isolation level.
type IsolationLevel uint8

const (
	IsolationLevelSerializable IsolationLevel = iota
	IsolationLevelReadCommitted
)

// AccessMode selects read-write or read-only transactions.
type AccessMode uint8

const (
	AccessModeReadWrite AccessMode = iota
	AccessModeReadOnly
)

// Beginner starts transactions.
type Beginner interface {
	Begin(ctx context.Context, opts *TransactionOptions) (Transaction, error)
}

// QueryExecutor combines querying and statement execution.
type QueryExecutor interface {
	Querier
	Executor
}

// Querier runs statements that return rows.
type Querier interface {
	Query(ctx context.Context, stmt string, args ...any) (Rows, error)
	QueryRow(ctx context.Context, stmt string, args ...any) Row
}

// Query runs fn against q and returns its result slice.
func Query[Out any](q Querier, fn func(q Querier) ([]Out, error)) ([]Out, error) {
	return fn(q)
}

// QueryRow runs fn against q and returns its single result.
func QueryRow[Out any](q Querier, fn func(q Querier) (Out, error)) (Out, error) {
	return fn(q)
}

// Executor runs statements that return no rows.
type Executor interface {
	Exec(ctx context.Context, stmt string, args ...any) error
}

// LoadStatements sets the sql statements strings
// TODO: implement
func LoadStatements(fs.FS) error {
	return nil
}

View File

@@ -1,135 +0,0 @@
package dialect
import (
"context"
"errors"
"reflect"
"github.com/mitchellh/mapstructure"
"github.com/spf13/viper"
"github.com/zitadel/zitadel/backend/storage/database"
"github.com/zitadel/zitadel/backend/storage/database/dialect/postgres"
)
// Hook describes how a single database dialect plugs into config decoding.
type Hook struct {
	// Match reports whether a configuration key refers to this dialect.
	Match func(string) bool
	// Decode converts the raw config value into a connector.
	Decode func(config any) (database.Connector, error)
	// Name is the canonical dialect name.
	Name string
	// Constructor returns an empty connector config for this dialect.
	Constructor func() database.Connector
}

// hooks registers all supported dialects.
var hooks = []Hook{
	{
		Match:       postgres.NameMatcher,
		Decode:      postgres.DecodeConfig,
		Name:        postgres.Name,
		Constructor: func() database.Connector { return new(postgres.Config) },
	},
	// {
	// 	Match: gosql.NameMatcher,
	// 	Decode: gosql.DecodeConfig,
	// 	Name: gosql.Name,
	// 	Constructor: func() database.Connector { return new(gosql.Config) },
	// },
}
// Config holds the raw, dialect-keyed database configuration.
type Config struct {
	// Dialects captures all remaining config keys; exactly one of them must
	// name a supported dialect.
	Dialects map[string]any `mapstructure:",remain" yaml:",inline"`

	// connector is resolved from Dialects by decodeDialect.
	connector database.Connector
}
// Configure implements [configure.Configurer].
// func (c *Config) Configure() (any, error) {
// possibilities := make([]string, len(hooks))
// var cursor int
// for i, hook := range hooks {
// if _, ok := c.Dialects[hook.Name]; ok {
// cursor = i
// }
// possibilities[i] = hook.Name
// }
// prompt := promptui.Select{
// Label: "Select a dialect",
// Items: possibilities,
// CursorPos: cursor,
// }
// i, _, err := prompt.Run()
// if err != nil {
// return nil, err
// }
// var config bla4.Configurer
// if dialect, ok := c.Dialects[hooks[i].Name]; ok {
// config, err = hooks[i].Decode(dialect)
// if err != nil {
// return nil, err
// }
// } else {
// clear(c.Dialects)
// config = hooks[i].Constructor()
// }
// if c.Dialects == nil {
// c.Dialects = make(map[string]any)
// }
// c.Dialects[hooks[i].Name], err = config.Configure()
// if err != nil {
// return nil, err
// }
// return c, nil
// }
// Connect implements [database.Connector].
// It requires exactly one configured dialect whose connector was resolved
// during decoding (see decodeDialect).
func (c Config) Connect(ctx context.Context) (database.Pool, error) {
	if len(c.Dialects) != 1 {
		// error strings are lowercase per Go convention
		return nil, errors.New("exactly one dialect must be configured")
	}
	// Guard against a Config that was built without running decodeDialect;
	// previously this dereferenced a nil connector and panicked.
	if c.connector == nil {
		return nil, errors.New("dialect configured but no connector decoded")
	}
	return c.connector.Connect(ctx)
}
// Hooks implements [configure.Unmarshaller].
// It registers the decode hook that resolves the dialect connector while
// viper unmarshals the configuration.
func (c Config) Hooks() []viper.DecoderConfigOption {
	return []viper.DecoderConfigOption{
		viper.DecodeHook(decodeHook),
	}
}
// decodeDialect resolves c.connector from the first configured dialect that
// a registered hook matches.
// NOTE(review): the inner loop iterates a map, so with multiple matching
// dialects the chosen one is nondeterministic — Connect rejects that case
// by requiring exactly one entry.
func (c *Config) decodeDialect() error {
	for _, hook := range hooks {
		for name, config := range c.Dialects {
			if !hook.Match(name) {
				continue
			}
			connector, err := hook.Decode(config)
			if err != nil {
				return err
			}
			c.connector = connector
			return nil
		}
	}
	return errors.New("no dialect found")
}
// decodeHook is a mapstructure decode hook that intercepts values targeted
// at [Config], decodes them and resolves the dialect connector.
// Values aimed at any other type pass through unchanged.
func decodeHook(from, to reflect.Value) (_ any, err error) {
	if to.Type() != reflect.TypeOf(Config{}) {
		return from.Interface(), nil
	}
	config := new(Config)
	if err = mapstructure.Decode(from.Interface(), config); err != nil {
		return nil, err
	}
	if err = config.decodeDialect(); err != nil {
		return nil, err
	}
	return config, nil
}

View File

@@ -1,52 +0,0 @@
package gosql
import (
"context"
"database/sql"
"errors"
"strings"
"github.com/zitadel/zitadel/backend/storage/database"
)
var (
_ database.Connector = (*Config)(nil)
Name = "gosql"
)
// Config wraps an already-opened [*sql.DB] so it can act as a
// [database.Connector].
type Config struct {
	// db is the underlying standard-library connection pool.
	db *sql.DB
}
// Connect implements [database.Connector].
// The pool is only handed out after a successful liveness ping.
func (c *Config) Connect(ctx context.Context) (database.Pool, error) {
	pool := &sqlPool{c.db}
	if err := c.db.PingContext(ctx); err != nil {
		return nil, err
	}
	return pool, nil
}
// NameMatcher reports whether name (case-insensitively) refers to any
// database/sql driver registered in this process.
func NameMatcher(name string) bool {
	lowered := strings.ToLower(name)
	for _, registered := range sql.Drivers() {
		if registered == lowered {
			return true
		}
	}
	return false
}
// DecodeConfig builds a connector for the named driver from a raw
// configuration value. Only string (DSN) configuration is supported today;
// map configuration is recognized but not yet implemented.
func DecodeConfig(name string, config any) (database.Connector, error) {
	if dsn, ok := config.(string); ok {
		db, err := sql.Open(name, dsn)
		if err != nil {
			return nil, err
		}
		return &Config{db}, nil
	}
	if _, ok := config.(map[string]any); ok {
		return nil, errors.New("map configuration not implemented")
	}
	return nil, errors.New("invalid configuration")
}

View File

@@ -1,45 +0,0 @@
package gosql
import (
"context"
"database/sql"
"github.com/zitadel/zitadel/backend/storage/database"
)
// sqlConn adapts a single checked-out [*sql.Conn] to [database.Client].
type sqlConn struct{ *sql.Conn }

// Compile-time interface satisfaction check.
var _ database.Client = (*sqlConn)(nil)
// Release implements [database.Client].
// It returns the connection to the pool by closing the per-client handle.
func (c *sqlConn) Release(_ context.Context) error {
	return c.Conn.Close()
}
// Begin implements [database.Client].
// It starts a transaction on this dedicated connection, translating the
// generic options into their database/sql form first.
func (c *sqlConn) Begin(ctx context.Context, opts *database.TransactionOptions) (database.Transaction, error) {
	sqlOpts := transactionOptionsToSql(opts)
	tx, err := c.Conn.BeginTx(ctx, sqlOpts)
	if err != nil {
		return nil, err
	}
	return &sqlTx{tx}, nil
}
// Query implements [database.Client] by delegating to QueryContext on the
// embedded connection. (The old comment's pgxConn reference was a
// copy-paste leftover from the pgx implementation.)
func (c *sqlConn) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	return c.Conn.QueryContext(ctx, sql, args...)
}
// QueryRow implements [database.Client] by delegating to QueryRowContext on
// the embedded connection. (The old comment's pgxConn reference was a
// copy-paste leftover from the pgx implementation.)
func (c *sqlConn) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return c.Conn.QueryRowContext(ctx, sql, args...)
}
// Exec implements [database.Client].
// The sql.Result is discarded because the interface only surfaces the error.
// (The old comment wrongly said database.Pool / pgxPool — copy-paste leftover.)
func (c *sqlConn) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := c.Conn.ExecContext(ctx, sql, args...)
	return err
}

View File

@@ -1,54 +0,0 @@
package gosql
import (
"context"
"database/sql"
"github.com/zitadel/zitadel/backend/storage/database"
)
// sqlPool adapts a [*sql.DB] to [database.Pool].
type sqlPool struct{ *sql.DB }

// Compile-time interface satisfaction check.
var _ database.Pool = (*sqlPool)(nil)
// Acquire implements [database.Pool].
// It checks a dedicated connection out of the underlying *sql.DB and wraps
// it as a [database.Client].
func (c *sqlPool) Acquire(ctx context.Context) (database.Client, error) {
	conn, err := c.DB.Conn(ctx)
	if err != nil {
		return nil, err
	}
	client := &sqlConn{conn}
	return client, nil
}
// Query implements [database.Pool] by delegating to QueryContext on the
// embedded *sql.DB. (The old comment's pgxPool reference was a copy-paste
// leftover from the pgx implementation.)
func (c *sqlPool) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	return c.DB.QueryContext(ctx, sql, args...)
}
// QueryRow implements [database.Pool] by delegating to QueryRowContext on
// the embedded *sql.DB. (The old comment's pgxPool reference was a
// copy-paste leftover from the pgx implementation.)
func (c *sqlPool) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return c.DB.QueryRowContext(ctx, sql, args...)
}
// Exec implements [database.Pool].
// The sql.Result is discarded because the interface only surfaces the error.
func (c *sqlPool) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := c.DB.ExecContext(ctx, sql, args...)
	return err
}
// Begin implements [database.Pool].
// It opens a transaction directly on the pool, translating the generic
// options into database/sql form first.
func (c *sqlPool) Begin(ctx context.Context, opts *database.TransactionOptions) (database.Transaction, error) {
	txOpts := transactionOptionsToSql(opts)
	tx, err := c.DB.BeginTx(ctx, txOpts)
	if err != nil {
		return nil, err
	}
	wrapped := &sqlTx{tx}
	return wrapped, nil
}
// Close implements [database.Pool].
// It closes the underlying *sql.DB and all of its idle connections.
func (c *sqlPool) Close(_ context.Context) error {
	return c.DB.Close()
}

View File

@@ -1,79 +0,0 @@
package gosql
import (
"context"
"database/sql"
"errors"
"github.com/zitadel/zitadel/backend/storage/database"
)
// sqlTx adapts a [*sql.Tx] to [database.Transaction].
type sqlTx struct{ *sql.Tx }

// Compile-time interface satisfaction check.
var _ database.Transaction = (*sqlTx)(nil)
// Commit implements [database.Transaction].
// The context is unused; database/sql commits without one.
func (tx *sqlTx) Commit(_ context.Context) error {
	return tx.Tx.Commit()
}
// Rollback implements [database.Transaction].
// The context is unused; database/sql rolls back without one.
func (tx *sqlTx) Rollback(_ context.Context) error {
	return tx.Tx.Rollback()
}
// End implements [database.Transaction].
// A non-nil err rolls the transaction back and returns the original error
// combined with any rollback failure, so rollback problems are not silently
// lost (previously the Rollback error was discarded). On success the
// transaction is committed.
func (tx *sqlTx) End(ctx context.Context, err error) error {
	if err != nil {
		// errors.Join keeps errors.Is/As working against the original err.
		return errors.Join(err, tx.Rollback(ctx))
	}
	return tx.Commit(ctx)
}
// Query implements [database.Transaction] by delegating to QueryContext on
// the embedded *sql.Tx. (The old comment's pgxTx reference was a copy-paste
// leftover from the pgx implementation.)
func (tx *sqlTx) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	return tx.Tx.QueryContext(ctx, sql, args...)
}
// QueryRow implements [database.Transaction] by delegating to
// QueryRowContext on the embedded *sql.Tx. (The old comment's pgxTx
// reference was a copy-paste leftover from the pgx implementation.)
func (tx *sqlTx) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return tx.Tx.QueryRowContext(ctx, sql, args...)
}
// Exec implements [database.Transaction].
// The sql.Result is discarded because the interface only surfaces the error.
// (The old comment wrongly said database.Pool / pgxPool — copy-paste leftover.)
func (tx *sqlTx) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := tx.Tx.ExecContext(ctx, sql, args...)
	return err
}
// Begin implements [database.Transaction].
// database/sql has no savepoint support here, so nested transactions are
// rejected rather than faked.
func (tx *sqlTx) Begin(ctx context.Context) (database.Transaction, error) {
	return nil, errors.New("nested transactions are not supported")
}
// transactionOptionsToSql converts generic transaction options into the
// database/sql representation. A nil input yields nil so callers may pass
// options through unconditionally.
func transactionOptionsToSql(opts *database.TransactionOptions) *sql.TxOptions {
	if opts == nil {
		return nil
	}
	converted := &sql.TxOptions{
		Isolation: isolationToSql(opts.IsolationLevel),
	}
	converted.ReadOnly = opts.AccessMode == database.AccessModeReadOnly
	return converted
}
// isolationToSql maps the generic isolation level onto database/sql levels.
// Serializable is both its explicit mapping and the conservative fallback
// for any level without a dedicated case.
func isolationToSql(isolation database.IsolationLevel) sql.IsolationLevel {
	if isolation == database.IsolationLevelReadCommitted {
		return sql.LevelReadCommitted
	}
	return sql.LevelSerializable
}

View File

@@ -1,134 +0,0 @@
package postgres
import (
"context"
"errors"
"slices"
"strings"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/manifoldco/promptui"
"github.com/mitchellh/mapstructure"
"github.com/zitadel/zitadel/backend/cmd/configure/bla4"
"github.com/zitadel/zitadel/backend/storage/database"
)
var (
	// Compile-time check that *Config satisfies [database.Connector].
	_ database.Connector = (*Config)(nil)
	// Name identifies this dialect in configuration.
	Name = "postgres"
)
// Config holds the pgx pool configuration for the postgres dialect and
// drives the interactive field-by-field configuration flow.
type Config struct {
	// config is the parsed pgx pool configuration (from a connection string).
	config *pgxpool.Config
	// Host string
	// Port int32
	// Database string
	// MaxOpenConns uint32
	// MaxIdleConns uint32
	// MaxConnLifetime time.Duration
	// MaxConnIdleTime time.Duration
	// User User
	// // Additional options to be appended as options=<Options>
	// // The value will be taken as is. Multiple options are space separated.
	// Options string
	// configuredFields is the queue of field names still to be prompted for;
	// nil means the iteration has not started yet (see NextField).
	configuredFields []string
}
// FinishAllowed implements [bla4.Iterator].
// It reports whether the interactive configuration may stop early.
func (c *Config) FinishAllowed() bool {
	// Option can be skipped
	// NOTE(review): the `< 2` threshold presumably means "only the optional
	// trailing fields remain" — confirm against the field order in NextField.
	return len(c.configuredFields) < 2
}
// NextField implements [bla4.Iterator].
// On first use it lazily seeds the ordered field queue; each call then pops
// and returns one field name, yielding "" once the queue is exhausted.
func (c *Config) NextField() string {
	if c.configuredFields == nil {
		c.configuredFields = []string{"Host", "Port", "Database", "MaxOpenConns", "MaxIdleConns", "MaxConnLifetime", "MaxConnIdleTime", "User", "Options"}
	}
	if len(c.configuredFields) == 0 {
		return ""
	}
	var next string
	next, c.configuredFields = c.configuredFields[0], c.configuredFields[1:]
	return next
}
// Configure implements [bla4.Configurer].
// It interactively asks how the connection should be configured; choosing
// "connection string" prompts for (and validates) a pgx DSN, while choosing
// "fields" returns (nil, nil) so the caller falls back to per-field
// configuration via the Iterator methods.
func (c *Config) Configure() (value any, err error) {
	typeSelect := promptui.Select{
		Label: "Configure the database connection",
		Items: []string{"connection string", "fields"},
	}
	i, _, err := typeSelect.Run()
	if err != nil {
		return nil, err
	}
	if i > 0 {
		// "fields" selected: signal the caller to iterate fields instead.
		return nil, nil
	}
	if c.config == nil {
		// Seed a sensible default DSN for the prompt; the parse error is
		// ignored because the literal is assumed valid — TODO confirm.
		c.config, _ = pgxpool.ParseConfig("host=localhost user=zitadel password= dbname=zitadel sslmode=disable")
	}
	prompt := promptui.Prompt{
		Label:     "Connection string",
		Default:   c.config.ConnString(),
		AllowEdit: c.config.ConnString() != "",
		// Reject input that pgx cannot parse before accepting it.
		Validate: func(input string) error {
			_, err := pgxpool.ParseConfig(input)
			return err
		},
	}
	return prompt.Run()
}
var _ bla4.Iterator = (*Config)(nil)
// Connect implements [database.Connector].
// It builds a pgx pool from the parsed configuration and verifies
// connectivity with a ping before handing the pool out.
func (c *Config) Connect(ctx context.Context) (database.Pool, error) {
	pool, err := pgxpool.NewWithConfig(ctx, c.config)
	if err != nil {
		return nil, err
	}
	if err = pool.Ping(ctx); err != nil {
		// Close the freshly created pool so its connections do not leak
		// when the liveness check fails (previously it was leaked).
		pool.Close()
		return nil, err
	}
	return &pgxPool{pool}, nil
}
// NameMatcher reports whether name refers to the postgres dialect.
// Matching is case-insensitive and accepts the "pg" shorthand.
func NameMatcher(name string) bool {
	aliases := []string{"postgres", "pg"}
	return slices.Contains(aliases, strings.ToLower(name))
}
// DecodeConfig turns a raw configuration value into a postgres connector.
// A string is parsed as a pgx connection string; a map is decoded
// field-by-field into a Config.
func DecodeConfig(input any) (database.Connector, error) {
	switch c := input.(type) {
	case string:
		config, err := pgxpool.ParseConfig(c)
		if err != nil {
			return nil, err
		}
		return &Config{config: config}, nil
	case map[string]any:
		connector := new(Config)
		decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
			WeaklyTypedInput: true,
			Result:           connector,
		})
		if err != nil {
			return nil, err
		}
		if err = decoder.Decode(c); err != nil {
			return nil, err
		}
		// Bug fix: the decoded connector was previously discarded and a
		// fresh empty Config returned instead. Return the decoded value,
		// ensuring its pool config is at least non-nil.
		if connector.config == nil {
			connector.config = &pgxpool.Config{}
		}
		return connector, nil
	}
	return nil, errors.New("invalid configuration")
}

View File

@@ -1,48 +0,0 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/zitadel/zitadel/backend/storage/database"
)
// pgxConn adapts a single acquired [*pgxpool.Conn] to [database.Client].
type pgxConn struct{ *pgxpool.Conn }

// Compile-time interface satisfaction check.
var _ database.Client = (*pgxConn)(nil)
// Release implements [database.Client].
// pgx's Release never fails, so nil is always returned.
func (c *pgxConn) Release(_ context.Context) error {
	c.Conn.Release()
	return nil
}
// Begin implements [database.Client].
// It starts a transaction on this acquired connection, translating the
// generic options into pgx form first.
func (c *pgxConn) Begin(ctx context.Context, opts *database.TransactionOptions) (database.Transaction, error) {
	pgxOpts := transactionOptionsToPgx(opts)
	tx, err := c.Conn.BeginTx(ctx, pgxOpts)
	if err != nil {
		return nil, err
	}
	return &pgxTx{tx}, nil
}
// Query implements [database.Client] by delegating to the embedded pgx
// connection and wrapping the result rows.
func (c *pgxConn) Query(ctx context.Context, sql string, args ...any) (database.Rows, error) {
	rows, err := c.Conn.Query(ctx, sql, args...)
	if err != nil {
		// Return a nil interface on error instead of a non-nil *Rows
		// wrapping nil rows, which would trip `rows != nil` checks.
		return nil, err
	}
	return &Rows{rows}, nil
}
// QueryRow implements [database.Client] by delegating to QueryRow on the
// embedded pgx connection.
func (c *pgxConn) QueryRow(ctx context.Context, sql string, args ...any) database.Row {
	return c.Conn.QueryRow(ctx, sql, args...)
}
// Exec implements [database.Client].
// The command tag is discarded because the interface only surfaces the error.
// (The old comment wrongly said database.Pool / pgxPool — copy-paste leftover.)
func (c *pgxConn) Exec(ctx context.Context, sql string, args ...any) error {
	_, err := c.Conn.Exec(ctx, sql, args...)
	return err
}

Some files were not shown because too many files have changed in this diff Show More