start sqlite migrations
.gitignore (vendored, 1 line added)
@@ -32,6 +32,7 @@ key.json
 .backups

 cockroach-data/*
+.local/*
 .build/

 #binaries
@@ -1,22 +0,0 @@
package eventstore

//AggregateType is the object name
type AggregateType string

//Aggregate represents an object
type Aggregate struct {
	//ID id is the unique identifier of the aggregate
	// the client must generate it by it's own
	ID string
	//Type describes the meaning of this aggregate
	// it could an object like user
	Type AggregateType

	//ResourceOwner is the organisation which owns this aggregate
	// an aggregate can only be managed by one organisation
	// use the ID of the org
	ResourceOwner string

	//Events describe all the changes made on an aggregate
	Events []*Event
}
internal/eventstore/v2/blub_test.go (new file, 283 lines)
@@ -0,0 +1,283 @@
package eventstore_test

import (
	"encoding/json"
	"fmt"
	"log"
)

//ReadModel is the minimum representation of a View model.
// it might be saved in a database or in memory
type ReadModel struct {
	ProcessedSequence uint64
	ID string
	events []Event
}

//Append adds all the events to the aggregate.
// The function doesn't compute the new state of the read model
func (a *ReadModel) Append(events ...Event) {
	a.events = append(a.events, events...)
}

type ProjectReadModel struct {
	ReadModel
	Apps []*AppReadModel
	Name string
}

func (p *ProjectReadModel) Append(events ...Event) {
	for _, event := range events {
		switch event.(type) {
		case *AddAppEvent:
			app := new(AppReadModel)
			app.Append(event)
			p.Apps = append(p.Apps, app)
		case *UpdateAppEvent:
			for _, app := range p.Apps {
				app.Append(event)
			}
		}
	}
	p.events = append(p.events, events...)
}

//Reduce calculates the new state of the read model
func (p *ProjectReadModel) Reduce() error {
	for i := range p.Apps {
		if err := p.Apps[i].Reduce(); err != nil {
			return err
		}
	}
	for _, event := range p.events {
		switch e := event.(type) {
		case *CreateProjectEvent:
			p.ID = e.ID
			p.Name = e.Name
		case *RemoveAppEvent:
			for i := len(p.Apps) - 1; i >= 0; i-- {
				app := p.Apps[i]
				if app.ID == e.GetID() {
					p.Apps[i] = p.Apps[len(p.Apps)-1]
					p.Apps[len(p.Apps)-1] = nil
					p.Apps = p.Apps[:len(p.Apps)-1]
				}
			}
		}
		p.ProcessedSequence = event.GetSequence()
	}
	return nil
}

type AppReadModel struct {
	ReadModel
	Name string
}

//Reduce calculates the new state of the read model
func (a *AppReadModel) Reduce() error {
	for _, event := range a.events {
		switch e := event.(type) {
		case *AddAppEvent:
			a.Name = e.Name
			a.ID = e.GetID()
		case *UpdateAppEvent:
			a.Name = e.Name
		}
		a.ProcessedSequence = event.GetSequence()
	}
	return nil
}

//Event is the minimal representation of a event
// which can be processed by the read models
type Event interface {
	//GetSequence returns the event sequence
	GetSequence() uint64
	//GetID returns the id of the aggregate. It's not the id of the event
	GetID() string
}

//DefaultEvent is the implementation of Event
type DefaultEvent struct {
	Sequence uint64 `json:"-"`
	ID string `json:"-"`
}

func (e *DefaultEvent) GetID() string {
	return e.ID
}

func (e *DefaultEvent) GetSequence() uint64 {
	return e.Sequence
}

type CreateProjectEvent struct {
	DefaultEvent
	Name string `json:"name,omitempty"`
}

//CreateProjectEventFromEventstore returns the specific type
// of the general EventstoreEvent
func CreateProjectEventFromEventstore(event *EventstoreEvent) (Event, error) {
	e := &CreateProjectEvent{
		DefaultEvent: DefaultEvent{Sequence: event.Sequence, ID: event.AggregateID},
	}
	err := json.Unmarshal(event.Data, e)

	return e, err
}

type AddAppEvent struct {
	ProjectID string `json:"-"`
	AppID string `json:"id"`
	Sequence uint64 `json:"-"`
	Name string `json:"name,omitempty"`
}

func (e *AddAppEvent) GetID() string {
	return e.AppID
}

func (e *AddAppEvent) GetSequence() uint64 {
	return e.Sequence
}

func AppAddedEventFromEventstore(event *EventstoreEvent) (Event, error) {
	e := &AddAppEvent{
		Sequence: event.Sequence,
		ProjectID: event.AggregateID,
	}
	err := json.Unmarshal(event.Data, e)

	return e, err
}

type UpdateAppEvent struct {
	ProjectID string `json:"-"`
	AppID string `json:"id"`
	Sequence uint64 `json:"-"`
	Name string `json:"name,omitempty"`
}

func (e *UpdateAppEvent) GetID() string {
	return e.AppID
}

func (e *UpdateAppEvent) GetSequence() uint64 {
	return e.Sequence
}

func AppUpdatedEventFromEventstore(event *EventstoreEvent) (Event, error) {
	e := &UpdateAppEvent{
		Sequence: event.Sequence,
		ProjectID: event.AggregateID,
	}
	err := json.Unmarshal(event.Data, e)

	return e, err
}

type RemoveAppEvent struct {
	ProjectID string `json:"-"`
	AppID string `json:"id"`
	Sequence uint64 `json:"-"`
}

func (e *RemoveAppEvent) GetID() string {
	return e.AppID
}

func (e *RemoveAppEvent) GetSequence() uint64 {
	return e.Sequence
}

func AppRemovedEventFromEventstore(event *EventstoreEvent) (Event, error) {
	e := &RemoveAppEvent{
		Sequence: event.Sequence,
		ProjectID: event.AggregateID,
	}
	err := json.Unmarshal(event.Data, e)

	return e, err
}

func main() {
	eventstore := &Eventstore{
		eventMapper: map[string]func(*EventstoreEvent) (Event, error){
			"project.added": CreateProjectEventFromEventstore,
			"app.added": AppAddedEventFromEventstore,
			"app.updated": AppUpdatedEventFromEventstore,
			"app.removed": AppRemovedEventFromEventstore,
		},
		events: []*EventstoreEvent{
			{
				AggregateID: "p1",
				EventType: "project.added",
				Sequence: 1,
				Data: []byte(`{"name":"hodor"}`),
			},
			{
				AggregateID: "123",
				EventType: "app.added",
				Sequence: 2,
				Data: []byte(`{"id":"a1", "name": "ap 1"}`),
			},
			{
				AggregateID: "123",
				EventType: "app.updated",
				Sequence: 3,
				Data: []byte(`{"id":"a1", "name":"app 1"}`),
			},
			{
				AggregateID: "123",
				EventType: "app.added",
				Sequence: 4,
				Data: []byte(`{"id":"a2", "name": "app 2"}`),
			},
			{
				AggregateID: "123",
				EventType: "app.removed",
				Sequence: 5,
				Data: []byte(`{"id":"a1"}`),
			},
		},
	}
	events, err := eventstore.GetEvents()
	if err != nil {
		log.Panic(err)
	}

	p := &ProjectReadModel{Apps: []*AppReadModel{}}
	p.Append(events...)
	p.Reduce()

	fmt.Printf("%+v\n", p)
	for _, app := range p.Apps {
		fmt.Printf("%+v\n", app)
	}
}

//Eventstore is a simple abstraction of the eventstore framework
type Eventstore struct {
	eventMapper map[string]func(*EventstoreEvent) (Event, error)
	events []*EventstoreEvent
}

func (es *Eventstore) GetEvents() (events []Event, err error) {
	events = make([]Event, len(es.events))
	for i, event := range es.events {
		events[i], err = es.eventMapper[event.EventType](event)
		if err != nil {
			return nil, err
		}
	}
	return events, nil
}

type EventstoreEvent struct {
	AggregateID string
	Sequence uint64
	EventType string
	Data []byte
}
@@ -2,11 +2,204 @@ package eventstore
 
 import (
 	"context"
+	"sync"
+
+	"github.com/caos/zitadel/internal/errors"
+	"github.com/caos/zitadel/internal/eventstore/v2/repository"
 )
 
-type Eventstore interface {
-	Health(ctx context.Context) error
-	PushAggregates(ctx context.Context, aggregates ...*Aggregate) error
-	FilterEvents(ctx context.Context, searchQuery *SearchQueryFactory) (events []*Event, err error)
-	LatestSequence(ctx context.Context, searchQuery *SearchQueryFactory) (uint64, error)
+type Event interface {
+	//CheckPrevious ensures the event order if true
+	// if false the previous sequence is not checked on push
+	CheckPrevious() bool
+
+	EditorService() string
+	EditorUser() string
+	Type() EventType
+	Data() interface{}
+	PreviousSequence() uint64
+}
+
+type eventAppender interface {
+	//AppendEvents appends the passed events to an internal list of events
+	AppendEvents(...Event) error
+}
+
+type reducer interface {
+	//Reduce handles the events of the internal events list
+	// it only appends the newly added events
+	Reduce() error
+}
+type aggregater interface {
+	eventAppender
+	reducer
+	//ID returns the aggreagte id
+	ID() string
+	//Type returns the aggregate type
+	Type() AggregateType
+	//Events returns the events which will be pushed
+	Events() []Event
+	//ResourceOwner returns the organisation id which manages this aggregate
+	ResourceOwner() string
+	//Version represents the semantic version of the aggregate
+	Version() Version
+}
+type readModeler interface {
+	eventAppender
+	reducer
+}
+
+type Eventstore struct {
+	repo repository.Repository
+	interceptorMutex sync.Mutex
+	eventMapper map[EventType]eventTypeInterceptors
+}
+
+type eventTypeInterceptors struct {
+	pushMapper func(Event) (*repository.Event, error)
+	filterMapper func(*repository.Event) (Event, error)
+}
+
+//Health checks if the eventstore can properly work
+// It checks if the repository can serve load
+func (es *Eventstore) Health(ctx context.Context) error {
+	return es.repo.Health(ctx)
+}
+
+//PushAggregates maps the events of all aggregates to an eventstore event
+// based on the pushMapper
+func (es *Eventstore) PushAggregates(ctx context.Context, aggregates ...aggregater) ([]Event, error) {
+	events, err := es.aggregatesToEvents(aggregates)
+	if err != nil {
+		return nil, err
+	}
+
+	err = es.repo.Push(ctx, events...)
+	if err != nil {
+		return nil, err
+	}
+
+	return es.mapEvents(events)
+}
+
+func (es *Eventstore) aggregatesToEvents(aggregates []aggregater) ([]*repository.Event, error) {
+	events := make([]*repository.Event, 0, len(aggregates))
+	for _, aggregate := range aggregates {
+		var previousEvent *repository.Event
+		for _, event := range aggregate.Events() {
+			events = append(events, &repository.Event{
+				AggregateID: aggregate.ID(),
+				AggregateType: repository.AggregateType(aggregate.Type()),
+				ResourceOwner: aggregate.ResourceOwner(),
+				EditorService: event.EditorService(),
+				EditorUser: event.EditorUser(),
+				Type: repository.EventType(event.Type()),
+				Version: repository.Version(aggregate.Version()),
+				PreviousEvent: previousEvent,
+				Data: event.Data(),
+			})
+			if previousEvent != nil && event.CheckPrevious() {
+				events[len(events)-1].PreviousSequence = event.PreviousSequence()
+			}
+			previousEvent = events[len(events)-1]
+		}
+	}
+	return events, nil
+}
+
+//FilterEvents filters the stored events based on the searchQuery
+// and maps the events to the defined event structs
+func (es *Eventstore) FilterEvents(ctx context.Context, queryFactory *SearchQueryFactory) ([]Event, error) {
+	query, err := queryFactory.Build()
+	if err != nil {
+		return nil, err
+	}
+	events, err := es.repo.Filter(ctx, query)
+	if err != nil {
+		return nil, err
+	}
+
+	return es.mapEvents(events)
+}
+
+func (es *Eventstore) mapEvents(events []*repository.Event) (mappedEvents []Event, err error) {
+	mappedEvents = make([]Event, len(events))
+
+	es.interceptorMutex.Lock()
+	defer es.interceptorMutex.Unlock()
+
+	for i, event := range events {
+		interceptors, ok := es.eventMapper[EventType(event.Type)]
+		if !ok || interceptors.filterMapper == nil {
+			return nil, errors.ThrowPreconditionFailed(nil, "V2-usujB", "event mapper not defined")
+		}
+		mappedEvents[i], err = interceptors.filterMapper(event)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return mappedEvents, nil
+}
+
+//FilterToAggregate filters the events based on the searchQuery, appends all events to the aggregate and reduces the aggregate
+func (es *Eventstore) FilterToAggregate(ctx context.Context, searchQuery *SearchQueryFactory, aggregate aggregater) (err error) {
+	events, err := es.FilterEvents(ctx, searchQuery)
+	if err != nil {
+		return err
+	}
+	if err = aggregate.AppendEvents(events...); err != nil {
+		return err
+	}
+
+	return aggregate.Reduce()
+}
+
+//FilterToReadModel filters the events based on the searchQuery, appends all events to the readModel and reduces the readModel
+func (es *Eventstore) FilterToReadModel(ctx context.Context, searchQuery *SearchQueryFactory, readModel readModeler) (err error) {
+	events, err := es.FilterEvents(ctx, searchQuery)
+	if err != nil {
+		return err
+	}
+	if err = readModel.AppendEvents(events...); err != nil {
+		return err
+	}
+
+	return readModel.Reduce()
+}
+
+func (es *Eventstore) LatestSequence(ctx context.Context, searchQuery *SearchQueryFactory) (uint64, error) {
+	return 0, nil
+}
+
+//RegisterPushEventMapper registers a function for mapping an eventstore event to an event
+func (es *Eventstore) RegisterFilterEventMapper(eventType EventType, mapper func(*repository.Event) (Event, error)) error {
+	if eventType == "" || mapper == nil {
+		return errors.ThrowInvalidArgument(nil, "V2-IPpUR", "eventType and mapper must be filled")
+	}
+
+	es.interceptorMutex.Lock()
+	defer es.interceptorMutex.Unlock()
+
+	interceptor := es.eventMapper[eventType]
+	interceptor.filterMapper = mapper
+	es.eventMapper[eventType] = interceptor
+
+	return nil
+}
+
+//RegisterPushEventMapper registers a function for mapping an event to an eventstore event
+func (es *Eventstore) RegisterPushEventMapper(eventType EventType, mapper func(Event) (*repository.Event, error)) error {
+	if eventType == "" || mapper == nil {
+		return errors.ThrowInvalidArgument(nil, "V2-Kexpp", "eventType and mapper must be filled")
+	}
+
+	es.interceptorMutex.Lock()
+	defer es.interceptorMutex.Unlock()
+
+	interceptor := es.eventMapper[eventType]
+	interceptor.pushMapper = mapper
+	es.eventMapper[eventType] = interceptor
+
+	return nil
 }
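The sketch below (not part of the commit) shows how the pieces above are meant to be used together, written as if it lived in the same eventstore package: a filter mapper is registered for an event type, then FilterToReadModel loads and reduces a read model. The names loadProjects, projectAddedMapper and ProjectsReadModel are hypothetical, and the commit ships no Eventstore constructor, so an already wired *Eventstore is assumed.

// Sketch of the intended call flow, not part of the commit.
func loadProjects(ctx context.Context, es *Eventstore) (*ProjectsReadModel, error) {
	// teach the eventstore how to turn stored repository events back
	// into typed events when filtering (projectAddedMapper is hypothetical)
	if err := es.RegisterFilterEventMapper("project.added", projectAddedMapper); err != nil {
		return nil, err
	}

	// ProjectsReadModel is hypothetical; anything implementing
	// AppendEvents(...Event) error and Reduce() error qualifies as a readModeler
	projects := new(ProjectsReadModel)
	query := NewSearchQueryFactory("project")
	if err := es.FilterToReadModel(ctx, query, projects); err != nil {
		return nil, err
	}
	return projects, nil
}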
@@ -1,28 +0,0 @@
package eventstore

import "context"

type inMemoryEventstore struct {
	events []*Event
}

func (es *inMemoryEventstore) Health(ctx context.Context) error {
	return nil
}

func (es *inMemoryEventstore) PushAggregates(ctx context.Context, aggregates ...*Aggregate) error {
	return nil
}

func (es *inMemoryEventstore) FilterEvents(ctx context.Context, searchQuery *SearchQueryFactory) (events []*Event, err error) {
	return nil, nil
}

func (es *inMemoryEventstore) LatestSequence(ctx context.Context, searchQuery *SearchQueryFactory) (uint64, error) {
	query, err := searchQuery.Build()
	if err != nil {
		return 0, err
	}
	query.Filters[0].
	return 0, nil
}
@@ -1,24 +1,266 @@
 package eventstore
 
-import "testing"
+import (
+	"reflect"
+	"testing"
+
+	"github.com/caos/zitadel/internal/errors"
+	"github.com/caos/zitadel/internal/eventstore/v2/repository"
+)
+
+type testEvent struct {
+	description string
+	shouldCheckPrevious bool
+}
+
+func (e *testEvent) CheckPrevious() bool {
+	return e.shouldCheckPrevious
+}
+
+func testPushMapper(Event) (*repository.Event, error) {
+	return &repository.Event{AggregateID: "aggregateID"}, nil
+}
+
+func Test_eventstore_RegisterPushEventMapper(t *testing.T) {
+	type fields struct {
+		eventMapper map[EventType]eventTypeInterceptors
+	}
+	type args struct {
+		eventType EventType
+		mapper func(Event) (*repository.Event, error)
+	}
+	type res struct {
+		event *repository.Event
+		isErr func(error) bool
+	}
+
-func TestPushAggregates(t *testing.T) {
-	type res struct{}
-	type args struct{}
 	tests := []struct {
 		name string
+		fields fields
 		args args
-		res *SearchQueryFactory
-	}{}
+		res res
+	}{
+		{
+			name: "no event type",
+			args: args{
+				eventType: "",
+				mapper: testPushMapper,
+			},
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			res: res{
+				isErr: errors.IsErrorInvalidArgument,
+			},
+		},
+		{
+			name: "no event mapper",
+			args: args{
+				eventType: "event.type",
+				mapper: nil,
+			},
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			res: res{
+				isErr: errors.IsErrorInvalidArgument,
+			},
+		},
+		{
+			name: "new interceptor",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			args: args{
+				eventType: "new.event",
+				mapper: testPushMapper,
+			},
+			res: res{
+				event: &repository.Event{AggregateID: "aggregateID"},
+			},
+		},
+		{
+			name: "existing interceptor new push mapper",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{
+					"existing": {},
+				},
+			},
+			args: args{
+				eventType: "new.event",
+				mapper: testPushMapper,
+			},
+			res: res{
+				event: &repository.Event{AggregateID: "aggregateID"},
+			},
+		},
+		{
+			name: "existing interceptor existing push mapper",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{
+					"existing": {
+						pushMapper: func(Event) (*repository.Event, error) {
+							return nil, errors.ThrowUnimplemented(nil, "V2-1qPvn", "unimplemented")
+						},
+					},
+				},
+			},
+			args: args{
+				eventType: "new.event",
+				mapper: testPushMapper,
+			},
+			res: res{
+				event: &repository.Event{AggregateID: "aggregateID"},
+			},
+		},
+	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			// factory := NewSearchQueryFactory(tt.args.aggregateTypes...)
-			// for _, setter := range tt.args.setters {
-			// 	factory = setter(factory)
-			// }
-			// if !reflect.DeepEqual(factory, tt.res) {
-			// 	t.Errorf("NewSearchQueryFactory() = %v, want %v", factory, tt.res)
-			// }
+			es := &Eventstore{
+				eventMapper: tt.fields.eventMapper,
+			}
+			err := es.RegisterPushEventMapper(tt.args.eventType, tt.args.mapper)
+			if (tt.res.isErr != nil && !tt.res.isErr(err)) || (tt.res.isErr == nil && err != nil) {
+				t.Errorf("wrong error got: %v", err)
+				return
+			}
+			if tt.res.isErr != nil {
+				return
+			}
+
+			mapper := es.eventMapper[tt.args.eventType]
+			event, err := mapper.pushMapper(nil)
+			if err != nil {
+				t.Errorf("unexpected error %v", err)
+			}
+
+			if !reflect.DeepEqual(tt.res.event, event) {
+				t.Errorf("events should be deep equal. \ngot %v\nwant %v", event, tt.res.event)
+			}
+		})
+	}
+}
+
+func testFilterMapper(*repository.Event) (Event, error) {
+	return &testEvent{description: "hodor"}, nil
+}
+
+func Test_eventstore_RegisterFilterEventMapper(t *testing.T) {
+	type fields struct {
+		eventMapper map[EventType]eventTypeInterceptors
+	}
+	type args struct {
+		eventType EventType
+		mapper func(*repository.Event) (Event, error)
+	}
+	type res struct {
+		event Event
+		isErr func(error) bool
+	}
+
+	tests := []struct {
+		name string
+		fields fields
+		args args
+		res res
+	}{
+		{
+			name: "no event type",
+			args: args{
+				eventType: "",
+				mapper: testFilterMapper,
+			},
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			res: res{
+				isErr: errors.IsErrorInvalidArgument,
+			},
+		},
+		{
+			name: "no event mapper",
+			args: args{
+				eventType: "event.type",
+				mapper: nil,
+			},
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			res: res{
+				isErr: errors.IsErrorInvalidArgument,
+			},
+		},
+		{
+			name: "new interceptor",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{},
+			},
+			args: args{
+				eventType: "event.type",
+				mapper: testFilterMapper,
+			},
+			res: res{
+				event: &testEvent{description: "hodor"},
+			},
+		},
+		{
+			name: "existing interceptor new filter mapper",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{
+					"event.type": {},
+				},
+			},
+			args: args{
+				eventType: "new.event",
+				mapper: testFilterMapper,
+			},
+			res: res{
+				event: &testEvent{description: "hodor"},
+			},
+		},
+		{
+			name: "existing interceptor existing filter mapper",
+			fields: fields{
+				eventMapper: map[EventType]eventTypeInterceptors{
+					"event.type": {
+						filterMapper: func(*repository.Event) (Event, error) {
+							return nil, errors.ThrowUnimplemented(nil, "V2-1qPvn", "unimplemented")
+						},
+					},
+				},
+			},
+			args: args{
+				eventType: "new.event",
+				mapper: testFilterMapper,
+			},
+			res: res{
+				event: &testEvent{description: "hodor"},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			es := &Eventstore{
+				eventMapper: tt.fields.eventMapper,
+			}
+			err := es.RegisterFilterEventMapper(tt.args.eventType, tt.args.mapper)
+			if (tt.res.isErr != nil && !tt.res.isErr(err)) || (tt.res.isErr == nil && err != nil) {
+				t.Errorf("wrong error got: %v", err)
+				return
+			}
+			if tt.res.isErr != nil {
+				return
+			}
+
+			mapper := es.eventMapper[tt.args.eventType]
+			event, err := mapper.filterMapper(nil)
+			if err != nil {
+				t.Errorf("unexpected error %v", err)
+			}
+
+			if !reflect.DeepEqual(tt.res.event, event) {
+				t.Errorf("events should be deep equal. \ngot %v\nwant %v", event, tt.res.event)
+			}
 		})
 	}
 }
@@ -1,3 +0,0 @@
package eventstore

type ReadModel struct{}
internal/eventstore/v2/repository/aggregate.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package repository

//AggregateType is the object name
type AggregateType string

// //Aggregate represents an object
// type Aggregate struct {
// //ID id is the unique identifier of the aggregate
// // the client must generate it by it's own
// ID string
// //Type describes the meaning of this aggregate
// // it could an object like user
// Type AggregateType

// //ResourceOwner is the organisation which owns this aggregate
// // an aggregate can only be managed by one organisation
// // use the ID of the org
// ResourceOwner string

// //Events describe all the changes made on an aggregate
// Events []*Event
// }
@@ -1,6 +1,8 @@
-package eventstore
+package repository
 
-import "time"
+import (
+	"time"
+)
 
 //Event represents all information about a manipulation of an aggregate
 type Event struct {
@@ -13,6 +15,10 @@ type Event struct {
 	// if it's 0 then it's the first event of this aggregate
 	PreviousSequence uint64
 
+	//PreviousEvent is needed in push to update PreviousSequence
+	// it implements a linked list
+	PreviousEvent *Event
+
 	//CreationDate is the time the event is created
 	// it's used for human readability.
 	// Don't use it for event ordering,
internal/eventstore/v2/repository/in_memory.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package repository

import "context"

type InMemory struct {
	events []*Event
}

func (repo *InMemory) Health(ctx context.Context) error { return nil }

// PushEvents adds all events of the given aggregates to the eventstreams of the aggregates.
// This call is transaction save. The transaction will be rolled back if one event fails
func (repo *InMemory) Push(ctx context.Context, events ...*Event) error {
	repo.events = append(repo.events, events...)
	return nil
}

// Filter returns all events matching the given search query
func (repo *InMemory) Filter(ctx context.Context, searchQuery *SearchQuery) (events []*Event, err error) {
	indexes := repo.filter(searchQuery)
	events = make([]*Event, len(indexes))
	for i, index := range indexes {
		events[i] = repo.events[index]
	}

	return events, nil
}

func (repo *InMemory) filter(query *SearchQuery) []int {
	foundIndex := make([]int, 0, query.Limit)
events:
	for i, event := range repo.events {
		if query.Limit > 0 && uint64(len(foundIndex)) < query.Limit {
			return foundIndex
		}
		for _, filter := range query.Filters {
			var value interface{}
			switch filter.field {
			case Field_AggregateID:
				value = event.AggregateID
			case Field_EditorService:
				value = event.EditorService
			case Field_EventType:
				value = event.Type
			case Field_AggregateType:
				value = event.AggregateType
			case Field_EditorUser:
				value = event.EditorUser
			case Field_ResourceOwner:
				value = event.ResourceOwner
			case Field_LatestSequence:
				value = event.Sequence
			}
			switch filter.operation {
			case Operation_Equals:
				if filter.value == value {
					foundIndex = append(foundIndex, i)
				}
			case Operation_Greater:
				fallthrough
			case Operation_Less:

				return nil
			case Operation_In:
				values := filter.Value().([]interface{})
				for _, val := range values {
					if val == value {
						foundIndex = append(foundIndex, i)
						continue events
					}
				}
			}
		}
	}

	return foundIndex
}

//LatestSequence returns the latests sequence found by the the search query
func (repo *InMemory) LatestSequence(ctx context.Context, queryFactory *SearchQuery) (uint64, error) {
	return 0, nil
}
internal/eventstore/v2/repository/repository.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package repository

import (
	"context"
)

type Repository interface {
	Health(ctx context.Context) error

	// PushEvents adds all events of the given aggregates to the eventstreams of the aggregates.
	// This call is transaction save. The transaction will be rolled back if one event fails
	Push(ctx context.Context, events ...*Event) error
	// Filter returns all events matching the given search query
	Filter(ctx context.Context, searchQuery *SearchQuery) (events []*Event, err error)
	//LatestSequence returns the latests sequence found by the the search query
	LatestSequence(ctx context.Context, queryFactory *SearchQuery) (uint64, error)
}
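The in-memory store above already satisfies this Repository interface, so a minimal round trip looks roughly like the following sketch. It is not part of the commit, and the event field values are invented for illustration.

package example

import (
	"context"
	"log"

	"github.com/caos/zitadel/internal/eventstore/v2/repository"
)

func main() {
	// compile-time style check: InMemory implements Repository
	var repo repository.Repository = &repository.InMemory{}

	// push one event (field values are made up for this sketch)
	err := repo.Push(context.Background(), &repository.Event{
		AggregateID:   "p1",
		AggregateType: "project",
		Type:          "project.added",
	})
	if err != nil {
		log.Fatal(err)
	}

	// read it back by aggregate id
	events, err := repo.Filter(context.Background(), &repository.SearchQuery{
		Filters: []*repository.Filter{
			repository.NewFilter(repository.Field_AggregateID, "p1", repository.Operation_Equals),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d event(s)", len(events))
}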
@@ -1,7 +1,21 @@
-package eventstore
+package repository
 
-import (
-	"github.com/caos/zitadel/internal/errors"
-)
+import "github.com/caos/zitadel/internal/errors"
+
+type SearchQuery struct {
+	Columns Columns
+	Limit uint64
+	Desc bool
+	Filters []*Filter
+}
+
+type Columns int32
+
+const (
+	Columns_Event = iota
+	Columns_Max_Sequence
+	//insert new columns-types above this ColumnsCount because count is needed for validation
+	ColumnsCount
+)
 
 type Filter struct {
@@ -52,16 +66,16 @@ func (f *Filter) Value() interface{} {
 
 func (f *Filter) Validate() error {
 	if f == nil {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-z6KcG", "filter is nil")
+		return errors.ThrowPreconditionFailed(nil, "REPO-z6KcG", "filter is nil")
 	}
 	if f.field <= 0 {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-zw62U", "field not definded")
+		return errors.ThrowPreconditionFailed(nil, "REPO-zw62U", "field not definded")
 	}
 	if f.value == nil {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-GJ9ct", "no value definded")
+		return errors.ThrowPreconditionFailed(nil, "REPO-GJ9ct", "no value definded")
 	}
 	if f.operation <= 0 {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-RrQTy", "operation not definded")
+		return errors.ThrowPreconditionFailed(nil, "REPO-RrQTy", "operation not definded")
 	}
 	return nil
 }
@@ -1,4 +1,4 @@
-package eventstore
+package repository
 
 import (
 	"reflect"
internal/eventstore/v2/repository/version.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package repository

import (
	"regexp"

	"github.com/caos/zitadel/internal/errors"
)

var versionRegexp = regexp.MustCompile(`^v[0-9]+(\.[0-9]+){0,2}$`)

type Version string

func (v Version) Validate() error {
	if !versionRegexp.MatchString(string(v)) {
		return errors.ThrowPreconditionFailed(nil, "MODEL-luDuS", "version is not semver")
	}
	return nil
}
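As a quick illustration (not part of the commit), the regexp above accepts a leading v followed by one to three numeric components:

package example

import (
	"fmt"

	"github.com/caos/zitadel/internal/eventstore/v2/repository"
)

func main() {
	// "v1", "v1.2" and "v1.2.3" match ^v[0-9]+(\.[0-9]+){0,2}$; "1.2.3" does not
	fmt.Println(repository.Version("v1.2.3").Validate()) // <nil>
	fmt.Println(repository.Version("1.2.3").Validate())  // precondition-failed error
}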
@@ -1,4 +1,4 @@
-package eventstore
+package repository
 
 import "testing"
 
@@ -2,10 +2,11 @@ package eventstore
 
 import (
 	"github.com/caos/zitadel/internal/errors"
+	"github.com/caos/zitadel/internal/eventstore/v2/repository"
 )
 
 type SearchQueryFactory struct {
-	columns Columns
+	columns repository.Columns
 	limit uint64
 	desc bool
 	aggregateTypes []AggregateType
@@ -15,22 +16,16 @@ type SearchQueryFactory struct {
 	resourceOwner string
 }
 
-type searchQuery struct {
-	Columns Columns
-	Limit uint64
-	Desc bool
-	Filters []*Filter
-}
-
-type Columns int32
+type Columns repository.Columns
 
 const (
-	Columns_Event = iota
-	Columns_Max_Sequence
-	//insert new columns-types above this columnsCount because count is needed for validation
-	columnsCount
+	Columns_Event Columns = repository.Columns_Event
+	Columns_Max_Sequence Columns = repository.Columns_Max_Sequence
 )
 
+type AggregateType repository.AggregateType
+type EventType repository.EventType
+
 func NewSearchQueryFactory(aggregateTypes ...AggregateType) *SearchQueryFactory {
 	return &SearchQueryFactory{
 		aggregateTypes: aggregateTypes,
@@ -38,7 +33,7 @@ func NewSearchQueryFactory(aggregateTypes ...AggregateType) *SearchQueryFactory
 }
 
 func (factory *SearchQueryFactory) Columns(columns Columns) *SearchQueryFactory {
-	factory.columns = columns
+	factory.columns = repository.Columns(columns)
 	return factory
 }
 
@@ -77,17 +72,17 @@ func (factory *SearchQueryFactory) OrderAsc() *SearchQueryFactory {
 	return factory
 }
 
-func (factory *SearchQueryFactory) Build() (*searchQuery, error) {
+func (factory *SearchQueryFactory) Build() (*repository.SearchQuery, error) {
 	if factory == nil ||
 		len(factory.aggregateTypes) < 1 ||
-		(factory.columns < 0 || factory.columns >= columnsCount) {
+		(factory.columns < 0 || factory.columns >= repository.ColumnsCount) {
 		return nil, errors.ThrowPreconditionFailed(nil, "MODEL-tGAD3", "factory invalid")
 	}
-	filters := []*Filter{
+	filters := []*repository.Filter{
 		factory.aggregateTypeFilter(),
 	}
 
-	for _, f := range []func() *Filter{
+	for _, f := range []func() *repository.Filter{
 		factory.aggregateIDFilter,
 		factory.eventSequenceFilter,
 		factory.eventTypeFilter,
@@ -98,55 +93,55 @@ func (factory *SearchQueryFactory) Build() (*searchQuery, error) {
 		}
 	}
 
-	return &searchQuery{
-		Columns: factory.columns,
+	return &repository.SearchQuery{
+		Columns: repository.Columns(factory.columns),
 		Limit: factory.limit,
 		Desc: factory.desc,
 		Filters: filters,
 	}, nil
 }
 
-func (factory *SearchQueryFactory) aggregateIDFilter() *Filter {
+func (factory *SearchQueryFactory) aggregateIDFilter() *repository.Filter {
 	if len(factory.aggregateIDs) < 1 {
 		return nil
 	}
 	if len(factory.aggregateIDs) == 1 {
-		return NewFilter(Field_AggregateID, factory.aggregateIDs[0], Operation_Equals)
+		return repository.NewFilter(repository.Field_AggregateID, factory.aggregateIDs[0], repository.Operation_Equals)
 	}
-	return NewFilter(Field_AggregateID, factory.aggregateIDs, Operation_In)
+	return repository.NewFilter(repository.Field_AggregateID, factory.aggregateIDs, repository.Operation_In)
 }
 
-func (factory *SearchQueryFactory) eventTypeFilter() *Filter {
+func (factory *SearchQueryFactory) eventTypeFilter() *repository.Filter {
 	if len(factory.eventTypes) < 1 {
 		return nil
 	}
 	if len(factory.eventTypes) == 1 {
-		return NewFilter(Field_EventType, factory.eventTypes[0], Operation_Equals)
+		return repository.NewFilter(repository.Field_EventType, factory.eventTypes[0], repository.Operation_Equals)
 	}
-	return NewFilter(Field_EventType, factory.eventTypes, Operation_In)
+	return repository.NewFilter(repository.Field_EventType, factory.eventTypes, repository.Operation_In)
 }
 
-func (factory *SearchQueryFactory) aggregateTypeFilter() *Filter {
+func (factory *SearchQueryFactory) aggregateTypeFilter() *repository.Filter {
 	if len(factory.aggregateTypes) == 1 {
-		return NewFilter(Field_AggregateType, factory.aggregateTypes[0], Operation_Equals)
+		return repository.NewFilter(repository.Field_AggregateType, factory.aggregateTypes[0], repository.Operation_Equals)
 	}
-	return NewFilter(Field_AggregateType, factory.aggregateTypes, Operation_In)
+	return repository.NewFilter(repository.Field_AggregateType, factory.aggregateTypes, repository.Operation_In)
 }
 
-func (factory *SearchQueryFactory) eventSequenceFilter() *Filter {
+func (factory *SearchQueryFactory) eventSequenceFilter() *repository.Filter {
 	if factory.eventSequence == 0 {
 		return nil
 	}
-	sortOrder := Operation_Greater
+	sortOrder := repository.Operation_Greater
 	if factory.desc {
-		sortOrder = Operation_Less
+		sortOrder = repository.Operation_Less
 	}
-	return NewFilter(Field_LatestSequence, factory.eventSequence, sortOrder)
+	return repository.NewFilter(repository.Field_LatestSequence, factory.eventSequence, sortOrder)
 }
 
-func (factory *SearchQueryFactory) resourceOwnerFilter() *Filter {
+func (factory *SearchQueryFactory) resourceOwnerFilter() *repository.Filter {
 	if factory.resourceOwner == "" {
 		return nil
 	}
-	return NewFilter(Field_ResourceOwner, factory.resourceOwner, Operation_Equals)
+	return repository.NewFilter(repository.Field_ResourceOwner, factory.resourceOwner, repository.Operation_Equals)
 }
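A small sketch (not part of the commit) of how the reworked factory is used from outside the package: only methods visible in this diff are called, and Build now returns a repository.SearchQuery.

package example

import (
	"fmt"

	eventstore "github.com/caos/zitadel/internal/eventstore/v2"
)

func main() {
	// the factory validates itself on Build: at least one aggregate type
	// and a known Columns value are required
	query, err := eventstore.NewSearchQueryFactory("user", "org").
		Columns(eventstore.Columns_Max_Sequence).
		OrderAsc().
		Build()
	if err != nil {
		fmt.Println("invalid factory:", err)
		return
	}
	fmt.Printf("columns=%d filters=%d\n", query.Columns, len(query.Filters))
}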
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/caos/zitadel/internal/errors"
+	"github.com/caos/zitadel/internal/eventstore/v2/repository"
 )
 
 func testSetColumns(columns Columns) func(factory *SearchQueryFactory) *SearchQueryFactory {
@@ -82,10 +83,10 @@ func TestSearchQueryFactorySetters(t *testing.T) {
 		{
 			name: "set columns",
 			args: args{
-				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetColumns(Columns_Max_Sequence)},
+				setters: []func(*SearchQueryFactory) *SearchQueryFactory{testSetColumns(repository.Columns_Max_Sequence)},
 			},
 			res: &SearchQueryFactory{
-				columns: Columns_Max_Sequence,
+				columns: repository.Columns_Max_Sequence,
 			},
 		},
 		{
@@ -166,7 +167,7 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 	}
 	type res struct {
 		isErr func(err error) bool
-		query *searchQuery
+		query *repository.SearchQuery
 	}
 	tests := []struct {
 		name string
@@ -201,7 +202,7 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			args: args{
 				aggregateTypes: []AggregateType{"user"},
 				setters: []func(*SearchQueryFactory) *SearchQueryFactory{
-					testSetColumns(columnsCount),
+					testSetColumns(repository.ColumnsCount),
 				},
 			},
 			res: res{
@@ -216,12 +217,12 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
 					},
 				},
 			},
@@ -234,12 +235,12 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, []AggregateType{"user", "org"}, Operation_In),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, []AggregateType{"user", "org"}, repository.Operation_In),
 					},
 				},
 			},
@@ -256,13 +257,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: true,
 					Limit: 5,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_LatestSequence, uint64(100), Operation_Less),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_LatestSequence, uint64(100), repository.Operation_Less),
 					},
 				},
 			},
@@ -279,13 +280,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 5,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_LatestSequence, uint64(100), Operation_Greater),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_LatestSequence, uint64(100), repository.Operation_Greater),
 					},
 				},
 			},
@@ -298,18 +299,18 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 					testSetLimit(5),
 					testSetSortOrder(false),
 					testSetSequence(100),
-					testSetColumns(Columns_Max_Sequence),
+					testSetColumns(repository.Columns_Max_Sequence),
 				},
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
-					Columns: Columns_Max_Sequence,
+				query: &repository.SearchQuery{
+					Columns: repository.Columns_Max_Sequence,
 					Desc: true,
 					Limit: 5,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_LatestSequence, uint64(100), Operation_Less),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_LatestSequence, uint64(100), repository.Operation_Less),
 					},
 				},
 			},
@@ -324,13 +325,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_AggregateID, "1234", Operation_Equals),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_AggregateID, "1234", repository.Operation_Equals),
 					},
 				},
 			},
@@ -345,13 +346,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_AggregateID, []string{"1234", "0815"}, Operation_In),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_AggregateID, []string{"1234", "0815"}, repository.Operation_In),
 					},
 				},
 			},
@@ -366,13 +367,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_LatestSequence, uint64(8), Operation_Greater),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_LatestSequence, uint64(8), repository.Operation_Greater),
 					},
 				},
 			},
@@ -387,13 +388,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_EventType, EventType("user.created"), Operation_Equals),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_EventType, EventType("user.created"), repository.Operation_Equals),
 					},
 				},
 			},
@@ -408,13 +409,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_EventType, []EventType{"user.created", "user.changed"}, Operation_In),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_EventType, []EventType{"user.created", "user.changed"}, repository.Operation_In),
 					},
 				},
 			},
@@ -429,13 +430,13 @@ func TestSearchQueryFactoryBuild(t *testing.T) {
 			},
 			res: res{
 				isErr: nil,
-				query: &searchQuery{
+				query: &repository.SearchQuery{
 					Columns: 0,
 					Desc: false,
 					Limit: 0,
-					Filters: []*Filter{
-						NewFilter(Field_AggregateType, AggregateType("user"), Operation_Equals),
-						NewFilter(Field_ResourceOwner, "hodor", Operation_Equals),
+					Filters: []*repository.Filter{
+						repository.NewFilter(repository.Field_AggregateType, AggregateType("user"), repository.Operation_Equals),
+						repository.NewFilter(repository.Field_ResourceOwner, "hodor", repository.Operation_Equals),
 					},
 				},
 			},
@@ -1,18 +1,7 @@
 package eventstore
 
 import (
-	"regexp"
-
-	"github.com/caos/zitadel/internal/errors"
+	"github.com/caos/zitadel/internal/eventstore/v2/repository"
 )
 
-var versionRegexp = regexp.MustCompile(`^v[0-9]+(\.[0-9]+){0,2}$`)
-
-type Version string
-
-func (v Version) Validate() error {
-	if !versionRegexp.MatchString(string(v)) {
-		return errors.ThrowPreconditionFailed(nil, "MODEL-luDuS", "version is not semver")
-	}
-	return nil
-}
+type Version repository.Version
36
migrations/sqlite/V1.0__databases.sql
Normal file
36
migrations/sqlite/V1.0__databases.sql
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/management.db' AS 'management';
|
||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/auth.db' AS 'auth';
|
||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/notification.db' AS 'notification';
|
||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/adminapi.db' AS 'adminapi';
|
||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/authz.db' AS 'authz';
|
||||||
|
ATTACH DATABASE '${GOPATH}/src/github.com/caos/zitadel/.local/eventstore.db' AS 'eventstore';
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
-- CREATE USER eventstore;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO eventstore;
|
||||||
|
|
||||||
|
-- CREATE USER management;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE management TO management;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO management;
|
||||||
|
|
||||||
|
-- CREATE USER adminapi;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DELETE, DROP ON DATABASE adminapi TO adminapi;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO adminapi;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE auth TO adminapi;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE authz TO adminapi;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE management TO adminapi;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DROP, DELETE ON DATABASE notification TO adminapi;
|
||||||
|
|
||||||
|
-- CREATE USER auth;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE auth TO auth;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO auth;
|
||||||
|
|
||||||
|
-- CREATE USER notification;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE notification TO notification;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO notification;
|
||||||
|
|
||||||
|
-- CREATE USER authz;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE, DELETE ON DATABASE authz TO authz;
|
||||||
|
-- GRANT SELECT, INSERT ON DATABASE eventstore TO authz;
|
||||||
|
-- GRANT SELECT, INSERT, UPDATE ON DATABASE auth TO authz;
|
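A minimal sketch (not part of the commit) of how these ATTACH statements could be issued from Go. The mattn/go-sqlite3 driver, the zitadel.db main file, and the manual expansion of ${GOPATH} are assumptions; SQLite itself does not substitute environment variables in the migration above.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"os"
	"path/filepath"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	root := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "caos", "zitadel", ".local")

	// zitadel.db as the main connection is an assumption for this sketch.
	db, err := sql.Open("sqlite3", filepath.Join(root, "zitadel.db"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same schema names as in V1.0__databases.sql; ${GOPATH} is expanded here
	// because SQLite does not resolve environment variables on its own.
	for _, schema := range []string{"management", "auth", "notification", "adminapi", "authz", "eventstore"} {
		stmt := fmt.Sprintf("ATTACH DATABASE '%s' AS '%s';", filepath.Join(root, schema+".db"), schema)
		if _, err := db.Exec(stmt); err != nil {
			log.Fatalf("attach %s: %v", schema, err)
		}
	}
}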
15 migrations/sqlite/V1.10__user_machine.sql Normal file
@@ -0,0 +1,15 @@
ALTER TABLE management.users ADD COLUMN machine_name STRING, ADD COLUMN machine_description STRING, ADD COLUMN user_type STRING;
ALTER TABLE adminapi.users ADD COLUMN machine_name STRING, ADD COLUMN machine_description STRING, ADD COLUMN user_type STRING;
ALTER TABLE auth.users ADD COLUMN machine_name STRING, ADD COLUMN machine_description STRING, ADD COLUMN user_type STRING;

CREATE TABLE management.machine_keys (
    id TEXT,
    user_id TEXT,

    machine_type SMALLINT,
    expiration_date TIMESTAMPTZ,
    sequence BIGINT,
    creation_date TIMESTAMPTZ,

    PRIMARY KEY (id, user_id)
)
18 migrations/sqlite/V1.11__usermembership.sql Normal file
@@ -0,0 +1,18 @@
CREATE TABLE auth.user_memberships (
    user_id TEXT,
    member_type SMALLINT,
    aggregate_id TEXT,
    object_id TEXT,

    roles TEXT ARRAY,
    display_name TEXT,
    resource_owner TEXT,
    resource_owner_name TEXT,
    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,

    PRIMARY KEY (user_id, member_type, aggregate_id, object_id)
);

ALTER TABLE management.user_memberships ADD COLUMN resource_owner_name TEXT;
14 migrations/sqlite/V1.12__machine_keys.sql Normal file
@@ -0,0 +1,14 @@
CREATE TABLE auth.machine_keys (
    id TEXT,
    user_id TEXT,

    machine_type SMALLINT,
    expiration_date TIMESTAMPTZ,
    sequence BIGINT,
    creation_date TIMESTAMPTZ,
    public_key JSONB,

    PRIMARY KEY (id, user_id)
);

ALTER TABLE management.machine_keys ADD COLUMN public_key JSONB;
5 migrations/sqlite/V1.13__machine_keys_public.sql Normal file
@@ -0,0 +1,5 @@
ALTER TABLE management.machine_keys DROP COLUMN IF EXISTS public_key;
ALTER TABLE management.machine_keys ADD COLUMN public_key BYTES;

ALTER TABLE auth.machine_keys DROP COLUMN IF EXISTS public_key;
ALTER TABLE auth.machine_keys ADD COLUMN public_key BYTES;
105 migrations/sqlite/V1.14__auth_loginpolicy.sql Normal file
@@ -0,0 +1,105 @@
ALTER TABLE adminapi.idp_configs ADD COLUMN oidc_idp_display_name_mapping SMALLINT;
ALTER TABLE adminapi.idp_configs ADD COLUMN oidc_idp_username_mapping SMALLINT;

ALTER TABLE management.idp_configs ADD COLUMN oidc_idp_display_name_mapping SMALLINT;
ALTER TABLE management.idp_configs ADD COLUMN oidc_idp_username_mapping SMALLINT;

CREATE TABLE auth.idp_configs (
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    aggregate_id TEXT,
    name TEXT,
    logo_src BYTES,
    idp_state SMALLINT,
    idp_provider_type SMALLINT,

    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_client_secret JSONB,
    oidc_issuer TEXT,
    oidc_scopes TEXT ARRAY,
    oidc_idp_display_name_mapping SMALLINT,
    oidc_idp_username_mapping SMALLINT,

    PRIMARY KEY (idp_config_id)
);


CREATE TABLE auth.login_policies (
    aggregate_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    login_policy_state SMALLINT,
    sequence BIGINT,

    allow_register BOOLEAN,
    allow_username_password BOOLEAN,
    allow_external_idp BOOLEAN,

    PRIMARY KEY (aggregate_id)
);

CREATE TABLE auth.idp_providers (
    aggregate_id TEXT,
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,

    name string,
    idp_config_type SMALLINT,
    idp_provider_type SMALLINT,

    PRIMARY KEY (aggregate_id, idp_config_id)
);


CREATE TABLE auth.user_external_idps (
    external_user_id TEXT,
    idp_config_id TEXT,
    user_id TEXT,
    idp_name TEXT,
    user_display_name TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    resource_owner TEXT,

    PRIMARY KEY (external_user_id, idp_config_id)
);

CREATE TABLE management.user_external_idps (
    idp_config_id TEXT,
    external_user_id TEXT,
    user_id TEXT,
    idp_name TEXT,
    user_display_name TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    resource_owner TEXT,

    PRIMARY KEY (external_user_id, idp_config_id)
);

CREATE TABLE adminapi.user_external_idps (
    idp_config_id TEXT,
    external_user_id TEXT,
    user_id TEXT,
    idp_name TEXT,
    user_display_name TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    resource_owner TEXT,

    PRIMARY KEY (external_user_id, idp_config_id)
);
3 migrations/sqlite/V1.15__idp_providers.sql Normal file
@@ -0,0 +1,3 @@
ALTER TABLE adminapi.idp_providers ADD COLUMN idp_state SMALLINT;
ALTER TABLE management.idp_providers ADD COLUMN idp_state SMALLINT;
ALTER TABLE auth.idp_providers ADD COLUMN idp_state SMALLINT;
18 migrations/sqlite/V1.1__eventstore.sql Normal file
@@ -0,0 +1,18 @@
CREATE TABLE eventstore.events (
    event_type TEXT,
    aggregate_type TEXT NOT NULL,
    aggregate_id TEXT NOT NULL,
    aggregate_version TEXT NOT NULL,
    event_sequence INTEGER,
    previous_sequence BIGINT,
    creation_date TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    event_data JSONB,
    editor_user TEXT NOT NULL,
    editor_service TEXT NOT NULL,
    resource_owner TEXT NOT NULL,

    CONSTRAINT event_sequence_pk PRIMARY KEY (event_sequence DESC),
    CONSTRAINT previous_sequence_unique UNIQUE (previous_sequence DESC)
);

CREATE INDEX eventstore.agg_type_agg_id ON events (aggregate_type, aggregate_id);
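A small sketch (not part of the commit) of writing one row into the events table defined above. It assumes the mattn/go-sqlite3 driver and that the migration was applied while eventstore.db was attached as 'eventstore', so the table lives in that file as plain `events`. The sequence and payload values are illustrative only; the real eventstore derives sequences itself.

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ".local/eventstore.db") // path is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// creation_date is omitted so the DEFAULT CURRENT_TIMESTAMP applies.
	_, err = db.Exec(`INSERT INTO events (
			event_type, aggregate_type, aggregate_id, aggregate_version,
			event_sequence, previous_sequence, event_data,
			editor_user, editor_service, resource_owner
		) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		"user.created", "user", "A1", "v1",
		1, 0, `{"userName":"hodor"}`,
		"editor-id", "management-api", "org-1",
	)
	if err != nil {
		log.Fatal(err)
	}
}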
628 migrations/sqlite/V1.2__views.sql Normal file
@@ -0,0 +1,628 @@
CREATE TABLE management.locks (
    locker_id TEXT,
    locked_until TIMESTAMP,
    view_name TEXT,

    PRIMARY KEY (view_name)
);

CREATE TABLE management.current_sequences (
    view_name TEXT,
    current_sequence BIGINT,
    timestamp TIMESTAMP,

    PRIMARY KEY (view_name)
);

CREATE TABLE management.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,

    PRIMARY KEY (view_name, failed_sequence)
);

CREATE TABLE management.projects (
    project_id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    project_name TEXT,
    project_state SMALLINT,
    resource_owner TEXT,
    sequence BIGINT,

    PRIMARY KEY (project_id)
);

CREATE TABLE management.project_grants (
    grant_id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    project_id TEXT,
    project_name TEXT,
    org_name TEXT,
    project_state SMALLINT,
    resource_owner TEXT,
    org_id TEXT,
    granted_role_keys TEXT Array,
    sequence BIGINT,
    resource_owner_name TEXT,

    PRIMARY KEY (grant_id)
);

CREATE TABLE management.project_roles (
    project_id TEXT,
    role_key TEXT,
    display_name TEXT,
    resource_owner TEXT,
    org_id TEXT,
    group_name TEXT,

    creation_date TIMESTAMP,
    sequence BIGINT,

    PRIMARY KEY (org_id, project_id, role_key)
);

CREATE TABLE management.project_members (
    user_id TEXT,
    project_id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    user_name TEXT,
    email_address TEXT,
    first_name TEXT,
    last_name TEXT,
    roles TEXT ARRAY,
    display_name TEXT,
    sequence BIGINT,

    PRIMARY KEY (project_id, user_id)
);

CREATE TABLE management.project_grant_members (
    user_id TEXT,
    grant_id TEXT,
    project_id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    user_name TEXT,
    email_address TEXT,
    first_name TEXT,
    last_name TEXT,
    roles TEXT ARRAY,
    display_name TEXT,
    sequence BIGINT,

    PRIMARY KEY (grant_id, user_id)
);

CREATE TABLE management.applications (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    app_state SMALLINT,
    resource_owner TEXT,
    app_name TEXT,
    project_id TEXT,
    app_type SMALLINT,
    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_redirect_uris TEXT ARRAY,
    oidc_response_types SMALLINT ARRAY,
    oidc_grant_types SMALLINT ARRAY,
    oidc_application_type SMALLINT,
    oidc_auth_method_type SMALLINT,
    oidc_post_logout_redirect_uris TEXT ARRAY,

    PRIMARY KEY (id)
);

CREATE TABLE management.users (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    user_state SMALLINT,
    last_login TIMESTAMP,
    password_change TIMESTAMP,
    user_name TEXT,
    login_names TEXT ARRAY,
    preferred_login_name TEXT,
    first_name TEXT,
    last_name TEXT,
    nick_Name TEXT,
    display_name TEXT,
    preferred_language TEXT,
    gender SMALLINT,
    email TEXT,
    is_email_verified BOOLEAN,
    phone TEXT,
    is_phone_verified BOOLEAN,
    country TEXT,
    locality TEXT,
    postal_code TEXT,
    region TEXT,
    street_address TEXT,
    otp_state SMALLINT,
    sequence BIGINT,
    password_set BOOLEAN,
    password_change_required BOOLEAN,
    mfa_max_set_up SMALLINT,
    mfa_init_skipped TIMESTAMP,
    init_required BOOLEAN,

    PRIMARY KEY (id)
);

CREATE TABLE management.user_grants (
    id TEXT,
    resource_owner TEXT,
    project_id TEXT,
    user_id TEXT,
    org_name TEXT,
    project_name TEXT,
    user_name TEXT,
    display_name TEXT,
    first_name TEXT,
    last_name TEXT,
    email TEXT,
    role_keys TEXT Array,
    grant_id TEXT,

    grant_state SMALLINT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    PRIMARY KEY (id)
);

CREATE TABLE management.org_domains (
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    domain TEXT,
    org_id TEXT,
    verified BOOLEAN,
    primary_domain BOOLEAN,

    PRIMARY KEY (org_id, domain)
);

CREATE TABLE auth.locks (
    locker_id TEXT,
    locked_until TIMESTAMP,
    view_name TEXT,

    PRIMARY KEY (view_name)
);

CREATE TABLE auth.current_sequences (
    view_name TEXT,
    timestamp TIMESTAMP,

    current_sequence BIGINT,

    PRIMARY KEY (view_name)
);

CREATE TABLE auth.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,

    PRIMARY KEY (view_name, failed_sequence)
);

CREATE TABLE auth.auth_requests (
    id TEXT,
    request JSONB,
    code TEXT,
    request_type smallint,

    PRIMARY KEY (id)
);

CREATE TABLE auth.users (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    user_state SMALLINT,
    password_set BOOLEAN,
    password_change_required BOOLEAN,
    password_change TIMESTAMP,
    last_login TIMESTAMP,
    user_name TEXT,
    login_names TEXT ARRAY,
    preferred_login_name TEXT,
    first_name TEXT,
    last_name TEXT,
    nick_name TEXT,
    display_name TEXT,
    preferred_language TEXT,
    gender SMALLINT,
    email TEXT,
    is_email_verified BOOLEAN,
    phone TEXT,
    is_phone_verified BOOLEAN,
    country TEXT,
    locality TEXT,
    postal_code TEXT,
    region TEXT,
    street_address TEXT,
    otp_state SMALLINT,
    mfa_max_set_up SMALLINT,
    mfa_init_skipped TIMESTAMP,
    sequence BIGINT,
    init_required BOOLEAN,

    PRIMARY KEY (id)
);

CREATE TABLE auth.user_sessions (
    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    state SMALLINT,
    user_agent_id TEXT,
    user_id TEXT,
    user_name TEXT,
    password_verification TIMESTAMP,
    mfa_software_verification TIMESTAMP,
    mfa_hardware_verification TIMESTAMP,
    sequence BIGINT,
    mfa_software_verification_type SMALLINT,
    mfa_hardware_verification_type SMALLINT,
    user_display_name TEXT,
    login_name TEXT,

    PRIMARY KEY (user_agent_id, user_id)
);

CREATE TABLE auth.tokens (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    application_id TEXT,
    user_agent_id TEXT,
    user_id TEXT,
    expiration TIMESTAMP,
    sequence BIGINT,
    scopes TEXT ARRAY,
    audience TEXT ARRAY,

    PRIMARY KEY (id)
);


CREATE TABLE notification.locks (
    locker_id TEXT,
    locked_until TIMESTAMP,
    view_name TEXT,

    PRIMARY KEY (view_name)
);

CREATE TABLE notification.current_sequences (
    view_name TEXT,
    timestamp TIMESTAMP,

    current_sequence BIGINT,

    PRIMARY KEY (view_name)
);

CREATE TABLE notification.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,

    PRIMARY KEY (view_name, failed_sequence)
);

CREATE TABLE notification.notify_users (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    user_name TEXT,
    first_name TEXT,
    last_name TEXT,
    nick_Name TEXT,
    display_name TEXT,
    preferred_language TEXT,
    gender SMALLINT,
    last_email TEXT,
    verified_email TEXT,
    last_phone TEXT,
    verified_phone TEXT,
    sequence BIGINT,
    password_set BOOLEAN,
    login_names TEXT,
    preferred_login_name TEXT,

    PRIMARY KEY (id)
);


CREATE TABLE adminapi.orgs (
    id TEXT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    resource_owner TEXT,
    org_state SMALLINT,
    sequence BIGINT,

    domain TEXT,
    name TEXT,

    PRIMARY KEY (id)
);

CREATE TABLE adminapi.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,

    PRIMARY KEY (view_name, failed_sequence)
);

CREATE TABLE adminapi.locks (
    locker_id TEXT,
    locked_until TIMESTAMP,
    view_name TEXT,

    PRIMARY KEY (view_name)
);

CREATE TABLE adminapi.current_sequences (
    view_name TEXT,
    timestamp TIMESTAMP,

    current_sequence BIGINT,

    PRIMARY KEY (view_name)
);

CREATE TABLE adminapi.iam_members (
    user_id TEXT,

    iam_id TEXT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    user_name TEXT,
    email_address TEXT,
    first_name TEXT,
    last_name TEXT,
    roles TEXT ARRAY,
    display_name TEXT,
    sequence BIGINT,

    PRIMARY KEY (user_id)
);

CREATE TABLE management.orgs (
    id TEXT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    resource_owner TEXT,
    org_state SMALLINT,
    sequence BIGINT,

    domain TEXT,
    name TEXT,

    PRIMARY KEY (id)
);

CREATE TABLE management.org_members (
    user_id TEXT,
    org_id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    user_name TEXT,
    email_address TEXT,
    first_name TEXT,
    last_name TEXT,
    roles TEXT ARRAY,
    display_name TEXT,
    sequence BIGINT,

    PRIMARY KEY (org_id, user_id)
);


CREATE TABLE auth.keys (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,

    resource_owner TEXT,
    private BOOLEAN,
    expiry TIMESTAMP,
    algorithm TEXT,
    usage SMALLINT,
    key JSONB,
    sequence BIGINT,

    PRIMARY KEY (id, private)
);

CREATE TABLE auth.applications (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    app_state SMALLINT,
    resource_owner TEXT,
    app_name TEXT,
    project_id TEXT,
    app_type SMALLINT,
    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_redirect_uris TEXT ARRAY,
    oidc_response_types SMALLINT ARRAY,
    oidc_grant_types SMALLINT ARRAY,
    oidc_application_type SMALLINT,
    oidc_auth_method_type SMALLINT,
    oidc_post_logout_redirect_uris TEXT ARRAY,

    PRIMARY KEY (id)
);

CREATE TABLE auth.user_grants (
    id TEXT,
    resource_owner TEXT,
    project_id TEXT,
    user_id TEXT,
    org_name TEXT,
    project_name TEXT,
    user_name TEXT,
    first_name TEXT,
    last_name TEXT,
    display_name TEXT,
    email TEXT,
    role_keys TEXT Array,
    grant_id TEXT,

    grant_state SMALLINT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    PRIMARY KEY (id)
);

CREATE TABLE auth.orgs (
    id TEXT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    resource_owner TEXT,
    org_state SMALLINT,
    sequence BIGINT,

    domain TEXT,
    name TEXT,

    PRIMARY KEY (id)
);

CREATE TABLE authz.locks (
    locker_id TEXT,
    locked_until TIMESTAMP,
    view_name TEXT,

    PRIMARY KEY (view_name)
);

CREATE TABLE authz.current_sequences (
    view_name TEXT,
    timestamp TIMESTAMP,

    current_sequence BIGINT,

    PRIMARY KEY (view_name)
);

CREATE TABLE authz.failed_events (
    view_name TEXT,
    failed_sequence BIGINT,
    failure_count SMALLINT,
    err_msg TEXT,

    PRIMARY KEY (view_name, failed_sequence)
);

CREATE TABLE authz.user_grants (
    id TEXT,
    resource_owner TEXT,
    project_id TEXT,
    user_id TEXT,
    org_name TEXT,
    project_name TEXT,
    user_name TEXT,
    first_name TEXT,
    last_name TEXT,
    display_name TEXT,
    email TEXT,
    role_keys TEXT Array,
    grant_id TEXT,

    grant_state SMALLINT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    PRIMARY KEY (id)
);

CREATE TABLE authz.applications (
    id TEXT,

    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    sequence BIGINT,

    app_state SMALLINT,
    resource_owner TEXT,
    app_name TEXT,
    project_id TEXT,
    app_type SMALLINT,
    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_redirect_uris TEXT ARRAY,
    oidc_response_types SMALLINT ARRAY,
    oidc_grant_types SMALLINT ARRAY,
    oidc_application_type SMALLINT,
    oidc_auth_method_type SMALLINT,
    oidc_post_logout_redirect_uris TEXT ARRAY,

    PRIMARY KEY (id)
);

CREATE TABLE authz.orgs (
    id TEXT,
    creation_date TIMESTAMP,
    change_date TIMESTAMP,
    resource_owner TEXT,
    org_state SMALLINT,
    sequence BIGINT,

    domain TEXT,
    name TEXT,

    PRIMARY KEY (id)
);
15 migrations/sqlite/V1.3__usermembership.sql Normal file
@@ -0,0 +1,15 @@
CREATE TABLE management.user_memberships (
    user_id TEXT,
    member_type SMALLINT,
    aggregate_id TEXT,
    object_id TEXT,

    roles TEXT ARRAY,
    display_name TEXT,
    resource_owner TEXT,
    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,

    PRIMARY KEY (user_id, member_type, aggregate_id, object_id)
);
14 migrations/sqlite/V1.4__compliance.sql Normal file
@@ -0,0 +1,14 @@
ALTER TABLE management.applications ADD COLUMN oidc_version SMALLINT;
ALTER TABLE management.applications ADD COLUMN none_compliant BOOLEAN;
ALTER TABLE management.applications ADD COLUMN compliance_problems TEXT ARRAY;
ALTER TABLE management.applications ADD COLUMN dev_mode BOOLEAN;

ALTER TABLE auth.applications ADD COLUMN oidc_version SMALLINT;
ALTER TABLE auth.applications ADD COLUMN none_compliant BOOLEAN;
ALTER TABLE auth.applications ADD COLUMN compliance_problems TEXT ARRAY;
ALTER TABLE auth.applications ADD COLUMN dev_mode BOOLEAN;

ALTER TABLE authz.applications ADD COLUMN oidc_version SMALLINT;
ALTER TABLE authz.applications ADD COLUMN none_compliant BOOLEAN;
ALTER TABLE authz.applications ADD COLUMN compliance_problems TEXT ARRAY;
ALTER TABLE authz.applications ADD COLUMN dev_mode BOOLEAN;
44 migrations/sqlite/V1.5__orgdomain_validationtype.sql Normal file
@@ -0,0 +1,44 @@
BEGIN;

ALTER TABLE management.org_domains ADD COLUMN validation_type SMALLINT;

CREATE TABLE adminapi.users (
    id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,

    resource_owner TEXT,
    user_state SMALLINT,
    last_login TIMESTAMPTZ,
    password_change TIMESTAMPTZ,
    user_name TEXT,
    login_names TEXT ARRAY,
    preferred_login_name TEXT,
    first_name TEXT,
    last_name TEXT,
    nick_Name TEXT,
    display_name TEXT,
    preferred_language TEXT,
    gender SMALLINT,
    email TEXT,
    is_email_verified BOOLEAN,
    phone TEXT,
    is_phone_verified BOOLEAN,
    country TEXT,
    locality TEXT,
    postal_code TEXT,
    region TEXT,
    street_address TEXT,
    otp_state SMALLINT,
    sequence BIGINT,
    password_set BOOLEAN,
    password_change_required BOOLEAN,
    mfa_max_set_up SMALLINT,
    mfa_init_skipped TIMESTAMPTZ,
    init_required BOOLEAN,

    PRIMARY KEY (id)
);

COMMIT;
15 migrations/sqlite/V1.6__origin_allow_list.sql Normal file
@@ -0,0 +1,15 @@
BEGIN;

ALTER TABLE management.applications ADD COLUMN origin_allow_list TEXT ARRAY;
ALTER TABLE auth.applications ADD COLUMN origin_allow_list TEXT ARRAY;
ALTER TABLE authz.applications ADD COLUMN origin_allow_list TEXT ARRAY;

TRUNCATE TABLE management.applications;
TRUNCATE TABLE auth.applications;
TRUNCATE TABLE authz.applications;

UPDATE management.current_sequences set current_sequence = 0 where view_name = 'management.applications';
UPDATE auth.current_sequences set current_sequence = 0 where view_name = 'auth.applications';
UPDATE authz.current_sequences set current_sequence = 0 where view_name = 'authz.applications';

COMMIT;
105 migrations/sqlite/V1.7__idps.sql Normal file
@@ -0,0 +1,105 @@

CREATE TABLE adminapi.idp_configs (
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    aggregate_id TEXT,
    name TEXT,
    logo_src BYTES,
    idp_state SMALLINT,
    idp_provider_type SMALLINT,

    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_client_secret JSONB,
    oidc_issuer TEXT,
    oidc_scopes TEXT ARRAY,

    PRIMARY KEY (idp_config_id)
);


CREATE TABLE management.idp_configs (
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,
    aggregate_id TEXT,
    name TEXT,
    logo_src BYTES,
    idp_state SMALLINT,
    idp_provider_type SMALLINT,

    is_oidc BOOLEAN,
    oidc_client_id TEXT,
    oidc_client_secret JSONB,
    oidc_issuer TEXT,
    oidc_scopes TEXT ARRAY,

    PRIMARY KEY (idp_config_id)
);


CREATE TABLE adminapi.login_policies (
    aggregate_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    login_policy_state SMALLINT,
    sequence BIGINT,

    allow_register BOOLEAN,
    allow_username_password BOOLEAN,
    allow_external_idp BOOLEAN,

    PRIMARY KEY (aggregate_id)
);


CREATE TABLE management.login_policies (
    aggregate_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    login_policy_state SMALLINT,
    sequence BIGINT,

    allow_register BOOLEAN,
    allow_username_password BOOLEAN,
    allow_external_idp BOOLEAN,

    PRIMARY KEY (aggregate_id)
);

CREATE TABLE adminapi.idp_providers (
    aggregate_id TEXT,
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,

    name string,
    idp_config_type SMALLINT,
    idp_provider_type SMALLINT,

    PRIMARY KEY (aggregate_id, idp_config_id)
);

CREATE TABLE management.idp_providers (
    aggregate_id TEXT,
    idp_config_id TEXT,

    creation_date TIMESTAMPTZ,
    change_date TIMESTAMPTZ,
    sequence BIGINT,

    name string,
    idp_config_type SMALLINT,
    idp_provider_type SMALLINT,

    PRIMARY KEY (aggregate_id, idp_config_id)
);
7 migrations/sqlite/V1.8__username_change.sql Normal file
@@ -0,0 +1,7 @@
BEGIN;

ALTER TABLE management.users ADD COLUMN username_change_required BOOLEAN;
ALTER TABLE auth.users ADD COLUMN username_change_required BOOLEAN;
ALTER TABLE adminapi.users ADD COLUMN username_change_required BOOLEAN;

COMMIT;
2 migrations/sqlite/V1.9__token.sql Normal file
@@ -0,0 +1,2 @@

ALTER TABLE auth.tokens ADD COLUMN preferred_language TEXT;
5 migrations/sqlite/clean_local.go Normal file
@@ -0,0 +1,5 @@
//+build ignore

package migrations

//go:generate flyway -url=jdbc:postgresql://localhost:26257/defaultdb -user=root -password= -locations=filesystem:./ clean
5 migrations/sqlite/migrate_local.go Normal file
@@ -0,0 +1,5 @@
//+build ignore

package migrations

//go:generate flyway -url=jdbc:sqlite:/Users/silvanreusser/go/src/github.com/caos/zitadel/.local/zitadel.db -user=admin -password= -schemas=eventstore, -locations=filesystem:./ migrate