feat(database): support for postgres (#3998)

* beginning with postgres statements

* try pgx

* use pgx

* database

* init works for postgres

* arrays working

* init for cockroach

* init

* start tests

* tests

* TESTS

* ch

* ch

* chore: use go 1.18

* read stmts

* fix typo

* tests

* connection string

* add missing error handler

* cleanup

* start all apis

* go mod tidy

* old update

* switch back to minute

* on conflict

* replace string slice with `database.StringArray` in db models

* fix tests and start

* update go version in dockerfile

* setup go

* clean up

* remove notification migration

* update

* docs: add deploy guide for postgres

* fix: revert sonyflake

* use `database.StringArray` for daos

* use `database.StringArray` everywhere (see the sketch below)

* new tables

* index naming, metadata primary key, project grant role key type

* docs(postgres): change to beta

* chore: correct compose

* fix(defaults): add empty postgres config

* refactor: remove unused code

* docs: add postgres to self hosted

* fix broken link

* so?

* change title

* add mdx to link

* fix stmt

* update goreleaser in test-code

* docs: improve postgres example

* update more projections

* fix: add beta log for postgres

* revert index name change

* prerelease

* fix: add sequence to v1 "reduce panicked"

* log if nil

* add logging

* fix: log output

* fix(import): check if org exists and user

* refactor: imports

* fix(user): ignore malformed events

* refactor: method naming

* fix: test

* refactor: correct errors.Is call

* ci: don't build dev binaries on main

* fix(go releaser): update version to 1.11.0

* fix(user): projection should not break

* fix(user): handle error properly

* docs: correct config example

* Update .releaserc.js

* Update .releaserc.js

Co-authored-by: Livio Amstutz <livio.a@gmail.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
Authored by Silvan, committed by GitHub on 2022-08-31 09:52:43 +02:00
parent d6c9815945
commit 77b4fc5487
189 changed files with 3401 additions and 2956 deletions
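
Several of the commits above swap plain string slices in the db models and daos for `database.StringArray`. The committed definition is not part of this excerpt; purely as a hypothetical sketch, such a type is usually a named []string implementing driver.Valuer and sql.Scanner so it can round-trip through a Postgres text[] column:

package main

import (
	"database/sql/driver"
	"fmt"
	"strings"
)

// StringArray is an illustrative stand-in for database.StringArray, not the
// committed code. The parsing is deliberately simplified and does not handle
// quoted elements containing commas or braces.
type StringArray []string

// Value renders the slice as a Postgres array literal such as {a,b,c}.
func (a StringArray) Value() (driver.Value, error) {
	return "{" + strings.Join(a, ",") + "}", nil
}

// Scan parses a Postgres array literal back into the slice.
func (a *StringArray) Scan(src interface{}) error {
	var raw string
	switch v := src.(type) {
	case nil:
		*a = nil
		return nil
	case string:
		raw = v
	case []byte:
		raw = string(v)
	default:
		return fmt.Errorf("unsupported type %T", src)
	}
	raw = strings.Trim(raw, "{}")
	if raw == "" {
		*a = StringArray{}
		return nil
	}
	*a = StringArray(strings.Split(raw, ","))
	return nil
}

func main() {
	v, _ := StringArray{"admin", "editor"}.Value()
	fmt.Println(v) // {admin,editor}

	var roles StringArray
	_ = roles.Scan("{admin,editor}")
	fmt.Println(roles) // [admin editor]
}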

@@ -2,8 +2,6 @@ package sql
import (
"database/sql"
_ "github.com/lib/pq"
)
func Start(client *sql.DB) *SQL {

@@ -7,7 +7,6 @@ import (
"strconv"
"strings"
"github.com/lib/pq"
"github.com/zitadel/logging"
z_errors "github.com/zitadel/zitadel/internal/errors"
@@ -72,10 +71,6 @@ func prepareCondition(filters [][]*es_models.Filter) (clause string, values []in
subClauses := make([]string, 0, len(filter))
for _, f := range filter {
value := f.GetValue()
-switch value.(type) {
-case []bool, []float64, []int64, []string, []es_models.AggregateType, []es_models.EventType, *[]bool, *[]float64, *[]int64, *[]string, *[]es_models.AggregateType, *[]es_models.EventType:
-value = pq.Array(value)
-}
subClauses = append(subClauses, getCondition(f))
if subClauses[len(subClauses)-1] == "" {
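
The hunks above drop the pq.Array wrapper: with the move from lib/pq to pgx, the driver encodes Go slices as Postgres arrays on its own. A standalone sketch of the same `= ANY` filter through database/sql with the pgx stdlib driver (v4 is assumed here); the DSN, table and column names are placeholders, not taken from this commit:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib" // registers the "pgx" database/sql driver
)

func main() {
	// Placeholder DSN, adjust to your setup.
	db, err := sql.Open("pgx", "postgres://zitadel:secret@localhost:5432/zitadel")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The slice is bound directly; pgx turns it into a Postgres array,
	// so no pq.Array wrapper is needed anymore.
	rows, err := db.Query(
		"SELECT id, event_type FROM eventstore.events WHERE aggregate_type = ANY($1)",
		[]string{"user", "org"},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var id, eventType string
		if err := rows.Scan(&id, &eventType); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, eventType)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}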

@@ -6,8 +6,6 @@ import (
"testing"
"time"
"github.com/lib/pq"
"github.com/zitadel/zitadel/internal/errors"
es_models "github.com/zitadel/zitadel/internal/eventstore/v1/models"
)
@@ -365,7 +363,7 @@ func Test_prepareCondition(t *testing.T) {
},
res: res{
clause: " WHERE ( aggregate_type = ANY(?) )",
-values: []interface{}{pq.Array([]es_models.AggregateType{"user", "org"})},
+values: []interface{}{[]es_models.AggregateType{"user", "org"}},
},
},
{
@@ -381,7 +379,7 @@ func Test_prepareCondition(t *testing.T) {
},
res: res{
clause: " WHERE ( aggregate_type = ANY(?) AND aggregate_id = ? AND event_type = ANY(?) )",
-values: []interface{}{pq.Array([]es_models.AggregateType{"user", "org"}), "1234", pq.Array([]es_models.EventType{"user.created", "org.created"})},
+values: []interface{}{[]es_models.AggregateType{"user", "org"}, "1234", []es_models.EventType{"user.created", "org.created"}},
},
},
}

@@ -3,9 +3,6 @@ package sql
import (
"context"
"database/sql"
-//sql import
-_ "github.com/lib/pq"
)
type SQL struct {

@@ -31,7 +31,7 @@ func Renew(dbClient *sql.DB, lockTable, lockerID, viewModel, instanceID string,
return crdb.ExecuteTx(context.Background(), dbClient, nil, func(tx *sql.Tx) error {
insert := fmt.Sprintf(insertStmtFormat, lockTable)
result, err := tx.Exec(insert,
-lockerID, waitTime.Milliseconds()/millisecondsAsSeconds, viewModel, instanceID)
+lockerID, waitTime, viewModel, instanceID)
if err != nil {
tx.Rollback()

@@ -38,7 +38,12 @@ func ReduceEvent(handler Handler, event *models.Event) {
if err != nil {
handler.Subscription().Unsubscribe()
-logging.WithFields("cause", err, "stack", string(debug.Stack())).Error("reduce panicked")
+logging.WithFields(
+"cause", err,
+"stack", string(debug.Stack()),
+"sequence", event.Sequence,
+"instance", event.InstanceID,
+).Error("reduce panicked")
}
}()
currentSequence, err := handler.CurrentSequence(event.InstanceID)
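
The added fields above make it possible to trace a panicking projection back to the exact event. A stripped-down sketch of the same recover-and-log pattern; the reduce wrapper and its signature are illustrative, not zitadel's actual handler interface:

package main

import (
	"runtime/debug"

	"github.com/zitadel/logging"
)

// reduce runs fn and, if it panics, logs the cause together with the event
// sequence and instance, mirroring the deferred recover in the diff above.
// The panic is swallowed after logging, so the caller sees a nil error.
func reduce(sequence uint64, instanceID string, fn func() error) (err error) {
	defer func() {
		if cause := recover(); cause != nil {
			logging.WithFields(
				"cause", cause,
				"stack", string(debug.Stack()),
				"sequence", sequence,
				"instance", instanceID,
			).Error("reduce panicked")
		}
	}()
	return fn()
}

func main() {
	_ = reduce(42, "instance-1", func() error {
		panic("boom") // recovered and logged with sequence and instance
	})
}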

@@ -75,7 +75,10 @@ func (s *spooledHandler) load(workerID string) {
err := recover()
if err != nil {
-logging.WithFields("cause", err, "stack", string(debug.Stack())).Error("reduce panicked")
+logging.WithFields(
+"cause", err,
+"stack", string(debug.Stack()),
+).Error("reduce panicked")
}
}()
ctx, cancel := context.WithCancel(context.Background())
@@ -167,7 +170,7 @@ func (s *spooledHandler) query(ctx context.Context, instanceIDs ...string) ([]*m
return s.eventstore.FilterEvents(ctx, query)
}
-//lock ensures the lock on the database.
+// lock ensures the lock on the database.
// the returned channel will be closed if ctx is done or an error occurred during lock
func (s *spooledHandler) lock(ctx context.Context, errs chan<- error, workerID string) chan bool {
renewTimer := time.After(0)
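
The comment above documents the contract of the spooler's lock helper: keep renewing the database lock on a timer and close the returned channel once ctx is done or renewing fails. A self-contained sketch of that pattern; the renewLock stub, the extra interval parameter and all timings are assumptions, not zitadel code:

package main

import (
	"context"
	"fmt"
	"time"
)

// renewLock stands in for the real database lock renewal.
func renewLock(workerID string) error {
	fmt.Println("lock renewed by", workerID)
	return nil
}

// lock renews the lock on a timer and closes the returned channel once ctx
// is done or renewing fails, as described in the comment above.
func lock(ctx context.Context, errs chan<- error, workerID string, interval time.Duration) chan bool {
	locked := make(chan bool)
	go func() {
		defer close(locked)
		renewTimer := time.After(0) // renew immediately on start
		for {
			select {
			case <-ctx.Done():
				return
			case <-renewTimer:
				if err := renewLock(workerID); err != nil {
					errs <- err
					return
				}
				locked <- true
				renewTimer = time.After(interval)
			}
		}
	}()
	return locked
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()

	errs := make(chan error, 1)
	for range lock(ctx, errs, "worker-1", 100*time.Millisecond) {
		// each receive means the lock is still held
	}
}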