fix(eventstore): use decimal, correct mirror (#9906)

Backport of #9812, #9878, #9881, #9884

---------

Co-authored-by: Livio Spring <livio.a@gmail.com>
Authored by Silvan on 2025-05-20 13:58:32 +02:00, committed by GitHub
parent eeb7cd6910
commit 6d33f9e75a
58 changed files with 540 additions and 322 deletions
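For orientation, below is a minimal, self-contained sketch of the worker pattern the reworked projections/execProjections code in this diff follows: the eventstore handle is passed explicitly into each worker, instance IDs are queued on a channel with their position logged, and failed instances are collected on a second channel. The Eventstore type and projectInstance function here are hypothetical stand-ins for illustration only, not ZITADEL's actual APIs.

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// Eventstore is a hypothetical stand-in for the eventstore handle that the
// changed execProjections now receives as an explicit parameter.
type Eventstore struct{}

// projectInstance is a placeholder for the per-instance work
// (projection, fields and auth triggers, notification state, ...).
func projectInstance(ctx context.Context, es *Eventstore, instanceID string) error {
	return nil
}

// execProjections drains the instances channel and reports failures,
// mirroring the shape of the function in the diff.
func execProjections(ctx context.Context, es *Eventstore, instances <-chan string, failed chan<- string, wg *sync.WaitGroup) {
	defer wg.Done()
	for instance := range instances {
		if err := projectInstance(ctx, es, instance); err != nil {
			failed <- instance
		}
	}
}

func main() {
	ctx := context.Background()
	es := &Eventstore{}

	existing := []string{"instance-1", "instance-2", "instance-3"}
	instances := make(chan string, len(existing))
	failed := make(chan string, len(existing))

	const concurrent = 2
	var wg sync.WaitGroup
	wg.Add(concurrent)
	for i := 0; i < concurrent; i++ {
		go execProjections(ctx, es, instances, failed, &wg)
	}

	// queue every known instance and log its position,
	// like the i/len logging added in the diff
	for i, instance := range existing {
		instances <- instance
		fmt.Printf("queued %s for projection (%d/%d)\n", instance, i, len(existing))
	}
	close(instances)
	wg.Wait()
	close(failed)

	for instance := range failed {
		fmt.Println("projection failed for", instance)
	}
}
```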


@@ -3,6 +3,7 @@ package mirror
 import (
 	"context"
 	"database/sql"
+	"fmt"
 	"net/http"
 	"sync"
 	"time"
@@ -103,6 +104,7 @@ func projections(
 	config *ProjectionsConfig,
 	masterKey string,
 ) {
+	logging.Info("starting to fill projections")
 	start := time.Now()
 	client, err := database.Connect(config.Destination, false)
@@ -118,7 +120,11 @@ func projections(
 	logging.OnError(err).Fatal("unable create static storage")
 	config.Eventstore.Querier = old_es.NewCRDB(client)
-	config.Eventstore.Pusher = new_es.NewEventstore(client)
+	newES := new_es.NewEventstore(client)
+	config.Eventstore.Pusher = newES
+	config.Eventstore.Searcher = newES
 	es := eventstore.NewEventstore(config.Eventstore)
 	esV4 := es_v4.NewEventstoreFromOne(es_v4_pg.New(client, &es_v4_pg.Config{
 		MaxRetries: config.Eventstore.MaxRetries,
@@ -249,11 +255,13 @@ func projections(
 	}()
 	for i := 0; i < int(config.Projections.ConcurrentInstances); i++ {
-		go execProjections(ctx, instances, failedInstances, &wg)
+		go execProjections(ctx, es, instances, failedInstances, &wg)
 	}
-	for _, instance := range queryInstanceIDs(ctx, client) {
+	existingInstances := queryInstanceIDs(ctx, client)
+	for i, instance := range existingInstances {
 		instances <- instance
+		logging.WithFields("id", instance, "index", fmt.Sprintf("%d/%d", i, len(existingInstances))).Info("instance queued for projection")
 	}
 	close(instances)
 	wg.Wait()
@@ -263,9 +271,9 @@ func projections(
logging.WithFields("took", time.Since(start)).Info("projections executed")
}
func execProjections(ctx context.Context, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
func execProjections(ctx context.Context, es *eventstore.Eventstore, instances <-chan string, failedInstances chan<- string, wg *sync.WaitGroup) {
for instance := range instances {
logging.WithFields("instance", instance).Info("start projections")
logging.WithFields("instance", instance).Info("starting projections")
ctx = internal_authz.WithInstanceID(ctx, instance)
err := projection.ProjectInstance(ctx)
@@ -282,6 +290,13 @@ func execProjections(ctx context.Context, instances <-chan string, failedInstanc
 			continue
 		}
+		err = projection.ProjectInstanceFields(ctx)
+		if err != nil {
+			logging.WithFields("instance", instance).WithError(err).Info("trigger fields failed")
+			failedInstances <- instance
+			continue
+		}
 		err = auth_handler.ProjectInstance(ctx)
 		if err != nil {
 			logging.WithFields("instance", instance).OnError(err).Info("trigger auth handler failed")
@@ -289,7 +304,7 @@ func execProjections(ctx context.Context, instances <-chan string, failedInstanc
 			continue
 		}
-		err = notification.ProjectInstance(ctx)
+		err = notification.SetCurrentState(ctx, es)
 		if err != nil {
 			logging.WithFields("instance", instance).OnError(err).Info("trigger notification failed")
 			failedInstances <- instance
@@ -300,7 +315,7 @@ func execProjections(ctx context.Context, instances <-chan string, failedInstanc
 	wg.Done()
 }
-// returns the instance configured by flag
+// queryInstanceIDs returns the instance configured by flag
 // or all instances which are not removed
 func queryInstanceIDs(ctx context.Context, source *database.DB) []string {
 	if len(instanceIDs) > 0 {