fix(cache): use key versioning (#10657)
# Which Problems Are Solved

Cached objects may have a different schema between Zitadel versions.

# How the Problems Are Solved

Use the current build version to prefix cache keys in the DB-based cache connectors (PostgreSQL and Redis).

# Additional Changes

- Clean up the ZitadelVersion field from the authz Instance.
- Solve a potential race condition on global variables in the build package.

# Additional Context

- Closes https://github.com/zitadel/zitadel/issues/10648
- Obsoletes https://github.com/zitadel/zitadel/pull/10646
- Needs to be back-ported to v4 via https://github.com/zitadel/zitadel/pull/10645
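In short, both DB-backed connectors now namespace every cache key with the running build version, so entries written by a different Zitadel version become plain cache misses instead of hits with a mismatched schema. Below is a minimal, self-contained sketch of that idea; the `buildVersion` constant and the `main` function are illustrative placeholders, while in Zitadel the real value is injected via ldflags and read through `build.Version()`:

```go
package main

import "fmt"

// buildVersion stands in for build.Version(); the real value is set at
// compile time via ldflags in Zitadel's Makefile.
const buildVersion = "v4.0.0"

// versionedKey mirrors the helper added to the PostgreSQL connector in this
// change: the cache key is prefixed with the build version, so a binary
// running a different version never reads entries written with another schema.
func versionedKey(key string) string {
	return fmt.Sprintf("%s:%s", buildVersion, key)
}

func main() {
	fmt.Println(versionedKey("instance-id-1")) // prints "v4.0.0:instance-id-1"
}
```

Entries written under an older version are simply never read again and are left to the connectors' existing max-age and pruning handling.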
@@ -1,33 +1,46 @@
 package build
 
-import "time"
+import (
+	"time"
 
-var (
-	version  = ""
-	commit   = ""
-	date     = ""
-	dateTime time.Time
+	"github.com/zitadel/logging"
 )
 
-func Version() string {
-	if version != "" {
-		return version
-	}
-	version = Date().Format(time.RFC3339)
-	return version
+// These variables are set via ldflags in the Makefile
+var (
+	version = ""
+	commit  = ""
+	date    = ""
+)
+
+// dateTime is the parsed version of [date]
+var dateTime time.Time
+
+// init prevents race conditions when accessing dateTime and version.
+func init() {
+	var err error
+	dateTime, err = time.Parse(time.RFC3339, date)
+	if err != nil {
+		logging.WithError(err).Warn("could not parse build date, using current time instead")
+		dateTime = time.Now()
+	}
+	if version == "" {
+		logging.Warn("no build version set, using timestamp as version")
+		version = date
+	}
+}
+
+// Version returns the current build version of Zitadel
+func Version() string {
+	return version
 }
 
+// Commit returns the git commit hash of the current build of Zitadel
 func Commit() string {
 	return commit
 }
 
+// Date returns the build date of the current build of Zitadel
 func Date() time.Time {
-	if !dateTime.IsZero() {
-		return dateTime
-	}
-	dateTime, _ = time.Parse(time.RFC3339, date)
-	if dateTime.IsZero() {
-		dateTime = time.Now()
-	}
 	return dateTime
 }
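Regarding the race-condition bullet: the old build package (left side of the hunk above) initialized `version` and `dateTime` lazily inside `Version()` and `Date()`, so concurrent callers could write the package-level variables at the same time; the new code performs all writes in `init()`, which completes before `main` starts and therefore before any concurrent callers exist. A standalone illustration of why the lazy pattern is racy (hypothetical code, not taken from Zitadel; running it with `go run -race` reports the unsynchronized write):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// cached is initialized lazily on first use, mirroring the old Date() pattern.
var cached time.Time

func lazyDate() time.Time {
	if cached.IsZero() {
		cached = time.Now() // unsynchronized write: a data race when called concurrently
	}
	return cached
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = lazyDate() // both goroutines may observe a zero value and write
		}()
	}
	wg.Wait()
	fmt.Println(cached)
}
```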
internal/cache/cache.go (vendored, 3 lines changed)

@@ -97,6 +97,9 @@ const (
 type Config struct {
 	Connector Connector
 
+	// Cache keys are prefixed with the Zitadel version.
+	ZitadelVersion string
+
 	// Age since an object was added to the cache,
 	// after which the object is considered invalid.
 	// 0 disables max age checks.
internal/cache/connector/connector.go (vendored, 5 lines changed)

@@ -5,6 +5,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/zitadel/zitadel/cmd/build"
 	"github.com/zitadel/zitadel/internal/cache"
 	"github.com/zitadel/zitadel/internal/cache/connector/gomap"
 	"github.com/zitadel/zitadel/internal/cache/connector/noop"
@@ -55,7 +56,7 @@ func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Conte
 		return c, nil
 	}
 	if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil {
-		c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres)
+		c, err := pg.NewCache[I, K, V](background, purpose, build.Version(), *conf, indices, connectors.Postgres)
 		if err != nil {
 			return nil, fmt.Errorf("start cache: %w", err)
 		}
@@ -64,7 +65,7 @@ func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Conte
 	}
 	if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil {
 		db := connectors.Redis.Config.DBOffset + int(purpose)
-		c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices)
+		c := redis.NewCache[I, K, V](*conf, build.Version(), connectors.Redis, db, indices)
 		return c, nil
 	}
internal/cache/connector/pg/pg.go (vendored, 66 lines changed)

@@ -4,6 +4,7 @@ import (
 	"context"
 	_ "embed"
 	"errors"
+	"fmt"
 	"log/slog"
 	"slices"
 	"strings"
@@ -40,21 +41,23 @@ type PGXPool interface {
 }
 
 type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
-	purpose   cache.Purpose
-	config    *cache.Config
-	indices   []I
-	connector *Connector
-	logger    *slog.Logger
+	purpose        cache.Purpose
+	zitadelVersion string
+	config         *cache.Config
+	indices        []I
+	connector      *Connector
+	logger         *slog.Logger
 }
 
 // NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
-func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
+func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, zitadelVersion string, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
 	c := &pgCache[I, K, V]{
-		purpose:   purpose,
-		config:    &config,
-		indices:   indices,
-		connector: connector,
-		logger:    config.Log.Slog().With("cache_purpose", purpose),
+		purpose:        purpose,
+		zitadelVersion: zitadelVersion,
+		config:         &config,
+		indices:        indices,
+		connector:      connector,
+		logger:         config.Log.Slog().With("cache_purpose", purpose),
 	}
 	c.logger.InfoContext(ctx, "pg cache logging enabled")
@@ -115,7 +118,14 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er
 	if !slices.Contains(c.indices, index) {
 		return value, cache.NewIndexUnknownErr(index)
 	}
-	err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
+	err = c.connector.QueryRow(ctx,
+		getQuery,
+		c.purpose.String(),
+		index,
+		c.versionedKey(key),
+		c.config.MaxAge,
+		c.config.LastUseAge,
+	).Scan(&value)
 	return value, err
 }
@@ -123,7 +133,8 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (
 	ctx, span := tracing.NewSpan(ctx)
 	defer func() { span.EndWithError(err) }()
 
-	_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys)
+	versionedKeys := c.versionedKeys(keys)
+	_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, versionedKeys)
 	c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
 	return err
 }
@@ -132,7 +143,8 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err
 	ctx, span := tracing.NewSpan(ctx)
 	defer func() { span.EndWithError(err) }()
 
-	_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys)
+	versionedKeys := c.versionedKeys(keys)
+	_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, versionedKeys)
 	c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
 	return err
 }
@@ -155,20 +167,32 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
 	return err
 }
 
-type indexKey[I, K comparable] struct {
-	IndexID  I `json:"index_id"`
-	IndexKey K `json:"index_key"`
+type indexKey[I comparable] struct {
+	IndexID  I      `json:"index_id"`
+	IndexKey string `json:"index_key"`
 }
 
-func (c *pgCache[I, K, V]) indexKeysFromEntry(entry V) []indexKey[I, K] {
-	keys := make([]indexKey[I, K], 0, len(c.indices)*3) // naive assumption
+func (c *pgCache[I, K, V]) indexKeysFromEntry(entry V) []indexKey[I] {
+	keys := make([]indexKey[I], 0, len(c.indices)*3) // naive assumption
 	for _, index := range c.indices {
 		for _, key := range entry.Keys(index) {
-			keys = append(keys, indexKey[I, K]{
+			keys = append(keys, indexKey[I]{
 				IndexID:  index,
-				IndexKey: key,
+				IndexKey: c.versionedKey(key),
 			})
 		}
 	}
 	return keys
 }
+
+func (c *pgCache[I, K, V]) versionedKey(key K) string {
+	return fmt.Sprintf("%s:%s", c.zitadelVersion, key)
+}
+
+func (c *pgCache[I, K, V]) versionedKeys(key []K) []string {
+	result := make([]string, len(key))
+	for i, k := range key {
+		result[i] = c.versionedKey(k)
+	}
	return result
+}
internal/cache/connector/pg/pg_test.go (vendored, 34 lines changed)

@@ -80,7 +80,7 @@ func TestNewCache(t *testing.T) {
 				PGXPool: pool,
 			}
 
-			c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
+			c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, "VERSION", conf, testIndices, connector)
 			require.ErrorIs(t, err, tt.wantErr)
 			if tt.wantErr == nil {
 				assert.NotNil(t, c)
@@ -115,10 +115,10 @@ func Test_pgCache_Set(t *testing.T) {
 			expect: func(ppi pgxmock.PgxCommonIface) {
 				ppi.ExpectExec(queryExpect).
 					WithArgs(cachePurpose.String(),
-						[]indexKey[testIndex, string]{
-							{IndexID: testIndexID, IndexKey: "id1"},
-							{IndexID: testIndexName, IndexKey: "foo"},
-							{IndexID: testIndexName, IndexKey: "bar"},
+						[]indexKey[testIndex]{
+							{IndexID: testIndexID, IndexKey: "VERSION:id1"},
+							{IndexID: testIndexName, IndexKey: "VERSION:foo"},
+							{IndexID: testIndexName, IndexKey: "VERSION:bar"},
 						},
 						&testObject{
 							ID: "id1",
@@ -139,10 +139,10 @@ func Test_pgCache_Set(t *testing.T) {
 			expect: func(ppi pgxmock.PgxCommonIface) {
 				ppi.ExpectExec(queryExpect).
 					WithArgs(cachePurpose.String(),
-						[]indexKey[testIndex, string]{
-							{IndexID: testIndexID, IndexKey: "id1"},
-							{IndexID: testIndexName, IndexKey: "foo"},
-							{IndexID: testIndexName, IndexKey: "bar"},
+						[]indexKey[testIndex]{
+							{IndexID: testIndexID, IndexKey: "VERSION:id1"},
+							{IndexID: testIndexName, IndexKey: "VERSION:foo"},
+							{IndexID: testIndexName, IndexKey: "VERSION:bar"},
 						},
 						&testObject{
 							ID: "id1",
@@ -207,7 +207,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Duration(0), time.Duration(0)).
 					WillReturnRows(pgxmock.NewRows([]string{"payload"}))
 			},
 			wantOk: false,
@@ -224,7 +224,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Duration(0), time.Duration(0)).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantOk: false,
@@ -241,7 +241,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Minute, time.Second).
 					WillReturnRows(
 						pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
 							ID: "id1",
@@ -296,7 +296,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantErr: pgx.ErrTxClosed,
@@ -313,7 +313,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnResult(pgxmock.NewResult("DELETE", 1))
 			},
 		},
@@ -358,7 +358,7 @@ func Test_pgCache_Delete(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantErr: pgx.ErrTxClosed,
@@ -375,7 +375,7 @@ func Test_pgCache_Delete(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnResult(pgxmock.NewResult("DELETE", 1))
 			},
 		},
@@ -518,7 +518,7 @@ func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex,
 	connector := &Connector{
 		PGXPool: pool,
 	}
-	c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
+	c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, "VERSION", conf, testIndices, connector)
 	require.NoError(t, err)
 	return c, pool
 }
internal/cache/connector/redis/redis.go (vendored, 26 lines changed)

@@ -38,21 +38,23 @@ var (
 )
 
 type redisCache[I, K comparable, V cache.Entry[I, K]] struct {
-	db        int
-	config    *cache.Config
-	indices   []I
-	connector *Connector
-	logger    *slog.Logger
+	db             int
+	zitadelVersion string
+	config         *cache.Config
+	indices        []I
+	connector      *Connector
+	logger         *slog.Logger
 }
 
 // NewCache returns a cache that stores and retrieves object using single Redis.
-func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
+func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, zitadelVersion string, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
 	return &redisCache[I, K, V]{
-		config:    &config,
-		db:        db,
-		indices:   indices,
-		connector: client,
-		logger:    config.Log.Slog(),
+		config:         &config,
+		zitadelVersion: zitadelVersion,
+		db:             db,
+		indices:        indices,
+		connector:      client,
+		logger:         config.Log.Slog(),
 	}
 }
@@ -166,7 +168,7 @@ func (c *redisCache[I, K, V]) Truncate(ctx context.Context) (err error) {
 func (c *redisCache[I, K, V]) redisIndexKeys(index I, keys ...K) []string {
 	out := make([]string, len(keys))
 	for i, k := range keys {
-		out[i] = fmt.Sprintf("%v:%v", index, k)
+		out[i] = fmt.Sprintf("%s:%v:%v", c.zitadelVersion, index, k)
 	}
 	return out
 }
internal/cache/connector/redis/redis_test.go (vendored, 26 lines changed)

@@ -68,9 +68,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 			},
@@ -88,9 +88,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -115,9 +115,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.NotEmpty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -141,9 +141,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -710,7 +710,7 @@ func prepareCache(t *testing.T, conf cache.Config, options ...func(*Config)) (ca
 		connector.Close()
 		server.Close()
 	})
-	c := NewCache[testIndex, string, *testObject](conf, connector, testDB, testIndices)
+	c := NewCache[testIndex, string, *testObject](conf, "VERSION", connector, testDB, testIndices)
 	return c, server
 }
@@ -15,7 +15,6 @@ import (
 	"github.com/zitadel/logging"
 	"golang.org/x/text/language"
 
-	"github.com/zitadel/zitadel/cmd/build"
 	"github.com/zitadel/zitadel/internal/api/authz"
 	"github.com/zitadel/zitadel/internal/database"
 	"github.com/zitadel/zitadel/internal/eventstore"
@@ -218,7 +217,7 @@ func (q *Queries) InstanceByHost(ctx context.Context, instanceHost, publicHost s
 	publicDomain := strings.Split(publicHost, ":")[0] // remove possible port
 
 	instance, ok := q.caches.instance.Get(ctx, instanceIndexByHost, instanceDomain)
-	if ok && instance.ZitadelVersion == build.Version() {
+	if ok {
 		return instance, instance.checkDomain(instanceDomain, publicDomain)
 	}
 	instance, scan := scanAuthzInstance()
@@ -241,7 +240,7 @@ func (q *Queries) InstanceByID(ctx context.Context, id string) (_ authz.Instance
 	}()
 
 	instance, ok := q.caches.instance.Get(ctx, instanceIndexByID, id)
-	if ok && instance.ZitadelVersion == build.Version() {
+	if ok {
 		return instance, nil
 	}
 
@@ -250,7 +249,6 @@
 	logging.OnError(err).WithField("instance_id", id).Warn("instance by ID")
 
 	if err == nil {
-		instance.ZitadelVersion = build.Version()
 		q.caches.instance.Set(ctx, instance)
 	}
 	return instance, err
@@ -478,7 +476,6 @@ type authzInstance struct {
 	ExternalDomains  database.TextArray[string] `json:"external_domains,omitempty"`
 	TrustedDomains   database.TextArray[string] `json:"trusted_domains,omitempty"`
 	ExecutionTargets target_domain.Router       `json:"execution_targets,omitzero"`
-	ZitadelVersion   string                     `json:"zitadel_version,omitempty"`
 }
 
 type csp struct {