fix(cache): use key versioning (#10657)
# Which Problems Are Solved
Cached objects may have a different schema between Zitadel versions.
# How the Problems Are Solved
Use the current build version as a key prefix in the DB-based cache
connectors, PostgreSQL and Redis.
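
As a rough sketch of the idea (not the actual connector code; the type, keys, and version strings below are placeholders), prefixing every cache key with the running build's version means entries written by another release are never matched, so the caller simply falls back to the database:

```go
package main

import "fmt"

// versionedCache stands in for the PostgreSQL/Redis connectors: the version
// prefix is injected once (Zitadel passes build.Version()) and applied to
// every key the cache reads or writes.
type versionedCache struct {
    version string
    store   map[string][]byte
}

func (c *versionedCache) key(k string) string {
    return fmt.Sprintf("%s:%s", c.version, k)
}

func (c *versionedCache) Set(k string, v []byte) { c.store[c.key(k)] = v }

func (c *versionedCache) Get(k string) ([]byte, bool) {
    v, ok := c.store[c.key(k)]
    return v, ok
}

func main() {
    shared := map[string][]byte{}

    previous := &versionedCache{version: "v4.5.0", store: shared}
    previous.Set("instance-123", []byte(`{"old":"schema"}`))

    current := &versionedCache{version: "v4.6.0", store: shared}
    if _, ok := current.Get("instance-123"); !ok {
        fmt.Println("entry written by the previous release is invisible to the new build")
    }
}
```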
# Additional Changes
- Clean up the ZitadelVersion field from the authz Instance
- Solve a potential race condition on global variables in the build package (see the sketch after this list).
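
To make the race-condition fix concrete, here is a hypothetical test (not part of this commit) that would have tripped the race detector against the old lazy initialization; the build-package diff further down moves the parsing into init(), so the globals are written exactly once before any caller runs:

```go
package build_test

import (
    "sync"
    "testing"

    "github.com/zitadel/zitadel/cmd/build"
)

// Hypothetical regression test: with the old lazy initialization, concurrent
// calls to Date() and Version() both wrote package-level variables, which
// `go test -race` reports as a data race. After the init()-based rewrite,
// these calls are plain reads.
func TestBuildInfoConcurrentAccess(t *testing.T) {
    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = build.Date()
            _ = build.Version()
        }()
    }
    wg.Wait()
}
```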
# Additional Context
- Closes https://github.com/zitadel/zitadel/issues/10648
- Obsoletes https://github.com/zitadel/zitadel/pull/10646
- Needs to be back-ported to v4 via
https://github.com/zitadel/zitadel/pull/10645
(cherry picked from commit f6f37d3a31)
Committed by Livio Spring
Parent: f9b3c1ef50
Commit: 6e90d4a927
```diff
@@ -1,33 +1,46 @@
 package build
 
-import "time"
+import (
+	"time"
 
-var (
-	version  = ""
-	commit   = ""
-	date     = ""
-	dateTime time.Time
+	"github.com/zitadel/logging"
 )
 
-func Version() string {
-	if version != "" {
-		return version
+// These variables are set via ldflags in the Makefile
+var (
+	version = ""
+	commit  = ""
+	date    = ""
+)
+
+// dateTime is the parsed version of [date]
+var dateTime time.Time
+
+// init prevents race conditions when accessing dateTime and version.
+func init() {
+	var err error
+	dateTime, err = time.Parse(time.RFC3339, date)
+	if err != nil {
+		logging.WithError(err).Warn("could not parse build date, using current time instead")
+		dateTime = time.Now()
 	}
-	version = Date().Format(time.RFC3339)
+	if version == "" {
+		logging.Warn("no build version set, using timestamp as version")
+		version = date
+	}
+}
+
+// Version returns the current build version of Zitadel
+func Version() string {
 	return version
 }
 
+// Commit returns the git commit hash of the current build of Zitadel
 func Commit() string {
 	return commit
 }
 
+// Date returns the build date of the current build of Zitadel
 func Date() time.Time {
-	if !dateTime.IsZero() {
-		return dateTime
-	}
-	dateTime, _ = time.Parse(time.RFC3339, date)
-	if dateTime.IsZero() {
-		dateTime = time.Now()
-	}
 	return dateTime
 }
```
internal/cache/cache.go (vendored, 3 changed lines)

```diff
@@ -97,6 +97,9 @@ const (
 type Config struct {
 	Connector Connector
 
+	// Cache keys are prefixed with the Zitadel version.
+	ZitadelVersion string
+
 	// Age since an object was added to the cache,
 	// after which the object is considered invalid.
 	// 0 disables max age checks.
```
internal/cache/connector/connector.go (vendored, 5 changed lines)

```diff
@@ -5,6 +5,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/zitadel/zitadel/cmd/build"
 	"github.com/zitadel/zitadel/internal/cache"
 	"github.com/zitadel/zitadel/internal/cache/connector/gomap"
 	"github.com/zitadel/zitadel/internal/cache/connector/noop"
@@ -55,7 +56,7 @@ func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Conte
 		return c, nil
 	}
 	if conf.Connector == cache.ConnectorPostgres && connectors.Postgres != nil {
-		c, err := pg.NewCache[I, K, V](background, purpose, *conf, indices, connectors.Postgres)
+		c, err := pg.NewCache[I, K, V](background, purpose, build.Version(), *conf, indices, connectors.Postgres)
 		if err != nil {
 			return nil, fmt.Errorf("start cache: %w", err)
 		}
@@ -64,7 +65,7 @@ func StartCache[I ~int, K ~string, V cache.Entry[I, K]](background context.Conte
 	}
 	if conf.Connector == cache.ConnectorRedis && connectors.Redis != nil {
 		db := connectors.Redis.Config.DBOffset + int(purpose)
-		c := redis.NewCache[I, K, V](*conf, connectors.Redis, db, indices)
+		c := redis.NewCache[I, K, V](*conf, build.Version(), connectors.Redis, db, indices)
 		return c, nil
 	}
 
```
internal/cache/connector/pg/pg.go (vendored, 66 changed lines)

```diff
@@ -4,6 +4,7 @@ import (
 	"context"
 	_ "embed"
 	"errors"
+	"fmt"
 	"log/slog"
 	"slices"
 	"strings"
@@ -40,21 +41,23 @@ type PGXPool interface {
 }
 
 type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
-	purpose   cache.Purpose
-	config    *cache.Config
-	indices   []I
-	connector *Connector
-	logger    *slog.Logger
+	purpose        cache.Purpose
+	zitadelVersion string
+	config         *cache.Config
+	indices        []I
+	connector      *Connector
+	logger         *slog.Logger
 }
 
 // NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
-func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
+func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, purpose cache.Purpose, zitadelVersion string, config cache.Config, indices []I, connector *Connector) (cache.PrunerCache[I, K, V], error) {
 	c := &pgCache[I, K, V]{
-		purpose:   purpose,
-		config:    &config,
-		indices:   indices,
-		connector: connector,
-		logger:    config.Log.Slog().With("cache_purpose", purpose),
+		purpose:        purpose,
+		zitadelVersion: zitadelVersion,
+		config:         &config,
+		indices:        indices,
+		connector:      connector,
+		logger:         config.Log.Slog().With("cache_purpose", purpose),
 	}
 	c.logger.InfoContext(ctx, "pg cache logging enabled")
 
@@ -115,7 +118,14 @@ func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, er
 	if !slices.Contains(c.indices, index) {
 		return value, cache.NewIndexUnknownErr(index)
 	}
-	err = c.connector.QueryRow(ctx, getQuery, c.purpose.String(), index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
+	err = c.connector.QueryRow(ctx,
+		getQuery,
+		c.purpose.String(),
+		index,
+		c.versionedKey(key),
+		c.config.MaxAge,
+		c.config.LastUseAge,
+	).Scan(&value)
 	return value, err
 }
 
@@ -123,7 +133,8 @@ func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (
 	ctx, span := tracing.NewSpan(ctx)
 	defer func() { span.EndWithError(err) }()
 
-	_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, keys)
+	versionedKeys := c.versionedKeys(keys)
+	_, err = c.connector.Exec(ctx, invalidateQuery, c.purpose.String(), index, versionedKeys)
 	c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
 	return err
 }
@@ -132,7 +143,8 @@ func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err
 	ctx, span := tracing.NewSpan(ctx)
 	defer func() { span.EndWithError(err) }()
 
-	_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, keys)
+	versionedKeys := c.versionedKeys(keys)
+	_, err = c.connector.Exec(ctx, deleteQuery, c.purpose.String(), index, versionedKeys)
 	c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
 	return err
 }
@@ -155,20 +167,32 @@ func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
 	return err
 }
 
-type indexKey[I, K comparable] struct {
-	IndexID  I `json:"index_id"`
-	IndexKey K `json:"index_key"`
+type indexKey[I comparable] struct {
+	IndexID  I      `json:"index_id"`
+	IndexKey string `json:"index_key"`
 }
 
-func (c *pgCache[I, K, V]) indexKeysFromEntry(entry V) []indexKey[I, K] {
-	keys := make([]indexKey[I, K], 0, len(c.indices)*3) // naive assumption
+func (c *pgCache[I, K, V]) indexKeysFromEntry(entry V) []indexKey[I] {
+	keys := make([]indexKey[I], 0, len(c.indices)*3) // naive assumption
 	for _, index := range c.indices {
 		for _, key := range entry.Keys(index) {
-			keys = append(keys, indexKey[I, K]{
+			keys = append(keys, indexKey[I]{
 				IndexID:  index,
-				IndexKey: key,
+				IndexKey: c.versionedKey(key),
 			})
 		}
 	}
 	return keys
 }
+
+func (c *pgCache[I, K, V]) versionedKey(key K) string {
+	return fmt.Sprintf("%s:%s", c.zitadelVersion, key)
+}
+
+func (c *pgCache[I, K, V]) versionedKeys(key []K) []string {
+	result := make([]string, len(key))
+	for i, k := range key {
+		result[i] = c.versionedKey(k)
+	}
+	return result
+}
```
internal/cache/connector/pg/pg_test.go (vendored, 34 changed lines)

```diff
@@ -80,7 +80,7 @@ func TestNewCache(t *testing.T) {
 				PGXPool: pool,
 			}
 
-			c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
+			c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, "VERSION", conf, testIndices, connector)
 			require.ErrorIs(t, err, tt.wantErr)
 			if tt.wantErr == nil {
 				assert.NotNil(t, c)
@@ -115,10 +115,10 @@ func Test_pgCache_Set(t *testing.T) {
 			expect: func(ppi pgxmock.PgxCommonIface) {
 				ppi.ExpectExec(queryExpect).
 					WithArgs(cachePurpose.String(),
-						[]indexKey[testIndex, string]{
-							{IndexID: testIndexID, IndexKey: "id1"},
-							{IndexID: testIndexName, IndexKey: "foo"},
-							{IndexID: testIndexName, IndexKey: "bar"},
+						[]indexKey[testIndex]{
+							{IndexID: testIndexID, IndexKey: "VERSION:id1"},
+							{IndexID: testIndexName, IndexKey: "VERSION:foo"},
+							{IndexID: testIndexName, IndexKey: "VERSION:bar"},
 						},
 						&testObject{
 							ID: "id1",
@@ -139,10 +139,10 @@ func Test_pgCache_Set(t *testing.T) {
 			expect: func(ppi pgxmock.PgxCommonIface) {
 				ppi.ExpectExec(queryExpect).
 					WithArgs(cachePurpose.String(),
-						[]indexKey[testIndex, string]{
-							{IndexID: testIndexID, IndexKey: "id1"},
-							{IndexID: testIndexName, IndexKey: "foo"},
-							{IndexID: testIndexName, IndexKey: "bar"},
+						[]indexKey[testIndex]{
+							{IndexID: testIndexID, IndexKey: "VERSION:id1"},
+							{IndexID: testIndexName, IndexKey: "VERSION:foo"},
+							{IndexID: testIndexName, IndexKey: "VERSION:bar"},
 						},
 						&testObject{
 							ID: "id1",
@@ -207,7 +207,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Duration(0), time.Duration(0)).
 					WillReturnRows(pgxmock.NewRows([]string{"payload"}))
 			},
 			wantOk: false,
@@ -224,7 +224,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Duration(0), time.Duration(0)).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Duration(0), time.Duration(0)).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantOk: false,
@@ -241,7 +241,7 @@ func Test_pgCache_Get(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectQuery(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, "id1", time.Minute, time.Second).
+					WithArgs(cachePurpose.String(), testIndexID, "VERSION:id1", time.Minute, time.Second).
 					WillReturnRows(
 						pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
 							ID: "id1",
@@ -296,7 +296,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantErr: pgx.ErrTxClosed,
@@ -313,7 +313,7 @@ func Test_pgCache_Invalidate(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnResult(pgxmock.NewResult("DELETE", 1))
 			},
 		},
@@ -358,7 +358,7 @@ func Test_pgCache_Delete(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnError(pgx.ErrTxClosed)
 			},
 			wantErr: pgx.ErrTxClosed,
@@ -375,7 +375,7 @@ func Test_pgCache_Delete(t *testing.T) {
 			},
 			expect: func(pci pgxmock.PgxCommonIface) {
 				pci.ExpectExec(queryExpect).
-					WithArgs(cachePurpose.String(), testIndexID, []string{"id1", "id2"}).
+					WithArgs(cachePurpose.String(), testIndexID, []string{"VERSION:id1", "VERSION:id2"}).
 					WillReturnResult(pgxmock.NewResult("DELETE", 1))
 			},
 		},
@@ -518,7 +518,7 @@ func prepareCache(t *testing.T, conf cache.Config) (cache.PrunerCache[testIndex,
 	connector := &Connector{
 		PGXPool: pool,
 	}
-	c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, conf, testIndices, connector)
+	c, err := NewCache[testIndex, string, *testObject](context.Background(), cachePurpose, "VERSION", conf, testIndices, connector)
 	require.NoError(t, err)
 	return c, pool
 }
```
internal/cache/connector/redis/redis.go (vendored, 26 changed lines)

```diff
@@ -38,21 +38,23 @@ var (
 )
 
 type redisCache[I, K comparable, V cache.Entry[I, K]] struct {
-	db        int
-	config    *cache.Config
-	indices   []I
-	connector *Connector
-	logger    *slog.Logger
+	db             int
+	zitadelVersion string
+	config         *cache.Config
+	indices        []I
+	connector      *Connector
+	logger         *slog.Logger
 }
 
 // NewCache returns a cache that stores and retrieves object using single Redis.
-func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
+func NewCache[I, K comparable, V cache.Entry[I, K]](config cache.Config, zitadelVersion string, client *Connector, db int, indices []I) cache.Cache[I, K, V] {
 	return &redisCache[I, K, V]{
-		config:    &config,
-		db:        db,
-		indices:   indices,
-		connector: client,
-		logger:    config.Log.Slog(),
+		config:         &config,
+		zitadelVersion: zitadelVersion,
+		db:             db,
+		indices:        indices,
+		connector:      client,
+		logger:         config.Log.Slog(),
 	}
 }
 
@@ -166,7 +168,7 @@ func (c *redisCache[I, K, V]) Truncate(ctx context.Context) (err error) {
 func (c *redisCache[I, K, V]) redisIndexKeys(index I, keys ...K) []string {
 	out := make([]string, len(keys))
 	for i, k := range keys {
-		out[i] = fmt.Sprintf("%v:%v", index, k)
+		out[i] = fmt.Sprintf("%s:%v:%v", c.zitadelVersion, index, k)
 	}
 	return out
 }
```
internal/cache/connector/redis/redis_test.go (vendored, 26 changed lines)

```diff
@@ -68,9 +68,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 			},
@@ -88,9 +88,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -115,9 +115,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.NotEmpty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -141,9 +141,9 @@ func Test_redisCache_set(t *testing.T) {
 				},
 			},
 			assertions: func(t *testing.T, s *miniredis.Miniredis, objectID string) {
-				s.CheckGet(t, "0:one", objectID)
-				s.CheckGet(t, "1:foo", objectID)
-				s.CheckGet(t, "1:bar", objectID)
+				s.CheckGet(t, "VERSION:0:one", objectID)
+				s.CheckGet(t, "VERSION:1:foo", objectID)
+				s.CheckGet(t, "VERSION:1:bar", objectID)
 				assert.Empty(t, s.HGet(objectID, "expiry"))
 				assert.JSONEq(t, `{"ID":"one","Name":["foo","bar"]}`, s.HGet(objectID, "object"))
 				assert.Positive(t, s.TTL(objectID))
@@ -710,7 +710,7 @@ func prepareCache(t *testing.T, conf cache.Config, options ...func(*Config)) (ca
 		connector.Close()
 		server.Close()
 	})
-	c := NewCache[testIndex, string, *testObject](conf, connector, testDB, testIndices)
+	c := NewCache[testIndex, string, *testObject](conf, "VERSION", connector, testDB, testIndices)
 	return c, server
 }
```
```diff
@@ -15,7 +15,6 @@ import (
 	"github.com/zitadel/logging"
 	"golang.org/x/text/language"
 
-	"github.com/zitadel/zitadel/cmd/build"
 	"github.com/zitadel/zitadel/internal/api/authz"
 	"github.com/zitadel/zitadel/internal/database"
 	"github.com/zitadel/zitadel/internal/eventstore"
@@ -218,7 +217,7 @@ func (q *Queries) InstanceByHost(ctx context.Context, instanceHost, publicHost s
 	publicDomain := strings.Split(publicHost, ":")[0] // remove possible port
 
 	instance, ok := q.caches.instance.Get(ctx, instanceIndexByHost, instanceDomain)
-	if ok && instance.ZitadelVersion == build.Version() {
+	if ok {
 		return instance, instance.checkDomain(instanceDomain, publicDomain)
 	}
 	instance, scan := scanAuthzInstance()
@@ -242,7 +241,7 @@ func (q *Queries) InstanceByID(ctx context.Context, id string) (_ authz.Instance
 	}()
 
 	instance, ok := q.caches.instance.Get(ctx, instanceIndexByID, id)
-	if ok && instance.ZitadelVersion == build.Version() {
+	if ok {
 		return instance, nil
 	}
 
@@ -251,7 +250,6 @@ func (q *Queries) InstanceByID(ctx context.Context, id string) (_ authz.Instance
 	logging.OnError(err).WithField("instance_id", id).Warn("instance by ID")
 
 	if err == nil {
-		instance.ZitadelVersion = build.Version()
 		q.caches.instance.Set(ctx, instance)
 	}
 	return instance, err
@@ -479,7 +477,6 @@ type authzInstance struct {
 	ExternalDomains  database.TextArray[string] `json:"external_domains,omitempty"`
 	TrustedDomains   database.TextArray[string] `json:"trusted_domains,omitempty"`
 	ExecutionTargets target_domain.Router       `json:"execution_targets,omitzero"`
-	ZitadelVersion   string                     `json:"zitadel_version,omitempty"`
 }
 
 type csp struct {
```