perf(cache): pgx pool connector (#8703)

# Which Problems Are Solved

Cache implementation using a PGX connection pool.

# How the Problems Are Solved

Defines a new schema `cache` in the zitadel database.
A table for string keys and a table for objects is defined.
For PostgreSQL, tables are unlogged and partitioned by cache name for
performance.

CockroachDB does not have unlogged tables, and its partitioning is an
enterprise feature that uses alternative syntax combined with sharding.
Regular tables are used there instead.

# Additional Changes

- `postgres.Config` can return a pgx pool. See the following discussion

# Additional Context

- Part of https://github.com/zitadel/zitadel/issues/8648
- Closes https://github.com/zitadel/zitadel/issues/8647

---------

Co-authored-by: Silvan <silvan.reusser@gmail.com>
This commit is contained in:
Tim Möhlmann
2024-10-04 16:15:41 +03:00
committed by GitHub
parent bee0744d46
commit 25dc7bfe72
29 changed files with 1034 additions and 140 deletions

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/database/postgres"
)
// Cache stores objects with a value of type `V`.
@@ -55,9 +56,6 @@ type Cache[I, K comparable, V Entry[I, K]] interface {
// Truncate deletes all cached objects.
Truncate(ctx context.Context) error
// Close the cache. Subsequent calls to the cache are not allowed.
Close(ctx context.Context) error
}
// Entry contains a value of type `V` to be cached.
@@ -75,8 +73,8 @@ type Entry[I, K comparable] interface {
type CachesConfig struct {
Connectors struct {
Memory MemoryConnectorConfig
// SQL database.Config
Memory MemoryConnectorConfig
Postgres PostgresConnectorConfig
// Redis redis.Config?
}
Instance *CacheConfig
@@ -104,3 +102,9 @@ type MemoryConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
}
type PostgresConnectorConfig struct {
Enabled bool
AutoPrune AutoPruneConfig
Connection postgres.Config
}

View File

@@ -109,15 +109,11 @@ func (c *mapCache[I, K, V]) Prune(ctx context.Context) error {
func (c *mapCache[I, K, V]) Truncate(ctx context.Context) error {
for name, index := range c.indexMap {
index.Truncate()
c.logger.DebugContext(ctx, "map cache clear", "index", name)
c.logger.DebugContext(ctx, "map cache truncate", "index", name)
}
return nil
}
func (c *mapCache[I, K, V]) Close(ctx context.Context) error {
return ctx.Err()
}
type index[K comparable, V any] struct {
mutex sync.RWMutex
config *cache.CacheConfig

View File

@@ -49,7 +49,6 @@ func Test_mapCache_Get(t *testing.T) {
AddSource: true,
},
})
defer c.Close(context.Background())
obj := &testObject{
id: "id",
names: []string{"foo", "bar"},
@@ -112,7 +111,6 @@ func Test_mapCache_Invalidate(t *testing.T) {
AddSource: true,
},
})
defer c.Close(context.Background())
obj := &testObject{
id: "id",
names: []string{"foo", "bar"},
@@ -134,7 +132,6 @@ func Test_mapCache_Delete(t *testing.T) {
AddSource: true,
},
})
defer c.Close(context.Background())
obj := &testObject{
id: "id",
names: []string{"foo", "bar"},
@@ -168,7 +165,6 @@ func Test_mapCache_Prune(t *testing.T) {
AddSource: true,
},
})
defer c.Close(context.Background())
objects := []*testObject{
{
@@ -205,7 +201,6 @@ func Test_mapCache_Truncate(t *testing.T) {
AddSource: true,
},
})
defer c.Close(context.Background())
objects := []*testObject{
{
id: "id1",

View File

@@ -19,4 +19,3 @@ func (noop[I, K, V]) Invalidate(context.Context, I, ...K) (err error) { return }
func (noop[I, K, V]) Delete(context.Context, I, ...K) (err error) { return }
func (noop[I, K, V]) Prune(context.Context) (err error) { return }
func (noop[I, K, V]) Truncate(context.Context) (err error) { return }
func (noop[I, K, V]) Close(context.Context) (err error) { return }

View File

@@ -0,0 +1,7 @@
create unlogged table if not exists cache.objects_{{ . }}
partition of cache.objects
for values in ('{{ . }}');
create unlogged table if not exists cache.string_keys_{{ . }}
partition of cache.string_keys
for values in ('{{ . }}');

5
internal/cache/pg/delete.sql vendored Normal file
View File

@@ -0,0 +1,5 @@
-- Removes index keys from a cache without deleting the objects they
-- reference; objects left without any key are removed later by prune.sql.
-- $1: cache name, $2: index ID, $3: array of index keys.
delete from cache.string_keys k
where k.cache_name = $1
and k.index_id = $2
and k.index_key = any($3)
;

19
internal/cache/pg/get.sql vendored Normal file
View File

@@ -0,0 +1,19 @@
-- Fetches one cached object by (cache name, index, key) and bumps its
-- last_used_at timestamp as a side effect of the update.
-- $1: cache name, $2: index ID, $3: index key,
-- $4: max age, $5: last-use age; a '0s' interval disables that check.
-- Returns the payload, or no rows on a miss or when the entry is too old.
update cache.objects
set last_used_at = now()
where cache_name = $1
and (
    select object_id
    from cache.string_keys k
    where cache_name = $1
    and index_id = $2
    and index_key = $3
) = id
and case when $4::interval > '0s'
    then created_at > now()-$4::interval -- max age
    else true
end
and case when $5::interval > '0s'
    then last_used_at > now()-$5::interval -- last use
    else true
end
returning payload;

9
internal/cache/pg/invalidate.sql vendored Normal file
View File

@@ -0,0 +1,9 @@
-- Deletes the cached objects referenced by the given index keys, so the
-- entries miss on every index afterwards.
-- $1: cache name, $2: index ID, $3: array of index keys.
-- NOTE(review): assumes rows in cache.string_keys are cleaned up via an
-- ON DELETE CASCADE on object_id -- confirm against the schema migration.
delete from cache.objects o
using cache.string_keys k
where k.cache_name = $1
and k.index_id = $2
and k.index_key = any($3)
and o.cache_name = k.cache_name
and o.id = k.object_id
;

176
internal/cache/pg/pg.go vendored Normal file
View File

@@ -0,0 +1,176 @@
package pg
import (
"context"
_ "embed"
"errors"
"log/slog"
"strings"
"text/template"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"golang.org/x/exp/slices"
"github.com/zitadel/zitadel/internal/cache"
"github.com/zitadel/zitadel/internal/telemetry/tracing"
)
var (
//go:embed create_partition.sql.tmpl
createPartitionQuery string
createPartitionTmpl = template.Must(template.New("create_partition").Parse(createPartitionQuery))
//go:embed set.sql
setQuery string
//go:embed get.sql
getQuery string
//go:embed invalidate.sql
invalidateQuery string
//go:embed delete.sql
deleteQuery string
//go:embed prune.sql
pruneQuery string
//go:embed truncate.sql
truncateQuery string
)
// PGXPool abstracts the subset of a pgx connection pool used by the cache,
// so that a mock pool can be injected in tests.
type PGXPool interface {
	Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error)
	QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
}
// pgCache implements a cache backed by PostgreSQL tables.
// I is the index enumeration type, K the key type and V the entry type.
type pgCache[I ~int, K ~string, V cache.Entry[I, K]] struct {
	name    string // cache name; passed as the first argument of every query
	config  *cache.CacheConfig
	indices []I // allowed indices; lookups with other values are rejected
	pool    PGXPool
	logger  *slog.Logger // pre-seeded with the cache_name attribute
}
// NewCache returns a cache that stores and retrieves objects using PostgreSQL unlogged tables.
// For the "postgres" dialect the per-cache table partitions are created
// up-front; other dialects (e.g. CockroachDB) skip partition creation.
func NewCache[I ~int, K ~string, V cache.Entry[I, K]](ctx context.Context, name string, config cache.CacheConfig, indices []I, pool PGXPool, dialect string) (cache.PrunerCache[I, K, V], error) {
	c := &pgCache[I, K, V]{
		name:    name,
		config:  &config,
		indices: indices,
		pool:    pool,
		logger:  config.Log.Slog().With("cache_name", name),
	}
	c.logger.InfoContext(ctx, "pg cache logging enabled")

	// Partitioned unlogged tables are PostgreSQL-only; see the schema setup.
	if dialect == "postgres" {
		if err := c.createPartition(ctx); err != nil {
			return nil, err
		}
	}
	return c, nil
}
// createPartition renders the create_partition template with the cache
// name and executes the resulting DDL, creating the per-cache partitions
// of the objects and string_keys tables.
func (c *pgCache[I, K, V]) createPartition(ctx context.Context) error {
	var ddl strings.Builder
	err := createPartitionTmpl.Execute(&ddl, c.name)
	if err != nil {
		return err
	}
	_, err = c.pool.Exec(ctx, ddl.String())
	return err
}
// Set stores an entry. Storage errors are deliberately discarded here: a
// failed write only degrades the cache and must not fail the caller.
// Errors are logged inside set.
func (c *pgCache[I, K, V]) Set(ctx context.Context, entry V) {
	//nolint:errcheck
	c.set(ctx, entry)
}
// set extracts all index keys from entry and stores the entry together
// with its keys in a single statement (setQuery).
func (c *pgCache[I, K, V]) set(ctx context.Context, entry V) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	keys := c.indexKeysFromEntry(entry)
	c.logger.DebugContext(ctx, "pg cache set", "index_key", keys)

	_, err = c.pool.Exec(ctx, setQuery, c.name, keys, entry)
	if err != nil {
		// Logged here because the exported Set discards the returned error.
		c.logger.ErrorContext(ctx, "pg cache set", "err", err)
		return err
	}
	return nil
}
// Get looks up a cached object by index and key. It returns ok == false on
// a cache miss or any lookup error; errors are never surfaced to the
// caller. Misses are logged at info level, other failures at error level.
func (c *pgCache[I, K, V]) Get(ctx context.Context, index I, key K) (value V, ok bool) {
	value, err := c.get(ctx, index, key)
	if err == nil {
		c.logger.DebugContext(ctx, "pg cache get", "index", index, "key", key)
		return value, true
	}
	logger := c.logger.With("err", err, "index", index, "key", key)
	if errors.Is(err, pgx.ErrNoRows) {
		logger.InfoContext(ctx, "pg cache miss")
		return value, false
	}
	// err is already attached to logger via With above; passing it again
	// would emit a duplicate "err" attribute in the log record.
	logger.ErrorContext(ctx, "pg cache get")
	return value, false
}
// get validates the index against the configured indices and runs getQuery,
// which also bumps last_used_at. MaxAge and LastUseAge are passed as
// intervals; a zero duration disables the corresponding age check.
func (c *pgCache[I, K, V]) get(ctx context.Context, index I, key K) (value V, err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	if !slices.Contains(c.indices, index) {
		return value, cache.NewIndexUnknownErr(index)
	}
	err = c.pool.QueryRow(ctx, getQuery, c.name, index, key, c.config.MaxAge, c.config.LastUseAge).Scan(&value)
	return value, err
}
// Invalidate deletes the objects referenced by the given index keys, so
// subsequent lookups through any index miss.
func (c *pgCache[I, K, V]) Invalidate(ctx context.Context, index I, keys ...K) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, invalidateQuery, c.name, index, keys)
	c.logger.DebugContext(ctx, "pg cache invalidate", "index", index, "keys", keys)
	return err
}
// Delete removes the given index keys only; the objects they referenced
// stay in place until Prune removes unreferenced entries.
func (c *pgCache[I, K, V]) Delete(ctx context.Context, index I, keys ...K) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, deleteQuery, c.name, index, keys)
	c.logger.DebugContext(ctx, "pg cache delete", "index", index, "keys", keys)
	return err
}
// Prune deletes expired entries (by the configured MaxAge and LastUseAge)
// and objects without any remaining index key, as defined in pruneQuery.
func (c *pgCache[I, K, V]) Prune(ctx context.Context) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, pruneQuery, c.name, c.config.MaxAge, c.config.LastUseAge)
	c.logger.DebugContext(ctx, "pg cache prune")
	return err
}
// Truncate deletes all objects belonging to this cache.
func (c *pgCache[I, K, V]) Truncate(ctx context.Context) (err error) {
	ctx, span := tracing.NewSpan(ctx)
	defer func() { span.EndWithError(err) }()

	_, err = c.pool.Exec(ctx, truncateQuery, c.name)
	c.logger.DebugContext(ctx, "pg cache truncate")
	return err
}
// indexKey is the JSON record shape consumed by set.sql's
// jsonb_to_recordset call.
type indexKey[I, K comparable] struct {
	IndexID  I `json:"index_id"`
	IndexKey K `json:"index_key"`
}
// indexKeysFromEntry collects every (index, key) pair under which the
// entry is reachable, across all configured indices.
func (c *pgCache[I, K, V]) indexKeysFromEntry(entry V) []indexKey[I, K] {
	// Naive guess of three keys per index, to limit slice growth.
	// The non-nil empty slice is kept on purpose: it JSON-encodes as [].
	out := make([]indexKey[I, K], 0, len(c.indices)*3)
	for _, idx := range c.indices {
		for _, k := range entry.Keys(idx) {
			out = append(out, indexKey[I, K]{
				IndexID:  idx,
				IndexKey: k,
			})
		}
	}
	return out
}

519
internal/cache/pg/pg_test.go vendored Normal file
View File

@@ -0,0 +1,519 @@
package pg
import (
"context"
"regexp"
"testing"
"time"
"github.com/jackc/pgx/v5"
"github.com/pashagolub/pgxmock/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zitadel/logging"
"github.com/zitadel/zitadel/internal/cache"
)
// testIndex enumerates the lookup indices used by testObject.
type testIndex int

const (
	testIndexID testIndex = iota
	testIndexName
)

// testIndices is the full set of valid indices passed to NewCache.
var testIndices = []testIndex{
	testIndexID,
	testIndexName,
}
// testObject is a minimal cache entry: one ID key and multiple name keys.
type testObject struct {
	ID   string
	Name []string
}
// Keys returns the object's keys for the given index: the single ID for
// testIndexID, all names for testIndexName, and nil otherwise.
func (o *testObject) Keys(index testIndex) []string {
	if index == testIndexID {
		return []string{o.ID}
	}
	if index == testIndexName {
		return o.Name
	}
	return nil
}
// TestNewCache verifies that NewCache creates the cache partitions for the
// "postgres" dialect and propagates partition-creation errors.
func TestNewCache(t *testing.T) {
	tests := []struct {
		name    string
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "success",
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
					WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			conf := cache.CacheConfig{
				Log: &logging.Config{
					Level:     "debug",
					AddSource: true,
				},
			}
			pool, err := pgxmock.NewPool()
			require.NoError(t, err)
			tt.expect(pool)

			c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres")
			require.ErrorIs(t, err, tt.wantErr)
			if tt.wantErr == nil {
				assert.NotNil(t, c)
			}

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Set checks that set forwards the cache name, the extracted
// index keys and the entry to setQuery, and returns exec errors.
func Test_pgCache_Set(t *testing.T) {
	queryExpect := regexp.QuoteMeta(setQuery)
	type args struct {
		entry *testObject
	}
	tests := []struct {
		name    string
		args    args
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			args: args{
				&testObject{
					ID:   "id1",
					Name: []string{"foo", "bar"},
				},
			},
			expect: func(ppi pgxmock.PgxCommonIface) {
				ppi.ExpectExec(queryExpect).
					WithArgs("test",
						[]indexKey[testIndex, string]{
							{IndexID: testIndexID, IndexKey: "id1"},
							{IndexID: testIndexName, IndexKey: "foo"},
							{IndexID: testIndexName, IndexKey: "bar"},
						},
						&testObject{
							ID:   "id1",
							Name: []string{"foo", "bar"},
						}).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "success",
			args: args{
				&testObject{
					ID:   "id1",
					Name: []string{"foo", "bar"},
				},
			},
			expect: func(ppi pgxmock.PgxCommonIface) {
				ppi.ExpectExec(queryExpect).
					WithArgs("test",
						[]indexKey[testIndex, string]{
							{IndexID: testIndexID, IndexKey: "id1"},
							{IndexID: testIndexName, IndexKey: "foo"},
							{IndexID: testIndexName, IndexKey: "bar"},
						},
						&testObject{
							ID:   "id1",
							Name: []string{"foo", "bar"},
						}).
					WillReturnResult(pgxmock.NewResult("INSERT", 1))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, cache.CacheConfig{})
			defer pool.Close()
			tt.expect(pool)

			// Call the unexported set to observe the returned error, which
			// the exported Set deliberately discards.
			err := c.(*pgCache[testIndex, string, *testObject]).
				set(context.Background(), tt.args.entry)
			require.ErrorIs(t, err, tt.wantErr)

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Get covers the unknown-index guard, a miss (no rows), a
// query error and a successful lookup, including the MaxAge/LastUseAge
// arguments passed to getQuery.
func Test_pgCache_Get(t *testing.T) {
	queryExpect := regexp.QuoteMeta(getQuery)
	type args struct {
		index testIndex
		key   string
	}
	tests := []struct {
		name   string
		config cache.CacheConfig
		args   args
		expect func(pgxmock.PgxCommonIface)
		want   *testObject
		wantOk bool
	}{
		{
			// index 99 is not in testIndices, so no query is expected.
			name: "invalid index",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			args: args{
				index: 99,
				key:   "id1",
			},
			expect: func(pci pgxmock.PgxCommonIface) {},
			wantOk: false,
		},
		{
			name: "no rows",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			args: args{
				index: testIndexID,
				key:   "id1",
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WillReturnRows(pgxmock.NewRows([]string{"payload"}))
			},
			wantOk: false,
		},
		{
			name: "error",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			args: args{
				index: testIndexID,
				key:   "id1",
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Duration(0), time.Duration(0)).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantOk: false,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			args: args{
				index: testIndexID,
				key:   "id1",
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectQuery(queryExpect).
					WithArgs("test", testIndexID, "id1", time.Minute, time.Second).
					WillReturnRows(
						pgxmock.NewRows([]string{"payload"}).AddRow(&testObject{
							ID:   "id1",
							Name: []string{"foo", "bar"},
						}),
					)
			},
			want: &testObject{
				ID:   "id1",
				Name: []string{"foo", "bar"},
			},
			wantOk: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, tt.config)
			defer pool.Close()
			tt.expect(pool)

			got, ok := c.Get(context.Background(), tt.args.index, tt.args.key)
			assert.Equal(t, tt.wantOk, ok)
			assert.Equal(t, tt.want, got)

			err := pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Invalidate checks that Invalidate forwards the cache name,
// index and keys to invalidateQuery and returns exec errors.
func Test_pgCache_Invalidate(t *testing.T) {
	queryExpect := regexp.QuoteMeta(invalidateQuery)
	type args struct {
		index testIndex
		keys  []string
	}
	tests := []struct {
		name    string
		config  cache.CacheConfig
		args    args
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			args: args{
				index: testIndexID,
				keys:  []string{"id1", "id2"},
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			args: args{
				index: testIndexID,
				keys:  []string{"id1", "id2"},
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, tt.config)
			defer pool.Close()
			tt.expect(pool)

			err := c.Invalidate(context.Background(), tt.args.index, tt.args.keys...)
			assert.ErrorIs(t, err, tt.wantErr)

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Delete checks that Delete forwards the cache name, index
// and keys to deleteQuery and returns exec errors.
func Test_pgCache_Delete(t *testing.T) {
	queryExpect := regexp.QuoteMeta(deleteQuery)
	type args struct {
		index testIndex
		keys  []string
	}
	tests := []struct {
		name    string
		config  cache.CacheConfig
		args    args
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			args: args{
				index: testIndexID,
				keys:  []string{"id1", "id2"},
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			args: args{
				index: testIndexID,
				keys:  []string{"id1", "id2"},
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", testIndexID, []string{"id1", "id2"}).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, tt.config)
			defer pool.Close()
			tt.expect(pool)

			err := c.Delete(context.Background(), tt.args.index, tt.args.keys...)
			assert.ErrorIs(t, err, tt.wantErr)

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Prune checks that Prune passes the cache name and the
// configured MaxAge/LastUseAge to pruneQuery and returns exec errors.
func Test_pgCache_Prune(t *testing.T) {
	queryExpect := regexp.QuoteMeta(pruneQuery)
	tests := []struct {
		name    string
		config  cache.CacheConfig
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", time.Duration(0), time.Duration(0)).
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test", time.Minute, time.Second).
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, tt.config)
			defer pool.Close()
			tt.expect(pool)

			err := c.Prune(context.Background())
			assert.ErrorIs(t, err, tt.wantErr)

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
// Test_pgCache_Truncate checks that Truncate passes only the cache name
// to truncateQuery and returns exec errors.
func Test_pgCache_Truncate(t *testing.T) {
	queryExpect := regexp.QuoteMeta(truncateQuery)
	tests := []struct {
		name    string
		config  cache.CacheConfig
		expect  func(pgxmock.PgxCommonIface)
		wantErr error
	}{
		{
			name: "error",
			config: cache.CacheConfig{
				MaxAge:     0,
				LastUseAge: 0,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test").
					WillReturnError(pgx.ErrTxClosed)
			},
			wantErr: pgx.ErrTxClosed,
		},
		{
			name: "ok",
			config: cache.CacheConfig{
				MaxAge:     time.Minute,
				LastUseAge: time.Second,
			},
			expect: func(pci pgxmock.PgxCommonIface) {
				pci.ExpectExec(queryExpect).
					WithArgs("test").
					WillReturnResult(pgxmock.NewResult("DELETE", 1))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, pool := prepareCache(t, tt.config)
			defer pool.Close()
			tt.expect(pool)

			err := c.Truncate(context.Background())
			assert.ErrorIs(t, err, tt.wantErr)

			err = pool.ExpectationsWereMet()
			assert.NoError(t, err)
		})
	}
}
const (
	// cacheName is the cache name used in all tests; it appears as the
	// first argument of every expected query.
	cacheName = "test"
	// expectedCreatePartitionQuery is the create_partition template
	// rendered for cacheName; it must match the template byte-for-byte.
	expectedCreatePartitionQuery = `create unlogged table if not exists cache.objects_test
partition of cache.objects
for values in ('test');
create unlogged table if not exists cache.string_keys_test
partition of cache.string_keys
for values in ('test');
`
)
// prepareCache returns a cache backed by a pgxmock pool, with the
// partition-creation exec already expected, so tests only have to add
// their own query expectations.
func prepareCache(t *testing.T, conf cache.CacheConfig) (cache.PrunerCache[testIndex, string, *testObject], pgxmock.PgxPoolIface) {
	conf.Log = &logging.Config{
		Level:     "debug",
		AddSource: true,
	}
	pool, err := pgxmock.NewPool()
	require.NoError(t, err)
	pool.ExpectExec(regexp.QuoteMeta(expectedCreatePartitionQuery)).
		WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
	c, err := NewCache[testIndex, string, *testObject](context.Background(), cacheName, conf, testIndices, pool, "postgres")
	require.NoError(t, err)
	return c, pool
}

18
internal/cache/pg/prune.sql vendored Normal file
View File

@@ -0,0 +1,18 @@
-- Removes stale objects from a cache.
-- $1: cache name, $2: max age, $3: last-use age; '0s' disables a check.
-- An object is deleted when it is older than the max age, has not been
-- used within the last-use age, or is no longer referenced by any index
-- key (orphaned by delete.sql or set.sql key re-pointing).
delete from cache.objects o
where o.cache_name = $1
and (
    case when $2::interval > '0s'
        then created_at < now()-$2::interval -- max age
        else false
    end
    or case when $3::interval > '0s'
        then last_used_at < now()-$3::interval -- last use
        else false
    end
    or o.id not in (
        select object_id
        from cache.string_keys
        where cache_name = $1
    )
)
;

19
internal/cache/pg/set.sql vendored Normal file
View File

@@ -0,0 +1,19 @@
-- Stores an object and its index keys in one statement.
-- $1: cache name, $2: JSON array of {index_id, index_key} records,
-- $3: object payload.
-- The object is always inserted; keys that already exist are re-pointed
-- to the new object id, leaving the previous object unreferenced until
-- prune.sql removes it.
with object as (
    insert into cache.objects (cache_name, payload)
    values ($1, $3)
    returning id
)
insert into cache.string_keys (
    cache_name,
    index_id,
    index_key,
    object_id
)
select $1, keys.index_id, keys.index_key, id as object_id
from object, jsonb_to_recordset($2) keys (
    index_id bigint,
    index_key text
)
on conflict (cache_name, index_id, index_key) do
update set object_id = EXCLUDED.object_id
;

3
internal/cache/pg/truncate.sql vendored Normal file
View File

@@ -0,0 +1,3 @@
-- Deletes every object belonging to one cache ($1: cache name).
-- NOTE(review): index rows are presumably removed via ON DELETE CASCADE
-- on cache.string_keys.object_id -- confirm against the schema.
delete from cache.objects o
where o.cache_name = $1
;