mirror of
https://github.com/restic/restic.git
synced 2025-10-27 12:18:35 +00:00
feat(backends/s3): add warmup support before repacks and restores (#5173)
* feat(backends/s3): add warmup support before repacks and restores This commit introduces basic support for transitioning pack files stored in cold storage to hot storage on S3 and S3-compatible providers. To prevent unexpected behavior for existing users, the feature is gated behind new flags: - `s3.enable-restore`: opt-in flag (defaults to false) - `s3.restore-days`: number of days for the restored objects to remain in hot storage (defaults to `7`) - `s3.restore-timeout`: maximum time to wait for a single restoration (defaults to `1 day`) - `s3.restore-tier`: retrieval tier at which the restore will be processed. (defaults to `Standard`) As restoration times can be lengthy, this implementation preemptively restores selected packs to prevent incessant restore-delays during downloads. This is slightly sub-optimal as we could process packs out-of-order (as soon as they're transitioned), but this would really add too much complexity for a marginal gain in speed. To maintain simplicity and prevent resource exhaustion with lots of packs, no new concurrency mechanisms or goroutines were added. This just hooks gracefully into the existing routines. **Limitations:** - Tests against the backend were not written due to the lack of cold storage class support in MinIO. Testing was done manually on Scaleway's S3-compatible object storage. If necessary, we could explore testing with LocalStack or mocks, though this requires further discussion. - Currently, this feature only warms up before restores and repacks (prune/copy), as those are the two main use-cases I came across. Support for other commands may be added in future iterations, as long as affected packs can be calculated in advance. - The feature is gated behind a new alpha `s3-restore` feature flag to make it explicit that the feature is still wet behind the ears. - There is no explicit user notification for ongoing pack restorations. 
While I think it is not necessary because of the opt-in flag, showing some notice may improve usability (but would probably require major refactoring in the progress bar which I didn't want to start). Another possibility would be to add a flag to send restore requests and fail early. See https://github.com/restic/restic/issues/3202 * ui: warn user when files are warming up from cold storage * refactor: remove the PacksWarmer struct It's easier to handle multiple handles in the backend directly, and it may open the door to reducing the number of requests made to the backend in the future.
This commit is contained in:
@@ -475,3 +475,9 @@ func (be *Backend) Delete(ctx context.Context) error {
|
||||
|
||||
// Close does nothing
|
||||
func (be *Backend) Close() error { return nil }
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -335,3 +335,9 @@ func (be *b2Backend) Delete(ctx context.Context) error {
|
||||
|
||||
// Close does nothing
|
||||
func (be *b2Backend) Close() error { return nil }
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *b2Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *b2Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -75,6 +75,21 @@ type Backend interface {
|
||||
|
||||
// Delete removes all data in the backend.
|
||||
Delete(ctx context.Context) error
|
||||
|
||||
// Warmup ensures that the specified handles are ready for upcoming reads.
|
||||
// This is particularly useful for transitioning files from cold to hot
|
||||
// storage.
|
||||
//
|
||||
// The method is non-blocking. WarmupWait can be used to wait for
|
||||
// completion.
|
||||
//
|
||||
// Returns:
|
||||
// - Handles currently warming up.
|
||||
// - An error if warmup fails.
|
||||
Warmup(ctx context.Context, h []Handle) ([]Handle, error)
|
||||
|
||||
// WarmupWait waits until all given handles are warm.
|
||||
WarmupWait(ctx context.Context, h []Handle) error
|
||||
}
|
||||
|
||||
type Unwrapper interface {
|
||||
|
||||
10
internal/backend/cache/backend.go
vendored
10
internal/backend/cache/backend.go
vendored
@@ -258,3 +258,13 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Warmup delegates to wrapped backend.
|
||||
func (b *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
|
||||
return b.Backend.Warmup(ctx, h)
|
||||
}
|
||||
|
||||
// WarmupWait delegates to wrapped backend.
|
||||
func (b *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
|
||||
return b.Backend.WarmupWait(ctx, h)
|
||||
}
|
||||
|
||||
@@ -82,3 +82,9 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse
|
||||
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
|
||||
return be.b.Stat(ctx, h)
|
||||
}
|
||||
|
||||
// Warmup should not occur during dry-runs.
|
||||
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -363,3 +363,9 @@ func (be *Backend) Delete(ctx context.Context) error {
|
||||
|
||||
// Close does nothing.
|
||||
func (be *Backend) Close() error { return nil }
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -371,3 +371,9 @@ func (b *Local) Close() error {
|
||||
// same function.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Warmup not implemented
|
||||
func (b *Local) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (b *Local) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -249,3 +249,9 @@ func (be *MemoryBackend) Delete(ctx context.Context) error {
|
||||
func (be *MemoryBackend) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -20,6 +20,8 @@ type Backend struct {
|
||||
ListFn func(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error
|
||||
RemoveFn func(ctx context.Context, h backend.Handle) error
|
||||
DeleteFn func(ctx context.Context) error
|
||||
WarmupFn func(ctx context.Context, h []backend.Handle) ([]backend.Handle, error)
|
||||
WarmupWaitFn func(ctx context.Context, h []backend.Handle) error
|
||||
ConnectionsFn func() uint
|
||||
HasherFn func() hash.Hash
|
||||
HasAtomicReplaceFn func() bool
|
||||
@@ -150,5 +152,21 @@ func (m *Backend) Delete(ctx context.Context) error {
|
||||
return m.DeleteFn(ctx)
|
||||
}
|
||||
|
||||
func (m *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
|
||||
if m.WarmupFn == nil {
|
||||
return []backend.Handle{}, errors.New("not implemented")
|
||||
}
|
||||
|
||||
return m.WarmupFn(ctx, h)
|
||||
}
|
||||
|
||||
func (m *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
|
||||
if m.WarmupWaitFn == nil {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
return m.WarmupWaitFn(ctx, h)
|
||||
}
|
||||
|
||||
// Make sure that Backend implements the backend interface.
|
||||
var _ backend.Backend = &Backend{}
|
||||
|
||||
@@ -340,3 +340,9 @@ func (be *Backend) Close() error {
|
||||
debug.Log("wait for rclone returned: %v", be.waitResult)
|
||||
return be.waitResult
|
||||
}
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -439,3 +439,9 @@ func (b *Backend) Close() error {
|
||||
func (b *Backend) Delete(ctx context.Context) error {
|
||||
return util.DefaultDelete(ctx, b)
|
||||
}
|
||||
|
||||
// Warmup not implemented
|
||||
func (b *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (b *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -289,3 +289,11 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend
|
||||
func (be *Backend) Unwrap() backend.Backend {
|
||||
return be.Backend
|
||||
}
|
||||
|
||||
// Warmup delegates to wrapped backend
|
||||
func (be *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
|
||||
return be.Backend.Warmup(ctx, h)
|
||||
}
|
||||
func (be *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
|
||||
return be.Backend.WarmupWait(ctx, h)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
@@ -23,6 +24,11 @@ type Config struct {
|
||||
Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"`
|
||||
StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"`
|
||||
|
||||
EnableRestore bool `option:"enable-restore" help:"restore objects from GLACIER or DEEP_ARCHIVE storage classes (default: false, requires \"s3-restore\" feature flag)"`
|
||||
RestoreDays int `option:"restore-days" help:"lifetime in days of restored object (default: 7)"`
|
||||
RestoreTimeout time.Duration `option:"restore-timeout" help:"maximum time to wait for objects transition (default: 1d)"`
|
||||
RestoreTier string `option:"restore-tier" help:"Retrieval tier at which the restore will be processed. (Standard, Bulk or Expedited) (default: Standard)"`
|
||||
|
||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||
MaxRetries uint `option:"retries" help:"set the number of retries attempted"`
|
||||
Region string `option:"region" help:"set region"`
|
||||
@@ -34,8 +40,12 @@ type Config struct {
|
||||
// NewConfig returns a new Config with the default values filled in.
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
Connections: 5,
|
||||
ListObjectsV1: false,
|
||||
Connections: 5,
|
||||
ListObjectsV1: false,
|
||||
EnableRestore: false,
|
||||
RestoreDays: 7,
|
||||
RestoreTimeout: 24 * time.Hour,
|
||||
RestoreTier: "Standard",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,117 +3,117 @@ package s3
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
)
|
||||
|
||||
func newTestConfig(cfg Config) Config {
|
||||
if cfg.Connections == 0 {
|
||||
cfg.Connections = 5
|
||||
}
|
||||
if cfg.RestoreDays == 0 {
|
||||
cfg.RestoreDays = 7
|
||||
}
|
||||
if cfg.RestoreTimeout == 0 {
|
||||
cfg.RestoreTimeout = 24 * time.Hour
|
||||
}
|
||||
if cfg.RestoreTier == "" {
|
||||
cfg.RestoreTier = "Standard"
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
var configTests = []test.ConfigTestData[Config]{
|
||||
{S: "s3://eu-central-1/bucketname", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3://eu-central-1/bucketname/", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:eu-central-1/foobar", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:eu-central-1/foobar/", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:eu-central-1/foobar/prefix/directory", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:hostname.foo/foobar", Cfg: Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:hostname.foo/foobar/prefix/directory", Cfg: Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:https://hostname/foobar", Cfg: Config{
|
||||
Endpoint: "hostname",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:https://hostname:9999/foobar", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:https://hostname:9999/foobar/", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:http://hostname:9999/foobar", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
UseHTTP: true,
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:http://hostname:9999/foobar/", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
UseHTTP: true,
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "bucket",
|
||||
Prefix: "prefix/directory",
|
||||
UseHTTP: true,
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "bucket",
|
||||
Prefix: "prefix/directory",
|
||||
UseHTTP: true,
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3://eu-central-1/bucketname", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3://eu-central-1/bucketname/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix/directory",
|
||||
})},
|
||||
{S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix/directory",
|
||||
})},
|
||||
{S: "s3:eu-central-1/foobar", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:eu-central-1/foobar/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:eu-central-1/foobar/prefix/directory", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
})},
|
||||
{S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
})},
|
||||
{S: "s3:hostname.foo/foobar", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:hostname.foo/foobar/prefix/directory", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
})},
|
||||
{S: "s3:https://hostname/foobar", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:https://hostname:9999/foobar", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:https://hostname:9999/foobar/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
})},
|
||||
{S: "s3:http://hostname:9999/foobar", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
UseHTTP: true,
|
||||
})},
|
||||
{S: "s3:http://hostname:9999/foobar/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
UseHTTP: true,
|
||||
})},
|
||||
{S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "bucket",
|
||||
Prefix: "prefix/directory",
|
||||
UseHTTP: true,
|
||||
})},
|
||||
{S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: newTestConfig(Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "bucket",
|
||||
Prefix: "prefix/directory",
|
||||
UseHTTP: true,
|
||||
})},
|
||||
}
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
|
||||
@@ -8,8 +8,11 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
@@ -32,6 +35,17 @@ type Backend struct {
|
||||
// make sure that *Backend implements backend.Backend
|
||||
var _ backend.Backend = &Backend{}
|
||||
|
||||
var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"}
|
||||
|
||||
type warmupStatus int
|
||||
|
||||
const (
|
||||
warmupStatusCold warmupStatus = iota
|
||||
warmupStatusWarmingUp
|
||||
warmupStatusWarm
|
||||
warmupStatusLukewarm
|
||||
)
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
@@ -39,6 +53,10 @@ func NewFactory() location.Factory {
|
||||
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
debug.Log("open, config %#v", cfg)
|
||||
|
||||
if cfg.EnableRestore && !feature.Flag.Enabled(feature.S3Restore) {
|
||||
return nil, fmt.Errorf("feature flag `s3-restore` is required to use `-o s3.enable-restore=true`")
|
||||
}
|
||||
|
||||
if cfg.KeyID == "" && cfg.Secret.String() != "" {
|
||||
return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
|
||||
} else if cfg.KeyID != "" && cfg.Secret.String() == "" {
|
||||
@@ -266,9 +284,9 @@ func (be *Backend) Path() string {
|
||||
// For archive storage classes, only data files are stored using that class; metadata
|
||||
// must remain instantly accessible.
|
||||
func (be *Backend) useStorageClass(h backend.Handle) bool {
|
||||
notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
|
||||
isDataFile := h.Type == backend.PackFile && !h.IsMetadata
|
||||
return isDataFile || notArchiveClass
|
||||
isArchiveClass := slices.Contains(archiveClasses, be.cfg.StorageClass)
|
||||
return !isArchiveClass || isDataFile
|
||||
}
|
||||
|
||||
// Save stores data in the backend at the handle.
|
||||
@@ -440,3 +458,148 @@ func (be *Backend) Delete(ctx context.Context) error {
|
||||
|
||||
// Close does nothing
|
||||
func (be *Backend) Close() error { return nil }
|
||||
|
||||
// Warmup transitions handles from cold to hot storage if needed.
|
||||
func (be *Backend) Warmup(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
|
||||
handlesWarmingUp := []backend.Handle{}
|
||||
|
||||
if be.cfg.EnableRestore {
|
||||
for _, h := range handles {
|
||||
filename := be.Filename(h)
|
||||
isWarmingUp, err := be.requestRestore(ctx, filename)
|
||||
if err != nil {
|
||||
return handlesWarmingUp, err
|
||||
}
|
||||
if isWarmingUp {
|
||||
debug.Log("s3 file is being restored: %s", filename)
|
||||
handlesWarmingUp = append(handlesWarmingUp, h)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return handlesWarmingUp, nil
|
||||
}
|
||||
|
||||
// requestRestore sends a glacier restore request on a given file.
|
||||
func (be *Backend) requestRestore(ctx context.Context, filename string) (bool, error) {
|
||||
objectInfo, err := be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
ws := be.getWarmupStatus(objectInfo)
|
||||
switch ws {
|
||||
case warmupStatusWarm:
|
||||
return false, nil
|
||||
case warmupStatusWarmingUp:
|
||||
return true, nil
|
||||
}
|
||||
|
||||
opts := minio.RestoreRequest{}
|
||||
opts.SetDays(be.cfg.RestoreDays)
|
||||
opts.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierType(be.cfg.RestoreTier)})
|
||||
|
||||
if err := be.client.RestoreObject(ctx, be.cfg.Bucket, filename, "", opts); err != nil {
|
||||
var e minio.ErrorResponse
|
||||
if errors.As(err, &e) {
|
||||
switch e.Code {
|
||||
case "InvalidObjectState":
|
||||
return false, nil
|
||||
case "RestoreAlreadyInProgress":
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
isWarmingUp := ws != warmupStatusLukewarm
|
||||
return isWarmingUp, nil
|
||||
}
|
||||
|
||||
// getWarmupStatus returns the warmup status of the provided object.
|
||||
func (be *Backend) getWarmupStatus(objectInfo minio.ObjectInfo) warmupStatus {
|
||||
// We can't use objectInfo.StorageClass to get the storage class of the
|
||||
// object because this field is only set during ListObjects operations.
|
||||
// The response header is the documented way to get the storage class
|
||||
// for GetObject/StatObject operations.
|
||||
storageClass := objectInfo.Metadata.Get("X-Amz-Storage-Class")
|
||||
isArchiveClass := slices.Contains(archiveClasses, storageClass)
|
||||
if !isArchiveClass {
|
||||
return warmupStatusWarm
|
||||
}
|
||||
|
||||
restore := objectInfo.Restore
|
||||
if restore != nil {
|
||||
if restore.OngoingRestore {
|
||||
return warmupStatusWarmingUp
|
||||
}
|
||||
|
||||
minExpiryTime := time.Now().Add(time.Duration(be.cfg.RestoreDays) * 24 * time.Hour)
|
||||
expiryTime := restore.ExpiryTime
|
||||
if !expiryTime.IsZero() {
|
||||
if minExpiryTime.Before(expiryTime) {
|
||||
return warmupStatusWarm
|
||||
}
|
||||
return warmupStatusLukewarm
|
||||
}
|
||||
}
|
||||
|
||||
return warmupStatusCold
|
||||
}
|
||||
|
||||
// WarmupWait waits until all handles are in hot storage.
|
||||
func (be *Backend) WarmupWait(ctx context.Context, handles []backend.Handle) error {
|
||||
timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, be.cfg.RestoreTimeout)
|
||||
defer timeoutCtxCancel()
|
||||
|
||||
if be.cfg.EnableRestore {
|
||||
for _, h := range handles {
|
||||
filename := be.Filename(h)
|
||||
err := be.waitForRestore(timeoutCtx, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
debug.Log("s3 file is restored: %s", filename)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForRestore waits for a given file to be restored.
|
||||
func (be *Backend) waitForRestore(ctx context.Context, filename string) error {
|
||||
for {
|
||||
var objectInfo minio.ObjectInfo
|
||||
|
||||
// Restore requests can last many hours, therefore network may fail
|
||||
// temporarily. We don't need to die in such even.
|
||||
b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10)
|
||||
b = backoff.WithContext(b, ctx)
|
||||
err := backoff.Retry(
|
||||
func() (err error) {
|
||||
objectInfo, err = be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
|
||||
return
|
||||
},
|
||||
b,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ws := be.getWarmupStatus(objectInfo)
|
||||
switch ws {
|
||||
case warmupStatusLukewarm:
|
||||
fallthrough
|
||||
case warmupStatusWarm:
|
||||
return nil
|
||||
case warmupStatusCold:
|
||||
return errors.New("waiting on S3 handle that is not warming up")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(1 * time.Minute):
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -588,3 +588,9 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {
|
||||
func (r *SFTP) Delete(ctx context.Context) error {
|
||||
return r.deleteRecursive(ctx, r.p)
|
||||
}
|
||||
|
||||
// Warmup not implemented
|
||||
func (r *SFTP) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (r *SFTP) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
@@ -269,3 +269,9 @@ func (be *beSwift) Delete(ctx context.Context) error {
|
||||
|
||||
// Close does nothing
|
||||
func (be *beSwift) Close() error { return nil }
|
||||
|
||||
// Warmup not implemented
|
||||
func (be *beSwift) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
|
||||
return []backend.Handle{}, nil
|
||||
}
|
||||
func (be *beSwift) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|
||||
|
||||
Reference in New Issue
Block a user