Moves files
internal/backend/s3/config.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package s3

import (
    "net/url"
    "path"
    "strings"

    "restic/errors"
    "restic/options"
)

// Config contains all configuration necessary to connect to an s3 compatible
// server.
type Config struct {
    Endpoint      string
    UseHTTP       bool
    KeyID, Secret string
    Bucket        string
    Prefix        string
    Layout        string `option:"layout" help:"use this backend layout (default: auto-detect)"`

    Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
    MaxRetries  uint `option:"retries" help:"set the number of retries attempted"`
}

// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
    return Config{
        Connections: 5,
    }
}

func init() {
    options.Register("s3", Config{})
}

const defaultPrefix = "restic"

// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host:bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
func ParseConfig(s string) (interface{}, error) {
    switch {
    case strings.HasPrefix(s, "s3:http"):
        // assume that a URL has been specified, parse it and
        // use the host as the endpoint and the path as the
        // bucket name and prefix
        url, err := url.Parse(s[3:])
        if err != nil {
            return nil, errors.Wrap(err, "url.Parse")
        }

        if url.Path == "" {
            return nil, errors.New("s3: bucket name not found")
        }

        path := strings.SplitN(url.Path[1:], "/", 2)
        return createConfig(url.Host, path, url.Scheme == "http")
    case strings.HasPrefix(s, "s3://"):
        s = s[5:]
    case strings.HasPrefix(s, "s3:"):
        s = s[3:]
    default:
        return nil, errors.New("s3: invalid format")
    }
    // use the first entry of the path as the endpoint and the
    // remainder as bucket name and prefix
    path := strings.SplitN(s, "/", 3)
    return createConfig(path[0], path[1:], false)
}

func createConfig(endpoint string, p []string, useHTTP bool) (interface{}, error) {
    var prefix string
    switch {
    case len(p) < 1:
        return nil, errors.New("s3: invalid format, host/region or bucket name not found")
    case len(p) == 1 || p[1] == "":
        prefix = defaultPrefix
    default:
        prefix = path.Clean(p[1])
    }
    cfg := NewConfig()
    cfg.Endpoint = endpoint
    cfg.UseHTTP = useHTTP
    cfg.Bucket = p[0]
    cfg.Prefix = prefix
    return cfg, nil
}
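For context, a minimal usage sketch (not part of this diff) showing how ParseConfig above maps a repository string to a Config. The import path mirrors the one used by s3_test.go further down; the standalone main package, the endpoint, and the bucket/prefix values are illustrative assumptions.

package main

import (
    "fmt"
    "log"

    "restic/backend/s3" // assumed caller-side import path, as used in s3_test.go
)

func main() {
    // Parse an "s3:host/bucket/prefix" style repository string.
    raw, err := s3.ParseConfig("s3:s3.amazonaws.com/mybucket/myprefix")
    if err != nil {
        log.Fatal(err)
    }

    // ParseConfig returns interface{}, so assert back to s3.Config.
    cfg := raw.(s3.Config)

    // Prints: s3.amazonaws.com mybucket myprefix 5
    fmt.Println(cfg.Endpoint, cfg.Bucket, cfg.Prefix, cfg.Connections)
}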
internal/backend/s3/config_test.go (new file, 113 lines)
@@ -0,0 +1,113 @@
package s3

import "testing"

var configTests = []struct {
    s   string
    cfg Config
}{
    {"s3://eu-central-1/bucketname", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "bucketname",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3://eu-central-1/bucketname/", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "bucketname",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3://eu-central-1/bucketname/prefix/directory", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "bucketname",
        Prefix:      "prefix/directory",
        Connections: 5,
    }},
    {"s3://eu-central-1/bucketname/prefix/directory/", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "bucketname",
        Prefix:      "prefix/directory",
        Connections: 5,
    }},
    {"s3:eu-central-1/foobar", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "foobar",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3:eu-central-1/foobar/", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "foobar",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3:eu-central-1/foobar/prefix/directory", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "foobar",
        Prefix:      "prefix/directory",
        Connections: 5,
    }},
    {"s3:eu-central-1/foobar/prefix/directory/", Config{
        Endpoint:    "eu-central-1",
        Bucket:      "foobar",
        Prefix:      "prefix/directory",
        Connections: 5,
    }},
    {"s3:https://hostname:9999/foobar", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "foobar",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3:https://hostname:9999/foobar/", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "foobar",
        Prefix:      "restic",
        Connections: 5,
    }},
    {"s3:http://hostname:9999/foobar", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "foobar",
        Prefix:      "restic",
        UseHTTP:     true,
        Connections: 5,
    }},
    {"s3:http://hostname:9999/foobar/", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "foobar",
        Prefix:      "restic",
        UseHTTP:     true,
        Connections: 5,
    }},
    {"s3:http://hostname:9999/bucket/prefix/directory", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "bucket",
        Prefix:      "prefix/directory",
        UseHTTP:     true,
        Connections: 5,
    }},
    {"s3:http://hostname:9999/bucket/prefix/directory/", Config{
        Endpoint:    "hostname:9999",
        Bucket:      "bucket",
        Prefix:      "prefix/directory",
        UseHTTP:     true,
        Connections: 5,
    }},
}

func TestParseConfig(t *testing.T) {
    for i, test := range configTests {
        cfg, err := ParseConfig(test.s)
        if err != nil {
            t.Errorf("test %d:%s failed: %v", i, test.s, err)
            continue
        }

        if cfg != test.cfg {
            t.Errorf("test %d:\ninput:\n  %s\n wrong config, want:\n  %v\ngot:\n  %v",
                i, test.s, test.cfg, cfg)
            continue
        }
    }
}
internal/backend/s3/s3.go (new file, 447 lines)
@@ -0,0 +1,447 @@
package s3

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "restic"
    "strings"
    "time"

    "restic/backend"
    "restic/errors"

    "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/credentials"

    "restic/debug"
)

// Backend stores data on an S3 endpoint.
type Backend struct {
    client *minio.Client
    sem    *backend.Semaphore
    cfg    Config
    backend.Layout
}

// make sure that *Backend implements backend.Backend
var _ restic.Backend = &Backend{}

const defaultLayout = "default"

func open(cfg Config) (*Backend, error) {
    debug.Log("open, config %#v", cfg)

    if cfg.MaxRetries > 0 {
        minio.MaxRetry = int(cfg.MaxRetries)
    }

    var client *minio.Client
    var err error

    if cfg.KeyID == "" || cfg.Secret == "" {
        debug.Log("key/secret not found, trying to get them from IAM")
        creds := credentials.NewIAM("")
        client, err = minio.NewWithCredentials(cfg.Endpoint, creds, !cfg.UseHTTP, "")

        if err != nil {
            return nil, errors.Wrap(err, "minio.NewWithCredentials")
        }
    } else {
        debug.Log("key/secret found")
        client, err = minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)

        if err != nil {
            return nil, errors.Wrap(err, "minio.New")
        }
    }

    sem, err := backend.NewSemaphore(cfg.Connections)
    if err != nil {
        return nil, err
    }

    be := &Backend{
        client: client,
        sem:    sem,
        cfg:    cfg,
    }

    client.SetCustomTransport(backend.Transport())

    l, err := backend.ParseLayout(be, cfg.Layout, defaultLayout, cfg.Prefix)
    if err != nil {
        return nil, err
    }

    be.Layout = l

    return be, nil
}

// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
    return open(cfg)
}

// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(cfg Config) (restic.Backend, error) {
    be, err := open(cfg)
    if err != nil {
        return nil, errors.Wrap(err, "open")
    }
    found, err := be.client.BucketExists(cfg.Bucket)
    if err != nil {
        debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
        return nil, errors.Wrap(err, "client.BucketExists")
    }

    if !found {
        // create new bucket with default ACL in default region
        err = be.client.MakeBucket(cfg.Bucket, "")
        if err != nil {
            return nil, errors.Wrap(err, "client.MakeBucket")
        }
    }

    return be, nil
}

// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
    debug.Log("IsNotExist(%T, %#v)", err, err)
    if os.IsNotExist(errors.Cause(err)) {
        return true
    }

    if e, ok := errors.Cause(err).(minio.ErrorResponse); ok && e.Code == "NoSuchKey" {
        return true
    }

    return false
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
    return path.Join(p...)
}

type fileInfo struct {
    name    string
    size    int64
    mode    os.FileMode
    modTime time.Time
    isDir   bool
}

func (fi fileInfo) Name() string       { return fi.name }    // base name of the file
func (fi fileInfo) Size() int64        { return fi.size }    // length in bytes for regular files; system-dependent for others
func (fi fileInfo) Mode() os.FileMode  { return fi.mode }    // file mode bits
func (fi fileInfo) ModTime() time.Time { return fi.modTime } // modification time
func (fi fileInfo) IsDir() bool        { return fi.isDir }   // abbreviation for Mode().IsDir()
func (fi fileInfo) Sys() interface{}   { return nil }        // underlying data source (can return nil)

// ReadDir returns the entries for a directory.
func (be *Backend) ReadDir(dir string) (list []os.FileInfo, err error) {
    debug.Log("ReadDir(%v)", dir)

    // make sure dir ends with a slash
    if dir[len(dir)-1] != '/' {
        dir += "/"
    }

    done := make(chan struct{})
    defer close(done)

    for obj := range be.client.ListObjects(be.cfg.Bucket, dir, false, done) {
        if obj.Key == "" {
            continue
        }

        name := strings.TrimPrefix(obj.Key, dir)
        if name == "" {
            return nil, errors.Errorf("invalid key name %v, removing prefix %v yielded empty string", obj.Key, dir)
        }
        entry := fileInfo{
            name:    name,
            size:    obj.Size,
            modTime: obj.LastModified,
        }

        if name[len(name)-1] == '/' {
            entry.isDir = true
            entry.mode = os.ModeDir | 0755
            entry.name = name[:len(name)-1]
        } else {
            entry.mode = 0644
        }

        list = append(list, entry)
    }

    return list, nil
}

// Location returns this backend's location (the bucket name).
func (be *Backend) Location() string {
    return be.Join(be.cfg.Bucket, be.cfg.Prefix)
}

// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
    return be.cfg.Prefix
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
    debug.Log("Save %v", h)

    if err := h.Valid(); err != nil {
        return err
    }

    objName := be.Filename(h)

    // Check key does not already exist
    _, err = be.client.StatObject(be.cfg.Bucket, objName)
    if err == nil {
        debug.Log("%v already exists", h)
        return errors.New("key already exists")
    }

    // prevent the HTTP client from closing a file
    rd = ioutil.NopCloser(rd)

    be.sem.GetToken()
    debug.Log("PutObject(%v, %v)", be.cfg.Bucket, objName)
    n, err := be.client.PutObject(be.cfg.Bucket, objName, rd, "application/octet-stream")
    be.sem.ReleaseToken()

    debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err)

    return errors.Wrap(err, "client.PutObject")
}

// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
    io.ReadCloser
    f func()
}

func (wr wrapReader) Close() error {
    err := wr.ReadCloser.Close()
    wr.f()
    return err
}

// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
    debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
    if err := h.Valid(); err != nil {
        return nil, err
    }

    if offset < 0 {
        return nil, errors.New("offset is negative")
    }

    if length < 0 {
        return nil, errors.Errorf("invalid length %d", length)
    }

    objName := be.Filename(h)

    byteRange := fmt.Sprintf("bytes=%d-", offset)
    if length > 0 {
        byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
    }
    headers := minio.NewGetReqHeaders()
    headers.Add("Range", byteRange)

    be.sem.GetToken()
    debug.Log("Load(%v) send range %v", h, byteRange)

    coreClient := minio.Core{Client: be.client}
    rd, _, err := coreClient.GetObject(be.cfg.Bucket, objName, headers)
    if err != nil {
        be.sem.ReleaseToken()
        return nil, err
    }

    closeRd := wrapReader{
        ReadCloser: rd,
        f: func() {
            debug.Log("Close()")
            be.sem.ReleaseToken()
        },
    }

    return closeRd, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
    debug.Log("%v", h)

    objName := be.Filename(h)
    var obj *minio.Object

    obj, err = be.client.GetObject(be.cfg.Bucket, objName)
    if err != nil {
        debug.Log("GetObject() err %v", err)
        return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
    }

    // make sure that the object is closed properly.
    defer func() {
        e := obj.Close()
        if err == nil {
            err = errors.Wrap(e, "Close")
        }
    }()

    fi, err := obj.Stat()
    if err != nil {
        debug.Log("Stat() err %v", err)
        return restic.FileInfo{}, errors.Wrap(err, "Stat")
    }

    return restic.FileInfo{Size: fi.Size}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
    found := false
    objName := be.Filename(h)
    _, err := be.client.StatObject(be.cfg.Bucket, objName)
    if err == nil {
        found = true
    }

    // If error, then not found
    return found, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
    objName := be.Filename(h)
    err := be.client.RemoveObject(be.cfg.Bucket, objName)
    debug.Log("Remove(%v) at %v -> err %v", h, objName, err)

    if be.IsNotExist(err) {
        err = nil
    }

    return errors.Wrap(err, "client.RemoveObject")
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *Backend) List(ctx context.Context, t restic.FileType) <-chan string {
    debug.Log("listing %v", t)
    ch := make(chan string)

    prefix := be.Dirname(restic.Handle{Type: t})

    // make sure prefix ends with a slash
    if prefix[len(prefix)-1] != '/' {
        prefix += "/"
    }

    listresp := be.client.ListObjects(be.cfg.Bucket, prefix, true, ctx.Done())

    go func() {
        defer close(ch)
        for obj := range listresp {
            m := strings.TrimPrefix(obj.Key, prefix)
            if m == "" {
                continue
            }

            select {
            case ch <- path.Base(m):
            case <-ctx.Done():
                return
            }
        }
    }()

    return ch
}

// Remove keys for a specified backend type.
func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
    for key := range be.List(ctx, restic.DataFile) {
        err := be.Remove(ctx, restic.Handle{Type: restic.DataFile, Name: key})
        if err != nil {
            return err
        }
    }

    return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
    alltypes := []restic.FileType{
        restic.DataFile,
        restic.KeyFile,
        restic.LockFile,
        restic.SnapshotFile,
        restic.IndexFile}

    for _, t := range alltypes {
        err := be.removeKeys(ctx, t)
        if err != nil {
            return nil
        }
    }

    return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
}

// Close does nothing
func (be *Backend) Close() error { return nil }

// Rename moves a file based on the new layout l.
func (be *Backend) Rename(h restic.Handle, l backend.Layout) error {
    debug.Log("Rename %v to %v", h, l)
    oldname := be.Filename(h)
    newname := l.Filename(h)

    if oldname == newname {
        debug.Log(" %v is already renamed", newname)
        return nil
    }

    debug.Log(" %v -> %v", oldname, newname)

    src := minio.NewSourceInfo(be.cfg.Bucket, oldname, nil)

    dst, err := minio.NewDestinationInfo(be.cfg.Bucket, newname, nil, nil)
    if err != nil {
        return errors.Wrap(err, "NewDestinationInfo")
    }

    err = be.client.CopyObject(dst, src)
    if err != nil && be.IsNotExist(err) {
        debug.Log("copy failed: %v, seems to already have been renamed", err)
        return nil
    }

    if err != nil {
        debug.Log("copy failed: %v", err)
        return err
    }

    return be.client.RemoveObject(be.cfg.Bucket, oldname)
}
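A hedged sketch (not part of this diff) of how the backend above is typically driven: parse a config, create the backend, save an object under a handle, and load it back. The credentials, endpoint, bucket, and handle name are placeholder assumptions, the import paths follow s3_test.go below, and error handling is abbreviated.

package main

import (
    "bytes"
    "context"
    "io/ioutil"
    "log"

    "restic"            // provides Handle, DataFile, Backend, as used above
    "restic/backend/s3" // assumed caller-side import path, as in s3_test.go
)

func main() {
    ctx := context.Background()

    // Parse a repository string; ParseConfig returns interface{}.
    raw, err := s3.ParseConfig("s3:s3.amazonaws.com/mybucket/restic")
    if err != nil {
        log.Fatal(err)
    }
    cfg := raw.(s3.Config)
    cfg.KeyID = "ACCESS_KEY_ID"      // placeholder credentials
    cfg.Secret = "SECRET_ACCESS_KEY" // placeholder credentials

    // Create opens the backend and creates the bucket if it does not exist.
    be, err := s3.Create(cfg)
    if err != nil {
        log.Fatal(err)
    }
    defer be.Close()

    // Save stores the reader's content under the handle's filename.
    // The name here is a placeholder; restic normally names data files by content ID.
    h := restic.Handle{Type: restic.DataFile, Name: "example"}
    if err := be.Save(ctx, h, bytes.NewReader([]byte("hello"))); err != nil {
        log.Fatal(err)
    }

    // Load returns an io.ReadCloser; length 0 means "read to the end".
    rd, err := be.Load(ctx, h, 0, 0)
    if err != nil {
        log.Fatal(err)
    }
    defer rd.Close()

    buf, err := ioutil.ReadAll(rd)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("read %d bytes back", len(buf))
}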
internal/backend/s3/s3_test.go (new file, 320 lines)
@@ -0,0 +1,320 @@
package s3_test

import (
    "context"
    "crypto/rand"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "os/exec"
    "path/filepath"
    "restic"
    "testing"
    "time"

    "restic/backend/s3"
    "restic/backend/test"
    . "restic/test"
)

func mkdir(t testing.TB, dir string) {
    err := os.MkdirAll(dir, 0700)
    if err != nil {
        t.Fatal(err)
    }
}

func runMinio(ctx context.Context, t testing.TB, dir, key, secret string) func() {
    mkdir(t, filepath.Join(dir, "config"))
    mkdir(t, filepath.Join(dir, "root"))

    cmd := exec.CommandContext(ctx, "minio",
        "server",
        "--address", "127.0.0.1:9000",
        "--config-dir", filepath.Join(dir, "config"),
        filepath.Join(dir, "root"))
    cmd.Env = append(os.Environ(),
        "MINIO_ACCESS_KEY="+key,
        "MINIO_SECRET_KEY="+secret,
    )
    cmd.Stderr = os.Stderr

    err := cmd.Start()
    if err != nil {
        t.Fatal(err)
    }

    // wait until the TCP port is reachable
    var success bool
    for i := 0; i < 100; i++ {
        time.Sleep(200 * time.Millisecond)

        c, err := net.Dial("tcp", "localhost:9000")
        if err == nil {
            success = true
            if err := c.Close(); err != nil {
                t.Fatal(err)
            }
            break
        }
    }

    if !success {
        t.Fatal("unable to connect to minio server")
        return nil
    }

    return func() {
        err = cmd.Process.Kill()
        if err != nil {
            t.Fatal(err)
        }

        // ignore errors, we've killed the process
        _ = cmd.Wait()
    }
}

func newRandomCredentials(t testing.TB) (key, secret string) {
    buf := make([]byte, 10)
    _, err := io.ReadFull(rand.Reader, buf)
    if err != nil {
        t.Fatal(err)
    }
    key = hex.EncodeToString(buf)

    _, err = io.ReadFull(rand.Reader, buf)
    if err != nil {
        t.Fatal(err)
    }
    secret = hex.EncodeToString(buf)

    return key, secret
}

type MinioTestConfig struct {
    s3.Config

    tempdir       string
    removeTempdir func()
    stopServer    func()
}

func createS3(t testing.TB, cfg MinioTestConfig) (be restic.Backend, err error) {
    for i := 0; i < 10; i++ {
        be, err = s3.Create(cfg.Config)
        if err != nil {
            t.Logf("s3 open: try %d: error %v", i, err)
            time.Sleep(500 * time.Millisecond)
            continue
        }

        break
    }

    return be, err
}

func newMinioTestSuite(ctx context.Context, t testing.TB) *test.Suite {
    return &test.Suite{
        // NewConfig returns a config for a new temporary backend that will be used in tests.
        NewConfig: func() (interface{}, error) {
            cfg := MinioTestConfig{}

            cfg.tempdir, cfg.removeTempdir = TempDir(t)
            key, secret := newRandomCredentials(t)
            cfg.stopServer = runMinio(ctx, t, cfg.tempdir, key, secret)

            cfg.Config = s3.NewConfig()
            cfg.Config.Endpoint = "localhost:9000"
            cfg.Config.Bucket = "restictestbucket"
            cfg.Config.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
            cfg.Config.UseHTTP = true
            cfg.Config.KeyID = key
            cfg.Config.Secret = secret
            return cfg, nil
        },

        // CreateFn is a function that creates a temporary repository for the tests.
        Create: func(config interface{}) (restic.Backend, error) {
            cfg := config.(MinioTestConfig)

            be, err := createS3(t, cfg)
            if err != nil {
                return nil, err
            }

            exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
            if err != nil {
                return nil, err
            }

            if exists {
                return nil, errors.New("config already exists")
            }

            return be, nil
        },

        // OpenFn is a function that opens a previously created temporary repository.
        Open: func(config interface{}) (restic.Backend, error) {
            cfg := config.(MinioTestConfig)
            return s3.Open(cfg.Config)
        },

        // CleanupFn removes data created during the tests.
        Cleanup: func(config interface{}) error {
            cfg := config.(MinioTestConfig)
            if cfg.stopServer != nil {
                cfg.stopServer()
            }
            if cfg.removeTempdir != nil {
                cfg.removeTempdir()
            }
            return nil
        },
    }
}

func TestBackendMinio(t *testing.T) {
    defer func() {
        if t.Skipped() {
            SkipDisallowed(t, "restic/backend/s3.TestBackendMinio")
        }
    }()

    // try to find a minio binary
    _, err := exec.LookPath("minio")
    if err != nil {
        t.Skip(err)
        return
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    newMinioTestSuite(ctx, t).RunTests(t)
}

func BenchmarkBackendMinio(t *testing.B) {
    // try to find a minio binary
    _, err := exec.LookPath("minio")
    if err != nil {
        t.Skip(err)
        return
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    newMinioTestSuite(ctx, t).RunBenchmarks(t)
}

func newS3TestSuite(t testing.TB) *test.Suite {
    return &test.Suite{
        // do not use excessive data
        MinimalData: true,

        // NewConfig returns a config for a new temporary backend that will be used in tests.
        NewConfig: func() (interface{}, error) {
            s3cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY"))
            if err != nil {
                return nil, err
            }

            cfg := s3cfg.(s3.Config)
            cfg.KeyID = os.Getenv("RESTIC_TEST_S3_KEY")
            cfg.Secret = os.Getenv("RESTIC_TEST_S3_SECRET")
            cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
            return cfg, nil
        },

        // CreateFn is a function that creates a temporary repository for the tests.
        Create: func(config interface{}) (restic.Backend, error) {
            cfg := config.(s3.Config)

            be, err := s3.Create(cfg)
            if err != nil {
                return nil, err
            }

            exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
            if err != nil {
                return nil, err
            }

            if exists {
                return nil, errors.New("config already exists")
            }

            return be, nil
        },

        // OpenFn is a function that opens a previously created temporary repository.
        Open: func(config interface{}) (restic.Backend, error) {
            cfg := config.(s3.Config)
            return s3.Open(cfg)
        },

        // CleanupFn removes data created during the tests.
        Cleanup: func(config interface{}) error {
            cfg := config.(s3.Config)

            be, err := s3.Open(cfg)
            if err != nil {
                return err
            }

            if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
                return err
            }

            return nil
        },
    }
}

func TestBackendS3(t *testing.T) {
    defer func() {
        if t.Skipped() {
            SkipDisallowed(t, "restic/backend/s3.TestBackendS3")
        }
    }()

    vars := []string{
        "RESTIC_TEST_S3_KEY",
        "RESTIC_TEST_S3_SECRET",
        "RESTIC_TEST_S3_REPOSITORY",
    }

    for _, v := range vars {
        if os.Getenv(v) == "" {
            t.Skipf("environment variable %v not set", v)
            return
        }
    }

    t.Logf("run tests")
    newS3TestSuite(t).RunTests(t)
}

func BenchmarkBackendS3(t *testing.B) {
    vars := []string{
        "RESTIC_TEST_S3_KEY",
        "RESTIC_TEST_S3_SECRET",
        "RESTIC_TEST_S3_REPOSITORY",
    }

    for _, v := range vars {
        if os.Getenv(v) == "" {
            t.Skipf("environment variable %v not set", v)
            return
        }
    }

    t.Logf("run tests")
    newS3TestSuite(t).RunBenchmarks(t)
}