Moves files

Alexander Neumann
2017-07-23 14:19:13 +02:00
parent d1bd160b0a
commit 83d1a46526
284 changed files with 0 additions and 0 deletions

@@ -0,0 +1,116 @@
package archiver
import (
"context"
"io"
"restic"
"restic/debug"
"time"
"restic/errors"
"github.com/restic/chunker"
)
// Reader allows saving a stream of data to the repository.
type Reader struct {
restic.Repository
Tags []string
Hostname string
}
// Archive reads data from the reader and saves it to the repo.
func (r *Reader) Archive(ctx context.Context, name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
if name == "" {
return nil, restic.ID{}, errors.New("no filename given")
}
debug.Log("start archiving %s", name)
sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname)
if err != nil {
return nil, restic.ID{}, err
}
p.Start()
defer p.Done()
repo := r.Repository
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
ids := restic.IDs{}
var fileSize uint64
for {
chunk, err := chnker.Next(getBuf())
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
}
id := restic.Hash(chunk.Data)
if !repo.Index().Has(id, restic.DataBlob) {
_, err := repo.SaveBlob(ctx, restic.DataBlob, chunk.Data, id)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
debug.Log("blob %v already saved in the repo\n", id.Str())
}
freeBuf(chunk.Data)
ids = append(ids, id)
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
fileSize += uint64(chunk.Length)
}
tree := &restic.Tree{
Nodes: []*restic.Node{
{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
Type: "file",
Mode: 0644,
Size: fileSize,
UID: sn.UID,
GID: sn.GID,
User: sn.Username,
Content: ids,
},
},
}
treeID, err := repo.SaveTree(ctx, tree)
if err != nil {
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("snapshot saved as %v", id.Str())
err = repo.Flush()
if err != nil {
return nil, restic.ID{}, err
}
err = repo.SaveIndex(ctx)
if err != nil {
return nil, restic.ID{}, err
}
return sn, id, nil
}
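For reference, a minimal usage sketch (not part of this commit; it mirrors the tests in the next file and assumes an open restic.Repository):

// Hypothetical helper: back up a stream as a single-file snapshot using the
// Reader type above.
func archiveStream(ctx context.Context, repo restic.Repository, name string, rd io.Reader) (restic.ID, error) {
	r := &Reader{
		Repository: repo,
		Hostname:   "localhost",
		Tags:       []string{"stream"},
	}
	// a nil *restic.Progress is valid; the tests below pass nil as well
	_, id, err := r.Archive(ctx, name, rd, nil)
	return id, err
}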

@@ -0,0 +1,201 @@
package archiver
import (
"bytes"
"context"
"errors"
"io"
"math/rand"
"restic"
"restic/checker"
"restic/repository"
"testing"
)
func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}
return n
}
func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
tree, err := repo.LoadTree(context.TODO(), treeID)
if err != nil {
t.Fatalf("LoadTree() returned error %v", err)
}
if len(tree.Nodes) != 1 {
t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes))
}
node := tree.Nodes[0]
if node.Name != "fakefile" {
t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name)
}
if len(node.Content) == 0 {
t.Fatalf("node.Content has length 0")
}
// check blobs
for i, id := range node.Content {
size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
t.Fatal(err)
}
buf := restic.NewBlobBuffer(int(size))
n := loadBlob(t, repo, id, buf)
if n != len(buf) {
t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
}
buf2 := make([]byte, int(size))
_, err = io.ReadFull(rd, buf2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, buf2) {
t.Fatalf("blob %d (%v) is wrong", i, id.Str())
}
}
}
// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
}
func TestArchiveReader(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
seed := rand.Int63()
size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
t.Logf("seed is 0x%016x, size is %v", seed, size)
f := fakeFile(t, seed, size)
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", f, nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
checker.TestCheckRepo(t, repo)
}
func TestArchiveReaderNull(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(nil), nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checker.TestCheckRepo(t, repo)
}
type errReader string
func (e errReader) Read([]byte) (int, error) {
return 0, errors.New(string(e))
}
func countSnapshots(t testing.TB, repo restic.Repository) int {
snapshots := 0
for range repo.List(context.TODO(), restic.SnapshotFile) {
snapshots++
}
return snapshots
}
func TestArchiveReaderError(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", errReader("error returned by reading stdin"), nil)
if err == nil {
t.Errorf("expected error not returned")
}
if sn != nil {
t.Errorf("Snapshot should be nil, but isn't")
}
if !id.IsNull() {
t.Errorf("id should be null, but %v returned", id.Str())
}
n := countSnapshots(t, repo)
if n > 0 {
t.Errorf("expected zero snapshots, but got %d", n)
}
checker.TestCheckRepo(t, repo)
}
func BenchmarkArchiveReader(t *testing.B) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
const size = 50 * 1024 * 1024
buf := make([]byte, size)
_, err := io.ReadFull(fakeFile(t, 23, size), buf)
if err != nil {
t.Fatal(err)
}
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
t.SetBytes(size)
t.ResetTimer()
for i := 0; i < t.N; i++ {
_, _, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(buf), nil)
if err != nil {
t.Fatal(err)
}
}
}

View File

@@ -0,0 +1,835 @@
package archiver
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"restic"
"sort"
"sync"
"time"
"restic/errors"
"restic/walk"
"restic/debug"
"restic/fs"
"restic/pipe"
"github.com/restic/chunker"
)
const (
maxConcurrentBlobs = 32
maxConcurrency = 10
)
var archiverPrintWarnings = func(path string, fi os.FileInfo, err error) {
fmt.Fprintf(os.Stderr, "warning for %v: %v\n", path, err)
}
var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
// Archiver is used to backup a set of directories.
type Archiver struct {
repo restic.Repository
knownBlobs struct {
restic.IDSet
sync.Mutex
}
blobToken chan struct{}
Warn func(dir string, fi os.FileInfo, err error)
SelectFilter pipe.SelectFunc
Excludes []string
}
// New returns a new archiver.
func New(repo restic.Repository) *Archiver {
arch := &Archiver{
repo: repo,
blobToken: make(chan struct{}, maxConcurrentBlobs),
knownBlobs: struct {
restic.IDSet
sync.Mutex
}{
IDSet: restic.NewIDSet(),
},
}
for i := 0; i < maxConcurrentBlobs; i++ {
arch.blobToken <- struct{}{}
}
arch.Warn = archiverPrintWarnings
arch.SelectFilter = archiverAllowAllFiles
return arch
}
// isKnownBlob returns true iff the blob is already known, that is, present in
// the list of known blobs or in the repository index. When the blob is not yet
// known, false is returned and the blob is added to the list; the caller that
// receives false is responsible for saving the blob to the backend.
func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock()
if arch.knownBlobs.Has(id) {
return true
}
arch.knownBlobs.Insert(id)
_, err := arch.repo.Index().Lookup(id, t)
if err == nil {
return true
}
return false
}
// Save stores a blob in the repository. Blobs that are already known are not
// stored again.
func (arch *Archiver) Save(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id, t) {
debug.Log("blob %v is known\n", id.Str())
return nil
}
_, err := arch.repo.SaveBlob(ctx, t, data, id)
if err != nil {
debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
return err
}
debug.Log("Save(%v, %v): new blob\n", t, id.Str())
return nil
}
// SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(ctx context.Context, tree *restic.Tree) (restic.ID, error) {
data, err := json.Marshal(tree)
if err != nil {
return restic.ID{}, errors.Wrap(err, "Marshal")
}
data = append(data, '\n')
// check if tree has been saved before
id := restic.Hash(data)
if arch.isKnownBlob(id, restic.TreeBlob) {
return id, nil
}
return arch.repo.SaveBlob(ctx, restic.TreeBlob, data, id)
}
func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
fi, err := file.Stat()
if err != nil {
return nil, errors.Wrap(err, "restic.Stat")
}
if fi.ModTime() == node.ModTime {
return node, nil
}
arch.Warn(node.Path, fi, errors.New("file has changed"))
node, err = restic.NodeFromFileInfo(node.Path, fi)
if err != nil {
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
arch.Warn(node.Path, fi, err)
}
return node, nil
}
type saveResult struct {
id restic.ID
bytes uint64
}
func (arch *Archiver) saveChunk(ctx context.Context, chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
defer freeBuf(chunk.Data)
id := restic.Hash(chunk.Data)
err := arch.Save(ctx, restic.DataBlob, chunk.Data, id)
// TODO handle error
if err != nil {
debug.Log("Save(%v) failed: %v", id.Str(), err)
panic(err)
}
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
arch.blobToken <- token
resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
}
func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) {
results := []saveResult{}
for _, ch := range resultChannels {
results = append(results, <-ch)
}
if len(results) != len(resultChannels) {
return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
}
return results, nil
}
func updateNodeContent(node *restic.Node, results []saveResult) error {
debug.Log("checking size for file %s", node.Path)
var bytes uint64
node.Content = make([]restic.ID, len(results))
for i, b := range results {
node.Content[i] = b.id
bytes += b.bytes
debug.Log(" adding blob %s, %d bytes", b.id.Str(), b.bytes)
}
if bytes != node.Size {
fmt.Fprintf(os.Stderr, "warning for %v: expected %d bytes, saved %d bytes\n", node.Path, node.Size, bytes)
}
debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results))
return nil
}
// SaveFile chunks the content of the file and stores each chunk as a blob in
// the repository by calling Save.
func (arch *Archiver) SaveFile(ctx context.Context, p *restic.Progress, node *restic.Node) (*restic.Node, error) {
file, err := fs.Open(node.Path)
if err != nil {
return node, errors.Wrap(err, "Open")
}
defer file.Close()
debug.RunHook("archiver.SaveFile", node.Path)
node, err = arch.reloadFileIfChanged(node, file)
if err != nil {
return node, err
}
chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
resultChannels := [](<-chan saveResult){}
for {
chunk, err := chnker.Next(getBuf())
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return node, errors.Wrap(err, "chunker.Next")
}
resCh := make(chan saveResult, 1)
go arch.saveChunk(ctx, chunk, p, <-arch.blobToken, file, resCh)
resultChannels = append(resultChannels, resCh)
}
results, err := waitForResults(resultChannels)
if err != nil {
return node, err
}
err = updateNodeContent(node, results)
return node, err
}
func (arch *Archiver) fileWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, entCh <-chan pipe.Entry) {
defer func() {
debug.Log("done")
wg.Done()
}()
for {
select {
case e, ok := <-entCh:
if !ok {
// channel is closed
return
}
debug.Log("got job %v", e)
// check for errors
if e.Error() != nil {
debug.Log("job %v has errors: %v", e.Path(), e.Error())
// TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
// ignore this file
e.Result() <- nil
p.Report(restic.Stat{Errors: 1})
continue
}
node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
if err != nil {
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
arch.Warn(e.Fullpath(), e.Info(), err)
}
// try to use old node, if present
if e.Node != nil {
debug.Log(" %v use old data", e.Path())
oldNode := e.Node.(*restic.Node)
// check if all content is still available in the repository
contentMissing := false
for _, blob := range oldNode.Content {
if !arch.repo.Index().Has(blob, restic.DataBlob) {
debug.Log(" %v not using old data, %v is missing", e.Path(), blob.Str())
contentMissing = true
break
}
}
if !contentMissing {
node.Content = oldNode.Content
debug.Log(" %v content is complete", e.Path())
}
} else {
debug.Log(" %v no old data", e.Path())
}
// otherwise read file normally
if node.Type == "file" && len(node.Content) == 0 {
debug.Log(" read and save %v", e.Path())
node, err = arch.SaveFile(ctx, p, node)
if err != nil {
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
arch.Warn(e.Path(), nil, err)
// ignore this file
e.Result() <- nil
p.Report(restic.Stat{Errors: 1})
continue
}
} else {
// report old data size
p.Report(restic.Stat{Bytes: node.Size})
}
debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content))
e.Result() <- node
p.Report(restic.Stat{Files: 1})
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
func (arch *Archiver) dirWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, dirCh <-chan pipe.Dir) {
debug.Log("start")
defer func() {
debug.Log("done")
wg.Done()
}()
for {
select {
case dir, ok := <-dirCh:
if !ok {
// channel is closed
return
}
debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
// ignore dir nodes with errors
if dir.Error() != nil {
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
dir.Result() <- nil
p.Report(restic.Stat{Errors: 1})
continue
}
tree := restic.NewTree()
// wait for all content
for _, ch := range dir.Entries {
debug.Log("receiving result from %v", ch)
res := <-ch
// if we get a nil pointer here, an error has happened while
// processing this entry. Ignore it for now.
if res == nil {
debug.Log("got nil result?")
continue
}
// else insert node
node := res.(*restic.Node)
if node.Type == "dir" {
debug.Log("got tree node for %s: %v", node.Path, node.Subtree)
if node.Subtree == nil {
debug.Log("subtree is nil for node %v", node.Path)
continue
}
if node.Subtree.IsNull() {
panic("invalid null subtree restic.ID")
}
}
tree.Insert(node)
}
node := &restic.Node{}
if dir.Path() != "" && dir.Info() != nil {
n, err := restic.NodeFromFileInfo(dir.Fullpath(), dir.Info())
if err != nil {
arch.Warn(dir.Path(), dir.Info(), err)
}
node = n
}
if err := dir.Error(); err != nil {
node.Error = err.Error()
}
id, err := arch.SaveTreeJSON(ctx, tree)
if err != nil {
panic(err)
}
debug.Log("save tree for %s: %v", dir.Path(), id.Str())
if id.IsNull() {
panic("invalid null subtree restic.ID return from SaveTreeJSON()")
}
node.Subtree = &id
debug.Log("sending result to %v", dir.Result())
dir.Result() <- node
if dir.Path() != "" {
p.Report(restic.Stat{Dirs: 1})
}
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
type archivePipe struct {
Old <-chan walk.TreeJob
New <-chan pipe.Job
}
func copyJobs(ctx context.Context, in <-chan pipe.Job, out chan<- pipe.Job) {
var (
// disable sending on the outCh until we received a job
outCh chan<- pipe.Job
// enable receiving from in
inCh = in
job pipe.Job
ok bool
)
for {
select {
case <-ctx.Done():
return
case job, ok = <-inCh:
if !ok {
// input channel closed, we're done
debug.Log("input channel closed, we're done")
return
}
inCh = nil
outCh = out
case outCh <- job:
outCh = nil
inCh = in
}
}
}
type archiveJob struct {
hasOld bool
old walk.TreeJob
new pipe.Job
}
func (a *archivePipe) compare(ctx context.Context, out chan<- pipe.Job) {
defer func() {
close(out)
debug.Log("done")
}()
debug.Log("start")
var (
loadOld, loadNew bool = true, true
ok bool
oldJob walk.TreeJob
newJob pipe.Job
)
for {
if loadOld {
oldJob, ok = <-a.Old
// if the old channel is closed, just pass through the new jobs
if !ok {
debug.Log("old channel is closed, copy from new channel")
// handle remaining newJob
if !loadNew {
out <- archiveJob{new: newJob}.Copy()
}
copyJobs(ctx, a.New, out)
return
}
loadOld = false
}
if loadNew {
newJob, ok = <-a.New
// if the new channel is closed, there are no more files in the current snapshot, return
if !ok {
debug.Log("new channel is closed, we're done")
return
}
loadNew = false
}
debug.Log("old job: %v", oldJob.Path)
debug.Log("new job: %v", newJob.Path())
// at this point we have received an old job as well as a new job, compare paths
file1 := oldJob.Path
file2 := newJob.Path()
dir1 := filepath.Dir(file1)
dir2 := filepath.Dir(file2)
if file1 == file2 {
debug.Log(" same filename %q", file1)
// send job
out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy()
loadOld = true
loadNew = true
continue
} else if dir1 < dir2 {
debug.Log(" %q < %q, file %q added", dir1, dir2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
continue
} else if dir1 == dir2 {
if file1 < file2 {
debug.Log(" %q < %q, file %q removed", file1, file2, file1)
// file has been removed, load the next old job
loadOld = true
continue
} else {
debug.Log(" %q > %q, file %q added", file1, file2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
continue
}
}
debug.Log(" %q > %q, file %q removed", file1, file2, file1)
// file has been removed, throw away old job and load new
loadOld = true
}
}
func (j archiveJob) Copy() pipe.Job {
if !j.hasOld {
return j.new
}
// handle files
if isRegularFile(j.new.Info()) {
debug.Log(" job %v is file", j.new.Path())
// if type has changed, return new job directly
if j.old.Node == nil {
return j.new
}
// if file is newer, return the new job
if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log(" job %v is newer", j.new.Path())
return j.new
}
debug.Log(" job %v add old data", j.new.Path())
// otherwise annotate job with old data
e := j.new.(pipe.Entry)
e.Node = j.old.Node
return e
}
// dirs and other types are just returned
return j.new
}
const saveIndexTime = 30 * time.Second
// saveIndexes regularly queries the master index for full indexes and saves them.
func (arch *Archiver) saveIndexes(ctx context.Context, wg *sync.WaitGroup) {
defer wg.Done()
ticker := time.NewTicker(saveIndexTime)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
debug.Log("saving full indexes")
err := arch.repo.SaveFullIndex(ctx)
if err != nil {
debug.Log("save indexes returned an error: %v", err)
fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err)
}
}
}
}
// unique returns a slice that contains each string only once; the order of
// the result is undefined.
func unique(items []string) []string {
seen := make(map[string]struct{})
for _, item := range items {
seen[item] = struct{}{}
}
items = items[:0]
for item := range seen {
items = append(items, item)
}
return items
}
// baseNameSlice allows sorting paths by basename.
//
// Snapshots have contents sorted by basename, but we receive full paths.
// For the archivePipe to advance them in pairs, we traverse the given
// paths in the same order as the snapshot.
type baseNameSlice []string
func (p baseNameSlice) Len() int { return len(p) }
func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(ctx context.Context, p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
paths = unique(paths)
sort.Sort(baseNameSlice(paths))
debug.Log("start for %v", paths)
debug.RunHook("Archiver.Snapshot", nil)
var err error
p.Start()
defer p.Done()
// create new snapshot
sn, err := restic.NewSnapshot(paths, tags, hostname)
if err != nil {
return nil, restic.ID{}, err
}
sn.Excludes = arch.Excludes
jobs := archivePipe{}
// use parent snapshot (if some was given)
if parentID != nil {
sn.Parent = parentID
// load parent snapshot
parent, err := restic.LoadSnapshot(ctx, arch.repo, *parentID)
if err != nil {
return nil, restic.ID{}, err
}
// start walker on old tree
ch := make(chan walk.TreeJob)
go walk.Tree(ctx, arch.repo, *parent.Tree, ch)
jobs.Old = ch
} else {
// use closed channel
ch := make(chan walk.TreeJob)
close(ch)
jobs.Old = ch
}
// start walker
pipeCh := make(chan pipe.Job)
resCh := make(chan pipe.Result, 1)
go func() {
pipe.Walk(ctx, paths, arch.SelectFilter, pipeCh, resCh)
debug.Log("pipe.Walk done")
}()
jobs.New = pipeCh
ch := make(chan pipe.Job)
go jobs.compare(ctx, ch)
var wg sync.WaitGroup
entCh := make(chan pipe.Entry)
dirCh := make(chan pipe.Dir)
// split
wg.Add(1)
go func() {
pipe.Split(ch, dirCh, entCh)
debug.Log("split done")
close(dirCh)
close(entCh)
wg.Done()
}()
// run workers
for i := 0; i < maxConcurrency; i++ {
wg.Add(2)
go arch.fileWorker(ctx, &wg, p, entCh)
go arch.dirWorker(ctx, &wg, p, dirCh)
}
// run index saver
var wgIndexSaver sync.WaitGroup
indexCtx, indexCancel := context.WithCancel(ctx)
wgIndexSaver.Add(1)
go arch.saveIndexes(indexCtx, &wgIndexSaver)
// wait for all workers to terminate
debug.Log("wait for workers")
wg.Wait()
// stop index saver
indexCancel()
wgIndexSaver.Wait()
debug.Log("workers terminated")
// flush repository
err = arch.repo.Flush()
if err != nil {
return nil, restic.ID{}, err
}
// receive the top-level tree
root := (<-resCh).(*restic.Node)
debug.Log("root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree
// load top-level tree again to see if it is empty
toptree, err := arch.repo.LoadTree(ctx, *root.Subtree)
if err != nil {
return nil, restic.ID{}, err
}
if len(toptree.Nodes) == 0 {
return nil, restic.ID{}, errors.Fatal("no files/dirs saved, refusing to create empty snapshot")
}
// save index
err = arch.repo.SaveIndex(ctx)
if err != nil {
debug.Log("error saving index: %v", err)
return nil, restic.ID{}, err
}
debug.Log("saved indexes")
// save snapshot
id, err := arch.repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("saved snapshot %v", id.Str())
return sn, id, nil
}
func isRegularFile(fi os.FileInfo) bool {
if fi == nil {
return false
}
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// Scan traverses the dirs to collect restic.Stat information while emitting progress
// information with p.
func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
p.Start()
defer p.Done()
var stat restic.Stat
for _, dir := range dirs {
debug.Log("Start for %v", dir)
err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error {
// TODO: integrate error reporting
if err != nil {
fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err)
return nil
}
if fi == nil {
fmt.Fprintf(os.Stderr, "error for %v: FileInfo is nil\n", str)
return nil
}
if !filter(str, fi) {
debug.Log("path %v excluded", str)
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
s := restic.Stat{}
if fi.IsDir() {
s.Dirs++
} else {
s.Files++
if isRegularFile(fi) {
s.Bytes += uint64(fi.Size())
}
}
p.Report(s)
stat.Add(s)
// TODO: handle error?
return nil
})
debug.Log("Done for %v, err: %v", dir, err)
if err != nil {
return restic.Stat{}, errors.Wrap(err, "fs.Walk")
}
}
return stat, nil
}
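For reference, a minimal sketch of driving the archiver (not part of this commit; it mirrors archiver_test.go below and assumes an open restic.Repository):

// Hypothetical helper: snapshot a set of paths, optionally reusing a parent
// snapshot for change detection.
func backupPaths(ctx context.Context, repo restic.Repository, paths []string, parent *restic.ID) (restic.ID, error) {
	arch := New(repo)
	_, id, err := arch.Snapshot(ctx, nil, paths, []string{"example"}, "localhost", parent)
	return id, err
}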

@@ -0,0 +1,157 @@
package archiver_test
import (
"context"
"crypto/rand"
"io"
mrand "math/rand"
"sync"
"testing"
"time"
"restic/errors"
"restic"
"restic/archiver"
"restic/mock"
"restic/repository"
)
const parallelSaves = 50
const testSaveIndexTime = 100 * time.Millisecond
const testTimeout = 2 * time.Second
var DupID restic.ID
func randomID() restic.ID {
if mrand.Float32() < 0.5 {
return DupID
}
id := restic.ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}
// forgetfulBackend returns a backend that forgets everything.
func forgetfulBackend() restic.Backend {
be := &mock.Backend{}
be.TestFn = func(ctx context.Context, h restic.Handle) (bool, error) {
return false, nil
}
be.LoadFn = func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
return nil, errors.New("not found")
}
be.SaveFn = func(ctx context.Context, h restic.Handle, rd io.Reader) error {
return nil
}
be.StatFn = func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
return restic.FileInfo{}, errors.New("not found")
}
be.RemoveFn = func(ctx context.Context, h restic.Handle) error {
return nil
}
be.ListFn = func(ctx context.Context, t restic.FileType) <-chan string {
ch := make(chan string)
close(ch)
return ch
}
be.DeleteFn = func(ctx context.Context) error {
return nil
}
return be
}
func testArchiverDuplication(t *testing.T) {
_, err := io.ReadFull(rand.Reader, DupID[:])
if err != nil {
t.Fatal(err)
}
repo := repository.New(forgetfulBackend())
err = repo.Init(context.TODO(), "foo")
if err != nil {
t.Fatal(err)
}
arch := archiver.New(repo)
wg := &sync.WaitGroup{}
done := make(chan struct{})
for i := 0; i < parallelSaves; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-done:
return
default:
}
id := randomID()
if repo.Index().Has(id, restic.DataBlob) {
continue
}
buf := make([]byte, 50)
err := arch.Save(context.TODO(), restic.DataBlob, buf, id)
if err != nil {
t.Error(err) // t.Fatal must not be called from a goroutine
return
}
}
}()
}
saveIndex := func() {
defer wg.Done()
ticker := time.NewTicker(testSaveIndexTime)
defer ticker.Stop()
for {
select {
case <-done:
return
case <-ticker.C:
err := repo.SaveFullIndex(context.TODO())
if err != nil {
t.Error(err) // t.Fatal must not be called from a goroutine
return
}
}
}
}
wg.Add(1)
go saveIndex()
<-time.After(testTimeout)
close(done)
wg.Wait()
err = repo.Flush()
if err != nil {
t.Fatal(err)
}
}
func TestArchiverDuplication(t *testing.T) {
for i := 0; i < 5; i++ {
testArchiverDuplication(t)
}
}

@@ -0,0 +1,145 @@
package archiver
import (
"context"
"os"
"testing"
"restic/pipe"
"restic/walk"
)
var treeJobs = []string{
"foo/baz/subdir",
"foo/baz",
"foo",
"quu/bar/file1",
"quu/bar/file2",
"quu/foo/file1",
"quu/foo/file2",
"quu/foo/file3",
"quu/foo",
"quu/fooz",
"quu",
"yy/a",
"yy/b",
"yy",
}
var pipeJobs = []string{
"foo/baz/subdir",
"foo/baz/subdir2", // subdir2 added
"foo/baz",
"foo",
"quu/bar/.file1.swp", // file with . added
"quu/bar/file1",
"quu/bar/file2",
"quu/foo/file1", // file2 removed
"quu/foo/file3",
"quu/foo",
"quu",
"quv/file1", // files added and removed
"quv/file2",
"quv",
"yy",
"zz/file1", // files removed and added at the end
"zz/file2",
"zz",
}
var resultJobs = []struct {
path string
action string
}{
{"foo/baz/subdir", "same, not a file"},
{"foo/baz/subdir2", "new, no old job"},
{"foo/baz", "same, not a file"},
{"foo", "same, not a file"},
{"quu/bar/.file1.swp", "new, no old job"},
{"quu/bar/file1", "same, not a file"},
{"quu/bar/file2", "same, not a file"},
{"quu/foo/file1", "same, not a file"},
{"quu/foo/file3", "same, not a file"},
{"quu/foo", "same, not a file"},
{"quu", "same, not a file"},
{"quv/file1", "new, no old job"},
{"quv/file2", "new, no old job"},
{"quv", "new, no old job"},
{"yy", "same, not a file"},
{"zz/file1", "testPipeJob"},
{"zz/file2", "testPipeJob"},
{"zz", "testPipeJob"},
}
type testPipeJob struct {
path string
err error
fi os.FileInfo
res chan<- pipe.Result
}
func (j testPipeJob) Path() string { return j.path }
func (j testPipeJob) Fullpath() string { return j.path }
func (j testPipeJob) Error() error { return j.err }
func (j testPipeJob) Info() os.FileInfo { return j.fi }
func (j testPipeJob) Result() chan<- pipe.Result { return j.res }
func testTreeWalker(ctx context.Context, out chan<- walk.TreeJob) {
for _, e := range treeJobs {
select {
case <-ctx.Done():
return
case out <- walk.TreeJob{Path: e}:
}
}
close(out)
}
func testPipeWalker(ctx context.Context, out chan<- pipe.Job) {
for _, e := range pipeJobs {
select {
case <-ctx.Done():
return
case out <- testPipeJob{path: e}:
}
}
close(out)
}
func TestArchivePipe(t *testing.T) {
ctx := context.TODO()
treeCh := make(chan walk.TreeJob)
pipeCh := make(chan pipe.Job)
go testTreeWalker(ctx, treeCh)
go testPipeWalker(ctx, pipeCh)
p := archivePipe{Old: treeCh, New: pipeCh}
ch := make(chan pipe.Job)
go p.compare(ctx, ch)
i := 0
for job := range ch {
if job.Path() != resultJobs[i].path {
t.Fatalf("wrong job received: wanted %v, got %v", resultJobs[i], job)
}
// switch j := job.(type) {
// case archivePipeJob:
// if j.action != resultJobs[i].action {
// t.Fatalf("wrong action for %v detected: wanted %q, got %q", job.Path(), resultJobs[i].action, j.action)
// }
// case testPipeJob:
// if resultJobs[i].action != "testPipeJob" {
// t.Fatalf("unexpected testPipeJob, expected %q: %v", resultJobs[i].action, j)
// }
// }
i++
}
}

@@ -0,0 +1,314 @@
package archiver_test
import (
"bytes"
"context"
"io"
"testing"
"time"
"restic"
"restic/archiver"
"restic/checker"
"restic/crypto"
"restic/repository"
. "restic/test"
"restic/errors"
"github.com/restic/chunker"
)
var testPol = chunker.Pol(0x3DA3358B4DC173)
type Rdr interface {
io.ReadSeeker
io.ReaderAt
}
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
rd.Seek(0, io.SeekStart)
ch := chunker.New(rd, testPol)
for {
chunk, err := ch.Next(buf)
if errors.Cause(err) == io.EOF {
break
}
OK(b, err)
Assert(b, uint(len(chunk.Data)) == chunk.Length,
"invalid length: got %d, expected %d", len(chunk.Data), chunk.Length)
_, err = key.Encrypt(buf2, chunk.Data)
OK(b, err)
}
}
func BenchmarkChunkEncrypt(b *testing.B) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
data := Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data)
buf := make([]byte, chunker.MaxSize)
buf2 := make([]byte, chunker.MaxSize)
b.ResetTimer()
b.SetBytes(int64(len(data)))
for i := 0; i < b.N; i++ {
benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key())
}
}
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
ch := chunker.New(rd, testPol)
for {
chunk, err := ch.Next(buf)
if errors.Cause(err) == io.EOF {
break
}
// encrypt the chunk in place; errors are ignored in this benchmark
key.Encrypt(chunk.Data, chunk.Data)
}
}
func BenchmarkChunkEncryptParallel(b *testing.B) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
data := Random(23, 10<<20) // 10MiB
buf := make([]byte, chunker.MaxSize)
b.ResetTimer()
b.SetBytes(int64(len(data)))
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
rd := bytes.NewReader(data)
benchmarkChunkEncryptP(pb, buf, rd, repo.Key())
}
})
}
func archiveDirectory(b testing.TB) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
arch := archiver.New(repo)
_, id, err := arch.Snapshot(context.TODO(), nil, []string{BenchArchiveDirectory}, nil, "localhost", nil)
OK(b, err)
b.Logf("snapshot archived as %v", id)
}
func TestArchiveDirectory(t *testing.T) {
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiveDirectory")
}
archiveDirectory(t)
}
func BenchmarkArchiveDirectory(b *testing.B) {
if BenchArchiveDirectory == "" {
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
}
for i := 0; i < b.N; i++ {
archiveDirectory(b)
}
}
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
for range repo.Backend().List(context.TODO(), t) {
n++
}
return n
}
func archiveWithDedup(t testing.TB) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup")
}
var cnt struct {
before, after, after2 struct {
packs, dataBlobs, treeBlobs uint
}
}
// archive a few files
sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats
cnt.before.packs = countPacks(repo, restic.DataFile)
cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
// archive the same files again, without parent snapshot
sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again
cnt.after.packs = countPacks(repo, restic.DataFile)
cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
// if there are more data blobs, something is wrong
if cnt.after.dataBlobs > cnt.before.dataBlobs {
t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
cnt.before.dataBlobs, cnt.after.dataBlobs)
}
// archive the same files again, with a parent snapshot
sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again
cnt.after2.packs = countPacks(repo, restic.DataFile)
cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
// if there are more data blobs, something is wrong
if cnt.after2.dataBlobs > cnt.before.dataBlobs {
t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
cnt.before.dataBlobs, cnt.after2.dataBlobs)
}
}
func TestArchiveDedup(t *testing.T) {
archiveWithDedup(t)
}
// Saves several identical chunks concurrently and later checks that there are no
// unreferenced packs in the repository. See also #292 and #358.
func TestParallelSaveWithDuplication(t *testing.T) {
for seed := 0; seed < 10; seed++ {
testParallelSaveWithDuplication(t, seed)
}
}
func testParallelSaveWithDuplication(t *testing.T, seed int) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
dataSizeMb := 128
duplication := 7
arch := archiver.New(repo)
chunks := getRandomData(seed, dataSizeMb*1024*1024)
errChannels := [](<-chan error){}
// interwoven processing of subsequent chunks
maxParallel := 2*duplication - 1
barrier := make(chan struct{}, maxParallel)
for _, c := range chunks {
for dupIdx := 0; dupIdx < duplication; dupIdx++ {
errChan := make(chan error)
errChannels = append(errChannels, errChan)
go func(c chunker.Chunk, errChan chan<- error) {
barrier <- struct{}{}
id := restic.Hash(c.Data)
time.Sleep(time.Duration(id[0]))
err := arch.Save(context.TODO(), restic.DataBlob, c.Data, id)
<-barrier
errChan <- err
}(c, errChan)
}
}
for _, errChan := range errChannels {
OK(t, <-errChan)
}
OK(t, repo.Flush())
OK(t, repo.SaveIndex(context.TODO()))
chkr := createAndInitChecker(t, repo)
assertNoUnreferencedPacks(t, chkr)
}
func getRandomData(seed int, size int) []chunker.Chunk {
buf := Random(seed, size)
var chunks []chunker.Chunk
chnker := chunker.New(bytes.NewReader(buf), testPol)
for {
c, err := chnker.Next(nil)
if errors.Cause(err) == io.EOF {
break
}
chunks = append(chunks, c)
}
return chunks
}
func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
return chkr
}
func assertNoUnreferencedPacks(t *testing.T, chkr *checker.Checker) {
errChan := make(chan error)
go chkr.Packs(context.TODO(), errChan)
for err := range errChan {
OK(t, err)
}
}
func TestArchiveEmptySnapshot(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := archiver.New(repo)
sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil)
if err == nil {
t.Errorf("expected error for empty snapshot, got nil")
}
if !id.IsNull() {
t.Errorf("expected null ID for empty snapshot, got %v", id.Str())
}
if sn != nil {
t.Errorf("expected null snapshot for empty snapshot, got %v", sn)
}
}

@@ -0,0 +1,21 @@
package archiver
import (
"sync"
"github.com/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}
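The pool hands out chunker.MinSize buffers; the chunker may return a grown buffer, which is why callers pass chunk.Data back rather than the original slice. A sketch of the intended borrow/return cycle (hypothetical helper; imports of io, restic and restic/errors assumed):

// Hypothetical helper: hash all chunks of a stream while recycling buffers
// through the pool above, as Reader.Archive and Archiver.SaveFile do.
func hashChunks(rd io.Reader, pol chunker.Pol) ([]restic.ID, error) {
	chnker := chunker.New(rd, pol)
	var ids []restic.ID
	for {
		chunk, err := chnker.Next(getBuf()) // borrow one buffer per chunk
		if errors.Cause(err) == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		ids = append(ids, restic.Hash(chunk.Data))
		freeBuf(chunk.Data) // chunk.Data aliases the pooled buffer
	}
	return ids, nil
}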

@@ -0,0 +1,17 @@
package archiver
import (
"context"
"restic"
"testing"
)
// TestSnapshot creates a new snapshot of path.
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := New(repo)
sn, _, err := arch.Snapshot(context.TODO(), nil, []string{path}, []string{"test"}, "localhost", parent)
if err != nil {
t.Fatal(err)
}
return sn
}

internal/backend.go
@@ -0,0 +1,47 @@
package restic
import (
"context"
"io"
)
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test returns whether a File with the given name and type exists.
Test(ctx context.Context, h Handle) (bool, error)
// Remove removes the File referenced by h.
Remove(ctx context.Context, h Handle) error
// Close closes the backend.
Close() error
// Save stores the data in the backend under the given handle.
Save(ctx context.Context, h Handle, rd io.Reader) error
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is larger than zero, only a portion of the file
// is returned. rd must be closed after use. If an error is returned, the
// ReadCloser must be nil.
Load(ctx context.Context, h Handle, length int, offset int64) (io.ReadCloser, error)
// Stat returns information about the File identified by h.
Stat(ctx context.Context, h Handle) (FileInfo, error)
// List returns a channel that yields all names of files of type t in an
// arbitrary order. A goroutine is started for this, which is stopped when
// ctx is cancelled.
List(ctx context.Context, t FileType) <-chan string
// IsNotExist returns true if the error was caused by a non-existing file
// in the backend.
IsNotExist(err error) bool
}
// FileInfo is returned by Stat() and contains information about a file in the
// backend.
type FileInfo struct{ Size int64 }
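For illustration, the smallest thing that satisfies this interface (hypothetical, deliberately useless; imports of errors and io/ioutil assumed — the mock.Backend used in the archiver tests is the real test double):

// Hypothetical: a no-op Backend that stores nothing and reports every file
// as absent.
type nopBackend struct{}

func (nopBackend) Location() string                                 { return "nop" }
func (nopBackend) Test(ctx context.Context, h Handle) (bool, error) { return false, nil }
func (nopBackend) Remove(ctx context.Context, h Handle) error       { return nil }
func (nopBackend) Close() error                                     { return nil }
func (nopBackend) Save(ctx context.Context, h Handle, rd io.Reader) error {
	_, err := io.Copy(ioutil.Discard, rd) // drain and discard the data
	return err
}
func (nopBackend) Load(ctx context.Context, h Handle, length int, offset int64) (io.ReadCloser, error) {
	return nil, errors.New("not found")
}
func (nopBackend) Stat(ctx context.Context, h Handle) (FileInfo, error) {
	return FileInfo{}, errors.New("not found")
}
func (nopBackend) List(ctx context.Context, t FileType) <-chan string {
	ch := make(chan string)
	close(ch) // nothing stored, so yield no names
	return ch
}
func (nopBackend) IsNotExist(err error) bool { return true }

// compile-time check, in the style used by the b2 backend below
var _ Backend = nopBackend{}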

internal/backend/b2/b2.go
@@ -0,0 +1,377 @@
package b2
import (
"context"
"io"
"path"
"restic"
"strings"
"restic/backend"
"restic/debug"
"restic/errors"
"github.com/kurin/blazer/b2"
)
// b2Backend is a backend which stores its data on Backblaze B2.
type b2Backend struct {
client *b2.Client
bucket *b2.Bucket
cfg Config
backend.Layout
sem *backend.Semaphore
}
// ensure statically that *b2Backend implements restic.Backend.
var _ restic.Backend = &b2Backend{}
func newClient(ctx context.Context, cfg Config) (*b2.Client, error) {
opts := []b2.ClientOption{b2.Transport(backend.Transport())}
c, err := b2.NewClient(ctx, cfg.AccountID, cfg.Key, opts...)
if err != nil {
return nil, errors.Wrap(err, "b2.NewClient")
}
return c, nil
}
// Open opens a connection to the B2 service.
func Open(cfg Config) (restic.Backend, error) {
debug.Log("cfg %#v", cfg)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
client, err := newClient(ctx, cfg)
if err != nil {
return nil, err
}
bucket, err := client.Bucket(ctx, cfg.Bucket)
if err != nil {
return nil, errors.Wrap(err, "Bucket")
}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
be := &b2Backend{
client: client,
bucket: bucket,
cfg: cfg,
Layout: &backend.DefaultLayout{
Join: path.Join,
Path: cfg.Prefix,
},
sem: sem,
}
return be, nil
}
// Create opens a connection to the B2 service. If the bucket does not exist yet,
// it is created.
func Create(cfg Config) (restic.Backend, error) {
debug.Log("cfg %#v", cfg)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
client, err := newClient(ctx, cfg)
if err != nil {
return nil, err
}
attr := b2.BucketAttrs{
Type: b2.Private,
}
bucket, err := client.NewBucket(ctx, cfg.Bucket, &attr)
if err != nil {
return nil, errors.Wrap(err, "NewBucket")
}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
be := &b2Backend{
client: client,
bucket: bucket,
cfg: cfg,
Layout: &backend.DefaultLayout{
Join: path.Join,
Path: cfg.Prefix,
},
sem: sem,
}
present, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if present {
return nil, errors.New("config already exists")
}
return be, nil
}
// Location returns the location for the backend.
func (be *b2Backend) Location() string {
return be.cfg.Bucket
}
// wrapReader wraps an io.ReadCloser to run an additional function on Close
// and to return io.EOF for any Read after EOF has been seen once.
type wrapReader struct {
io.ReadCloser
eofSeen bool
f func()
}
func (wr *wrapReader) Read(p []byte) (int, error) {
if wr.eofSeen {
return 0, io.EOF
}
n, err := wr.ReadCloser.Read(p)
if err == io.EOF {
wr.eofSeen = true
}
return n, err
}
func (wr *wrapReader) Close() error {
err := wr.ReadCloser.Close()
wr.f()
return err
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (be *b2Backend) IsNotExist(err error) bool {
return b2.IsNotExist(errors.Cause(err))
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is larger than zero, only a portion of the file
// is returned.
func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
ctx, cancel := context.WithCancel(ctx)
be.sem.GetToken()
name := be.Layout.Filename(h)
obj := be.bucket.Object(name)
if offset == 0 && length == 0 {
rd := obj.NewReader(ctx)
wrapper := &wrapReader{
ReadCloser: rd,
f: func() {
cancel()
be.sem.ReleaseToken()
},
}
return wrapper, nil
}
// pass a negative length to NewRangeReader so that the remainder of the
// file is read.
if length == 0 {
length = -1
}
rd := obj.NewRangeReader(ctx, offset, int64(length))
wrapper := &wrapReader{
ReadCloser: rd,
f: func() {
cancel()
be.sem.ReleaseToken()
},
}
return wrapper, nil
}
// Save stores data in the backend at the handle.
func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := h.Valid(); err != nil {
return err
}
be.sem.GetToken()
defer be.sem.ReleaseToken()
name := be.Filename(h)
debug.Log("Save %v, name %v", h, name)
obj := be.bucket.Object(name)
_, err = obj.Attrs(ctx)
if err == nil {
debug.Log(" %v already exists", h)
return errors.New("key already exists")
}
w := obj.NewWriter(ctx)
n, err := io.Copy(w, rd)
debug.Log(" saved %d bytes, err %v", n, err)
if err != nil {
_ = w.Close()
return errors.Wrap(err, "Copy")
}
return errors.Wrap(w.Close(), "Close")
}
// Stat returns information about a blob.
func (be *b2Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("Stat %v", h)
be.sem.GetToken()
defer be.sem.ReleaseToken()
name := be.Filename(h)
obj := be.bucket.Object(name)
info, err := obj.Attrs(ctx)
if err != nil {
debug.Log("Attrs() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: info.Size}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *b2Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
debug.Log("Test %v", h)
be.sem.GetToken()
defer be.sem.ReleaseToken()
found := false
name := be.Filename(h)
obj := be.bucket.Object(name)
info, err := obj.Attrs(ctx)
if err == nil && info != nil && info.Status == b2.Uploaded {
found = true
}
return found, nil
}
// Remove removes the blob with the given name and type.
func (be *b2Backend) Remove(ctx context.Context, h restic.Handle) error {
debug.Log("Remove %v", h)
be.sem.GetToken()
defer be.sem.ReleaseToken()
obj := be.bucket.Object(be.Filename(h))
return errors.Wrap(obj.Delete(ctx), "Delete")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this, which is stopped when ctx is cancelled.
func (be *b2Backend) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("List %v", t)
ch := make(chan string)
ctx, cancel := context.WithCancel(ctx)
be.sem.GetToken()
go func() {
defer close(ch)
defer cancel()
defer be.sem.ReleaseToken()
prefix := be.Dirname(restic.Handle{Type: t})
cur := &b2.Cursor{Prefix: prefix}
for {
objs, c, err := be.bucket.ListCurrentObjects(ctx, 1000, cur)
if err != nil && err != io.EOF {
return
}
for _, obj := range objs {
// Skip objects returned that do not have the specified prefix.
if !strings.HasPrefix(obj.Name(), prefix) {
continue
}
m := path.Base(obj.Name())
if m == "" {
continue
}
select {
case ch <- m:
case <-ctx.Done():
return
}
}
if err == io.EOF {
return
}
cur = c
}
}()
return ch
}
// removeKeys removes all keys of the given type from the bucket.
func (be *b2Backend) removeKeys(ctx context.Context, t restic.FileType) error {
debug.Log("removeKeys %v", t)
for key := range be.List(ctx, t) {
err := be.Remove(ctx, restic.Handle{Type: t, Name: key})
if err != nil {
return err
}
}
return nil
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *b2Backend) Delete(ctx context.Context) error {
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(ctx, t)
if err != nil {
return err
}
}
err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil && b2.IsNotExist(errors.Cause(err)) {
err = nil
}
return err
}
// Close does nothing
func (be *b2Backend) Close() error { return nil }
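For reference, a sketch wiring config parsing (config.go, below) to an open backend (hypothetical helper; the environment variable names are illustrative and an os import is assumed):

// Hypothetical: open a B2 backend from a repository string such as
// "b2:bucket-name:prefix".
func openFromString(s string) (restic.Backend, error) {
	raw, err := ParseConfig(s)
	if err != nil {
		return nil, err
	}
	cfg := raw.(Config)
	cfg.AccountID = os.Getenv("B2_ACCOUNT_ID") // illustrative variable names
	cfg.Key = os.Getenv("B2_ACCOUNT_KEY")
	return Open(cfg)
}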

@@ -0,0 +1,97 @@
package b2_test
import (
"context"
"fmt"
"os"
"testing"
"time"
"restic"
"restic/backend/b2"
"restic/backend/test"
. "restic/test"
)
func newB2TestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// do not use excessive data
MinimalData: true,
// wait for at most 10 seconds for removed files to disappear
WaitForDelayedRemoval: 10 * time.Second,
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
b2cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY"))
if err != nil {
return nil, err
}
cfg := b2cfg.(b2.Config)
cfg.AccountID = os.Getenv("RESTIC_TEST_B2_ACCOUNT_ID")
cfg.Key = os.Getenv("RESTIC_TEST_B2_ACCOUNT_KEY")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(b2.Config)
return b2.Create(cfg)
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(b2.Config)
return b2.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(b2.Config)
be, err := b2.Open(cfg)
if err != nil {
return err
}
if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
return err
}
return nil
},
}
}
func testVars(t testing.TB) {
vars := []string{
"RESTIC_TEST_B2_ACCOUNT_ID",
"RESTIC_TEST_B2_ACCOUNT_KEY",
"RESTIC_TEST_B2_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
}
func TestBackendB2(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/b2.TestBackendB2")
}
}()
testVars(t)
newB2TestSuite(t).RunTests(t)
}
func BenchmarkBackendb2(t *testing.B) {
testVars(t)
newB2TestSuite(t).RunBenchmarks(t)
}

@@ -0,0 +1,93 @@
package b2
import (
"path"
"regexp"
"strings"
"restic/errors"
"restic/options"
)
// Config contains all configuration necessary to connect to a B2 compatible
// server.
type Config struct {
AccountID string
Key string
Bucket string
Prefix string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
// NewConfig returns a new config with default options applied.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
func init() {
options.Register("b2", Config{})
}
var bucketName = regexp.MustCompile("^[a-zA-Z0-9-]+$")
// checkBucketName tests the bucket name against the rules at
// https://help.backblaze.com/hc/en-us/articles/217666908-What-you-need-to-know-about-B2-Bucket-names
func checkBucketName(name string) error {
if name == "" {
return errors.New("bucket name is empty")
}
if len(name) < 6 {
return errors.New("bucket name is too short")
}
if len(name) > 50 {
return errors.New("bucket name is too long")
}
if !bucketName.MatchString(name) {
return errors.New("bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)")
}
return nil
}
// ParseConfig parses the string s and extracts the b2 config. The supported
// configuration format is b2:bucketname[:prefix].
func ParseConfig(s string) (interface{}, error) {
if !strings.HasPrefix(s, "b2:") {
return nil, errors.New("invalid format, want: b2:bucket-name[:path]")
}
s = s[3:]
data := strings.SplitN(s, ":", 2)
if len(data) == 0 || len(data[0]) == 0 {
return nil, errors.New("bucket name not found")
}
cfg := NewConfig()
cfg.Bucket = data[0]
if err := checkBucketName(cfg.Bucket); err != nil {
return nil, err
}
if len(data) == 2 {
p := data[1]
if len(p) > 0 {
p = path.Clean(p)
}
if len(p) > 0 && path.IsAbs(p) {
p = p[1:]
}
cfg.Prefix = p
}
return cfg, nil
}

@@ -0,0 +1,92 @@
package b2
import "testing"
var configTests = []struct {
s string
cfg Config
}{
{"b2:bucketname", Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
{"b2:bucketname:", Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
{"b2:bucketname:/prefix/directory", Config{
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
{"b2:foobar", Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
{"b2:foobar:", Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
{"b2:foobar:/", Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
}
func TestParseConfig(t *testing.T) {
for _, test := range configTests {
t.Run("", func(t *testing.T) {
cfg, err := ParseConfig(test.s)
if err != nil {
t.Fatalf("%s failed: %v", test.s, err)
}
if cfg != test.cfg {
t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v",
test.s, test.cfg, cfg)
}
})
}
}
var invalidConfigTests = []struct {
s string
err string
}{
{
"b2",
"invalid format, want: b2:bucket-name[:path]",
},
{
"b2:",
"bucket name not found",
},
{
"b2:bucket_name",
"bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)",
},
{
"b2:bucketname/prefix/directory/",
"bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)",
},
}
func TestInvalidConfig(t *testing.T) {
for _, test := range invalidConfigTests {
t.Run("", func(t *testing.T) {
cfg, err := ParseConfig(test.s)
if err == nil {
t.Fatalf("expected error not found for invalid config: %v, cfg is:\n%#v", test.s, cfg)
}
if err.Error() != test.err {
t.Fatalf("unexpected error found, want:\n %v\ngot:\n %v", test.err, err.Error())
}
})
}
}

internal/backend/doc.go
@@ -0,0 +1,4 @@
// Package backend provides local and remote storage for restic repositories.
// All backends need to implement the Backend interface. There is a MemBackend,
// which stores all data in a map internally and can be used for testing.
package backend

@@ -0,0 +1,29 @@
package backend
import (
"net"
"net/http"
"restic/debug"
"time"
)
// Transport returns a new http.RoundTripper with default settings applied.
func Transport() http.RoundTripper {
// copied from net/http
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// wrap in the debug round tripper
return debug.RoundTripper(tr)
}
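A sketch of the intended call site (hypothetical; the b2 backend above feeds this transport into its client via b2.Transport):

// Hypothetical: HTTP-based backends would construct their client like this,
// so that the proxy settings, timeouts and debug logging all apply.
func newHTTPClient() *http.Client {
	return &http.Client{Transport: Transport()}
}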

internal/backend/layout.go
@@ -0,0 +1,167 @@
package backend
import (
"fmt"
"os"
"path/filepath"
"regexp"
"restic"
"restic/debug"
"restic/errors"
"restic/fs"
)
// Layout computes paths for file name storage.
type Layout interface {
Filename(restic.Handle) string
Dirname(restic.Handle) string
Basedir(restic.FileType) string
Paths() []string
Name() string
}
// Filesystem is the abstraction of a file system used for a backend.
type Filesystem interface {
Join(...string) string
ReadDir(string) ([]os.FileInfo, error)
IsNotExist(error) bool
}
// ensure statically that *LocalFilesystem implements Filesystem.
var _ Filesystem = &LocalFilesystem{}
// LocalFilesystem implements Filesystem in a local path.
type LocalFilesystem struct {
}
// ReadDir returns all entries of a directory.
func (l *LocalFilesystem) ReadDir(dir string) ([]os.FileInfo, error) {
f, err := fs.Open(dir)
if err != nil {
return nil, err
}
entries, err := f.Readdir(-1)
if err != nil {
return nil, errors.Wrap(err, "Readdir")
}
err = f.Close()
if err != nil {
return nil, errors.Wrap(err, "Close")
}
return entries, nil
}
// Join combines several path components to one.
func (l *LocalFilesystem) Join(paths ...string) string {
return filepath.Join(paths...)
}
// IsNotExist returns true for errors that are caused by not existing files.
func (l *LocalFilesystem) IsNotExist(err error) bool {
return os.IsNotExist(err)
}
var backendFilenameLength = len(restic.ID{}) * 2
var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength))
func hasBackendFile(fs Filesystem, dir string) (bool, error) {
entries, err := fs.ReadDir(dir)
if err != nil && fs.IsNotExist(errors.Cause(err)) {
return false, nil
}
if err != nil {
return false, errors.Wrap(err, "ReadDir")
}
for _, e := range entries {
if backendFilename.MatchString(e.Name()) {
return true, nil
}
}
return false, nil
}
// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout
// cannot be detected automatically.
var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed")
// DetectLayout tries to find out which layout is used in a local (or sftp)
// filesystem at the given path. If repo is nil, an instance of LocalFilesystem
// is used.
func DetectLayout(repo Filesystem, dir string) (Layout, error) {
debug.Log("detect layout at %v", dir)
if repo == nil {
repo = &LocalFilesystem{}
}
// key file in the "keys" dir (DefaultLayout)
foundKeysFile, err := hasBackendFile(repo, repo.Join(dir, defaultLayoutPaths[restic.KeyFile]))
if err != nil {
return nil, err
}
// key file in the "key" dir (S3LegacyLayout)
foundKeyFile, err := hasBackendFile(repo, repo.Join(dir, s3LayoutPaths[restic.KeyFile]))
if err != nil {
return nil, err
}
if foundKeysFile && !foundKeyFile {
debug.Log("found default layout at %v", dir)
return &DefaultLayout{
Path: dir,
Join: repo.Join,
}, nil
}
if foundKeyFile && !foundKeysFile {
debug.Log("found s3 layout at %v", dir)
return &S3LegacyLayout{
Path: dir,
Join: repo.Join,
}, nil
}
debug.Log("layout detection failed")
return nil, ErrLayoutDetectionFailed
}
// ParseLayout parses the config string and returns a Layout. When layout is
// the empty string, DetectLayout is used. If that fails, defaultLayout is used.
func ParseLayout(repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) {
debug.Log("parse layout string %q for backend at %v", layout, path)
switch layout {
case "default":
l = &DefaultLayout{
Path: path,
Join: repo.Join,
}
case "s3legacy":
l = &S3LegacyLayout{
Path: path,
Join: repo.Join,
}
case "":
l, err = DetectLayout(repo, path)
// use the default layout if auto detection failed
if errors.Cause(err) == ErrLayoutDetectionFailed && defaultLayout != "" {
debug.Log("error: %v, use default layout %v", err, defaultLayout)
return ParseLayout(repo, defaultLayout, "", path)
}
if err != nil {
return nil, err
}
debug.Log("layout detected: %v", l)
default:
return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout)
}
return l, nil
}
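A minimal sketch of how a backend typically drives ParseLayout; the repository path below is hypothetical. With an empty layout string, detection runs first and falls back to the given default when the directory holds no recognizable layout:

package main

import (
	"fmt"

	"restic/backend"
)

func main() {
	// detection fails for a nonexistent dir, so "default" is used
	l, err := backend.ParseLayout(&backend.LocalFilesystem{}, "", "default", "/srv/restic-repo")
	if err != nil {
		panic(err)
	}
	fmt.Println(l.Name()) // default
}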

View File

@@ -0,0 +1,73 @@
package backend
import (
"encoding/hex"
"restic"
)
// DefaultLayout implements the default layout for local and sftp backends, as
// described in the Design document. The `data` directory has one level of
// subdirs, two characters each (taken from the first two characters of the
// file name).
type DefaultLayout struct {
Path string
Join func(...string) string
}
var defaultLayoutPaths = map[restic.FileType]string{
restic.DataFile: "data",
restic.SnapshotFile: "snapshots",
restic.IndexFile: "index",
restic.LockFile: "locks",
restic.KeyFile: "keys",
}
func (l *DefaultLayout) String() string {
return "<DefaultLayout>"
}
// Name returns the name for this layout.
func (l *DefaultLayout) Name() string {
return "default"
}
// Dirname returns the directory path for a given file type and name.
func (l *DefaultLayout) Dirname(h restic.Handle) string {
p := defaultLayoutPaths[h.Type]
if h.Type == restic.DataFile && len(h.Name) > 2 {
p = l.Join(p, h.Name[:2]) + "/"
}
return l.Join(l.Path, p) + "/"
}
// Filename returns a path to a file, including its name.
func (l *DefaultLayout) Filename(h restic.Handle) string {
name := h.Name
if h.Type == restic.ConfigFile {
return l.Join(l.Path, "config")
}
return l.Join(l.Dirname(h), name)
}
// Paths returns all directory names needed for a repo.
func (l *DefaultLayout) Paths() (dirs []string) {
for _, p := range defaultLayoutPaths {
dirs = append(dirs, l.Join(l.Path, p))
}
// also add subdirs
for i := 0; i < 256; i++ {
subdir := hex.EncodeToString([]byte{byte(i)})
dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[restic.DataFile], subdir))
}
return dirs
}
// Basedir returns the base dir name for type t.
func (l *DefaultLayout) Basedir(t restic.FileType) string {
return l.Join(l.Path, defaultLayoutPaths[t])
}
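A small usage sketch for DefaultLayout, showing the two-character data subdir in action; the handle name is illustrative, real names are hex-encoded IDs:

package main

import (
	"fmt"
	"path"

	"restic"
	"restic/backend"
)

func main() {
	l := &backend.DefaultLayout{Path: "/srv/repo", Join: path.Join}
	h := restic.Handle{Type: restic.DataFile, Name: "ab12cd"}
	fmt.Println(l.Dirname(h))  // /srv/repo/data/ab/
	fmt.Println(l.Filename(h)) // /srv/repo/data/ab/ab12cd
}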

View File

@@ -0,0 +1,54 @@
package backend
import "restic"
// RESTLayout implements the default layout for the REST protocol.
type RESTLayout struct {
URL string
Path string
Join func(...string) string
}
var restLayoutPaths = defaultLayoutPaths
func (l *RESTLayout) String() string {
return "<RESTLayout>"
}
// Name returns the name for this layout.
func (l *RESTLayout) Name() string {
return "rest"
}
// Dirname returns the directory path for a given file type and name.
func (l *RESTLayout) Dirname(h restic.Handle) string {
if h.Type == restic.ConfigFile {
return l.URL + l.Join(l.Path, "/")
}
return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/"
}
// Filename returns a path to a file, including its name.
func (l *RESTLayout) Filename(h restic.Handle) string {
name := h.Name
if h.Type == restic.ConfigFile {
name = "config"
}
return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name)
}
// Paths returns all directory names.
func (l *RESTLayout) Paths() (dirs []string) {
for _, p := range restLayoutPaths {
dirs = append(dirs, l.URL+l.Join(l.Path, p))
}
return dirs
}
// Basedir returns the base dir name for files of type t.
func (l *RESTLayout) Basedir(t restic.FileType) string {
return l.URL + l.Join(l.Path, restLayoutPaths[t])
}
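A sketch of how RESTLayout concatenates the server URL with the joined path; the URL and handle name are made up:

package main

import (
	"fmt"
	"path"

	"restic"
	"restic/backend"
)

func main() {
	l := &backend.RESTLayout{URL: "http://localhost:8000", Path: "", Join: path.Join}
	h := restic.Handle{Type: restic.SnapshotFile, Name: "1234"}
	fmt.Println(l.Filename(h)) // http://localhost:8000/snapshots/1234
}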

View File

@@ -0,0 +1,77 @@
package backend
import "restic"
// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as
// described in the Design document.
type S3LegacyLayout struct {
URL string
Path string
Join func(...string) string
}
var s3LayoutPaths = map[restic.FileType]string{
restic.DataFile: "data",
restic.SnapshotFile: "snapshot",
restic.IndexFile: "index",
restic.LockFile: "lock",
restic.KeyFile: "key",
}
func (l *S3LegacyLayout) String() string {
return "<S3LegacyLayout>"
}
// Name returns the name for this layout.
func (l *S3LegacyLayout) Name() string {
return "s3legacy"
}
// join calls Join with leading empty elements removed.
func (l *S3LegacyLayout) join(url string, items ...string) string {
for len(items) > 0 && items[0] == "" {
items = items[1:]
}
path := l.Join(items...)
if path == "" || path[0] != '/' {
if url != "" && url[len(url)-1] != '/' {
url += "/"
}
}
return url + path
}
// Dirname returns the directory path for a given file type and name.
func (l *S3LegacyLayout) Dirname(h restic.Handle) string {
if h.Type == restic.ConfigFile {
return l.URL + l.Join(l.Path, "/")
}
return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/"
}
// Filename returns a path to a file, including its name.
func (l *S3LegacyLayout) Filename(h restic.Handle) string {
name := h.Name
if h.Type == restic.ConfigFile {
name = "config"
}
return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name)
}
// Paths returns all directory names.
func (l *S3LegacyLayout) Paths() (dirs []string) {
for _, p := range s3LayoutPaths {
dirs = append(dirs, l.Join(l.Path, p))
}
return dirs
}
// Basedir returns the base dir name for type t.
func (l *S3LegacyLayout) Basedir(t restic.FileType) string {
return l.Join(l.Path, s3LayoutPaths[t])
}
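To contrast with the default layout, here is a sketch using the singular directory names of the legacy layout (the handle name is again illustrative):

package main

import (
	"fmt"
	"path"

	"restic"
	"restic/backend"
)

func main() {
	l := &backend.S3LegacyLayout{URL: "", Path: "", Join: path.Join}
	h := restic.Handle{Type: restic.SnapshotFile, Name: "1234"}
	fmt.Println(l.Filename(h)) // snapshot/1234
}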

View File

@@ -0,0 +1,449 @@
package backend
import (
"fmt"
"path"
"path/filepath"
"reflect"
"restic"
. "restic/test"
"sort"
"testing"
)
func TestDefaultLayout(t *testing.T) {
tempdir, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
path string
join func(...string) string
restic.Handle
filename string
}{
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.DataFile, Name: "0123456"},
filepath.Join(tempdir, "data", "01", "0123456"),
},
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.ConfigFile, Name: "CFG"},
filepath.Join(tempdir, "config"),
},
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.SnapshotFile, Name: "123456"},
filepath.Join(tempdir, "snapshots", "123456"),
},
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.IndexFile, Name: "123456"},
filepath.Join(tempdir, "index", "123456"),
},
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.LockFile, Name: "123456"},
filepath.Join(tempdir, "locks", "123456"),
},
{
tempdir,
filepath.Join,
restic.Handle{Type: restic.KeyFile, Name: "123456"},
filepath.Join(tempdir, "keys", "123456"),
},
{
"",
path.Join,
restic.Handle{Type: restic.DataFile, Name: "0123456"},
"data/01/0123456",
},
{
"",
path.Join,
restic.Handle{Type: restic.ConfigFile, Name: "CFG"},
"config",
},
{
"",
path.Join,
restic.Handle{Type: restic.SnapshotFile, Name: "123456"},
"snapshots/123456",
},
{
"",
path.Join,
restic.Handle{Type: restic.IndexFile, Name: "123456"},
"index/123456",
},
{
"",
path.Join,
restic.Handle{Type: restic.LockFile, Name: "123456"},
"locks/123456",
},
{
"",
path.Join,
restic.Handle{Type: restic.KeyFile, Name: "123456"},
"keys/123456",
},
}
t.Run("Paths", func(t *testing.T) {
l := &DefaultLayout{
Path: tempdir,
Join: filepath.Join,
}
dirs := l.Paths()
want := []string{
filepath.Join(tempdir, "data"),
filepath.Join(tempdir, "snapshots"),
filepath.Join(tempdir, "index"),
filepath.Join(tempdir, "locks"),
filepath.Join(tempdir, "keys"),
}
for i := 0; i < 256; i++ {
want = append(want, filepath.Join(tempdir, "data", fmt.Sprintf("%02x", i)))
}
sort.Sort(sort.StringSlice(want))
sort.Sort(sort.StringSlice(dirs))
if !reflect.DeepEqual(dirs, want) {
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
}
})
for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) {
l := &DefaultLayout{
Path: test.path,
Join: test.join,
}
filename := l.Filename(test.Handle)
if filename != test.filename {
t.Fatalf("wrong filename, want %v, got %v", test.filename, filename)
}
})
}
}
func TestRESTLayout(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
restic.Handle
filename string
}{
{
restic.Handle{Type: restic.DataFile, Name: "0123456"},
filepath.Join(path, "data", "0123456"),
},
{
restic.Handle{Type: restic.ConfigFile, Name: "CFG"},
filepath.Join(path, "config"),
},
{
restic.Handle{Type: restic.SnapshotFile, Name: "123456"},
filepath.Join(path, "snapshots", "123456"),
},
{
restic.Handle{Type: restic.IndexFile, Name: "123456"},
filepath.Join(path, "index", "123456"),
},
{
restic.Handle{Type: restic.LockFile, Name: "123456"},
filepath.Join(path, "locks", "123456"),
},
{
restic.Handle{Type: restic.KeyFile, Name: "123456"},
filepath.Join(path, "keys", "123456"),
},
}
l := &RESTLayout{
Path: path,
Join: filepath.Join,
}
t.Run("Paths", func(t *testing.T) {
dirs := l.Paths()
want := []string{
filepath.Join(path, "data"),
filepath.Join(path, "snapshots"),
filepath.Join(path, "index"),
filepath.Join(path, "locks"),
filepath.Join(path, "keys"),
}
sort.Sort(sort.StringSlice(want))
sort.Sort(sort.StringSlice(dirs))
if !reflect.DeepEqual(dirs, want) {
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
}
})
for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) {
filename := l.Filename(test.Handle)
if filename != test.filename {
t.Fatalf("wrong filename, want %v, got %v", test.filename, filename)
}
})
}
}
func TestRESTLayoutURLs(t *testing.T) {
var tests = []struct {
l Layout
h restic.Handle
fn string
dir string
}{
{
&RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join},
restic.Handle{Type: restic.DataFile, Name: "foobar"},
"https://hostname.foo/data/foobar",
"https://hostname.foo/data/",
},
{
&RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
restic.Handle{Type: restic.LockFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/locks/foobar",
"https://hostname.foo:1234/prefix/repo/locks/",
},
{
&RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
restic.Handle{Type: restic.ConfigFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/config",
"https://hostname.foo:1234/prefix/repo/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join},
restic.Handle{Type: restic.DataFile, Name: "foobar"},
"https://hostname.foo/data/foobar",
"https://hostname.foo/data/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "", Join: path.Join},
restic.Handle{Type: restic.LockFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/lock/foobar",
"https://hostname.foo:1234/prefix/repo/lock/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
restic.Handle{Type: restic.ConfigFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/config",
"https://hostname.foo:1234/prefix/repo/",
},
{
&S3LegacyLayout{URL: "", Path: "", Join: path.Join},
restic.Handle{Type: restic.DataFile, Name: "foobar"},
"data/foobar",
"data/",
},
{
&S3LegacyLayout{URL: "", Path: "", Join: path.Join},
restic.Handle{Type: restic.LockFile, Name: "foobar"},
"lock/foobar",
"lock/",
},
{
&S3LegacyLayout{URL: "", Path: "/", Join: path.Join},
restic.Handle{Type: restic.ConfigFile, Name: "foobar"},
"/config",
"/",
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%T", test.l), func(t *testing.T) {
fn := test.l.Filename(test.h)
if fn != test.fn {
t.Fatalf("wrong filename, want %v, got %v", test.fn, fn)
}
dir := test.l.Dirname(test.h)
if dir != test.dir {
t.Fatalf("wrong dirname, want %v, got %v", test.dir, dir)
}
})
}
}
func TestS3LegacyLayout(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
restic.Handle
filename string
}{
{
restic.Handle{Type: restic.DataFile, Name: "0123456"},
filepath.Join(path, "data", "0123456"),
},
{
restic.Handle{Type: restic.ConfigFile, Name: "CFG"},
filepath.Join(path, "config"),
},
{
restic.Handle{Type: restic.SnapshotFile, Name: "123456"},
filepath.Join(path, "snapshot", "123456"),
},
{
restic.Handle{Type: restic.IndexFile, Name: "123456"},
filepath.Join(path, "index", "123456"),
},
{
restic.Handle{Type: restic.LockFile, Name: "123456"},
filepath.Join(path, "lock", "123456"),
},
{
restic.Handle{Type: restic.KeyFile, Name: "123456"},
filepath.Join(path, "key", "123456"),
},
}
l := &S3LegacyLayout{
Path: path,
Join: filepath.Join,
}
t.Run("Paths", func(t *testing.T) {
dirs := l.Paths()
want := []string{
filepath.Join(path, "data"),
filepath.Join(path, "snapshot"),
filepath.Join(path, "index"),
filepath.Join(path, "lock"),
filepath.Join(path, "key"),
}
sort.Sort(sort.StringSlice(want))
sort.Sort(sort.StringSlice(dirs))
if !reflect.DeepEqual(dirs, want) {
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
}
})
for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) {
filename := l.Filename(test.Handle)
if filename != test.filename {
t.Fatalf("wrong filename, want %v, got %v", test.filename, filename)
}
})
}
}
func TestDetectLayout(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
filename string
want string
}{
{"repo-layout-default.tar.gz", "*backend.DefaultLayout"},
{"repo-layout-s3legacy.tar.gz", "*backend.S3LegacyLayout"},
}
var fs = &LocalFilesystem{}
for _, test := range tests {
for _, fs := range []Filesystem{fs, nil} {
t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) {
SetupTarTestFixture(t, path, filepath.Join("testdata", test.filename))
layout, err := DetectLayout(fs, filepath.Join(path, "repo"))
if err != nil {
t.Fatal(err)
}
if layout == nil {
t.Fatal("wanted some layout, but detect returned nil")
}
layoutName := fmt.Sprintf("%T", layout)
if layoutName != test.want {
t.Fatalf("want layout %v, got %v", test.want, layoutName)
}
RemoveAll(t, filepath.Join(path, "repo"))
})
}
}
}
func TestParseLayout(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
layoutName string
defaultLayoutName string
want string
}{
{"default", "", "*backend.DefaultLayout"},
{"s3legacy", "", "*backend.S3LegacyLayout"},
{"", "", "*backend.DefaultLayout"},
}
SetupTarTestFixture(t, path, filepath.Join("testdata", "repo-layout-default.tar.gz"))
for _, test := range tests {
t.Run(test.layoutName, func(t *testing.T) {
layout, err := ParseLayout(&LocalFilesystem{}, test.layoutName, test.defaultLayoutName, filepath.Join(path, "repo"))
if err != nil {
t.Fatal(err)
}
if layout == nil {
t.Fatal("wanted some layout, but detect returned nil")
}
// test that the functions work (and don't panic)
_ = layout.Dirname(restic.Handle{Type: restic.DataFile})
_ = layout.Filename(restic.Handle{Type: restic.DataFile, Name: "1234"})
_ = layout.Paths()
layoutName := fmt.Sprintf("%T", layout)
if layoutName != test.want {
t.Fatalf("want layout %v, got %v", test.want, layoutName)
}
})
}
}
func TestParseLayoutInvalid(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var invalidNames = []string{
"foo", "bar", "local",
}
for _, name := range invalidNames {
t.Run(name, func(t *testing.T) {
layout, err := ParseLayout(nil, name, "", path)
if err == nil {
t.Fatalf("expected error not found for layout name %v, layout is %v", name, layout)
}
})
}
}

View File

@@ -0,0 +1,27 @@
package local
import (
"strings"
"restic/errors"
"restic/options"
)
// Config holds all information needed to open a local repository.
type Config struct {
Path string
Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"`
}
func init() {
options.Register("local", Config{})
}
// ParseConfig parses a local backend config.
func ParseConfig(cfg string) (interface{}, error) {
if !strings.HasPrefix(cfg, "local:") {
return nil, errors.New(`invalid format, prefix "local" not found`)
}
return Config{Path: cfg[6:]}, nil
}
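A quick sketch of parsing a local backend string; the path is hypothetical:

package main

import (
	"fmt"

	"restic/backend/local"
)

func main() {
	cfg, err := local.ParseConfig("local:/srv/repo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", cfg) // local.Config{Path:"/srv/repo", Layout:""}
}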

View File

@@ -0,0 +1,2 @@
// Package local implements repository storage in a local directory.
package local

View File

@@ -0,0 +1,80 @@
package local
import (
"context"
"path/filepath"
"restic"
. "restic/test"
"testing"
)
func TestLayout(t *testing.T) {
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
filename string
layout string
failureExpected bool
datafiles map[string]bool
}{
{"repo-layout-default.tar.gz", "", false, map[string]bool{
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
}},
{"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
}},
}
for _, test := range tests {
t.Run(test.filename, func(t *testing.T) {
SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))
repo := filepath.Join(path, "repo")
be, err := Open(Config{
Path: repo,
Layout: test.layout,
})
if err != nil {
t.Fatal(err)
}
if be == nil {
t.Fatalf("Open() returned nil but no error")
}
datafiles := make(map[string]bool)
for id := range be.List(context.TODO(), restic.DataFile) {
datafiles[id] = false
}
if len(datafiles) == 0 {
t.Errorf("List() returned zero data files")
}
for id := range test.datafiles {
if _, ok := datafiles[id]; !ok {
t.Errorf("datafile with id %v not found", id)
}
datafiles[id] = true
}
for id, v := range datafiles {
if !v {
t.Errorf("unexpected id %v found", id)
}
}
if err = be.Close(); err != nil {
t.Errorf("Close() returned error %v", err)
}
RemoveAll(t, filepath.Join(path, "repo"))
})
}
}

View File

@@ -0,0 +1,256 @@
package local
import (
"context"
"io"
"os"
"path/filepath"
"restic"
"restic/errors"
"restic/backend"
"restic/debug"
"restic/fs"
)
// Local is a backend in a local directory.
type Local struct {
Config
backend.Layout
}
// ensure statically that *Local implements restic.Backend.
var _ restic.Backend = &Local{}
const defaultLayout = "default"
// Open opens the local backend as specified by config.
func Open(cfg Config) (*Local, error) {
debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout)
l, err := backend.ParseLayout(&backend.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}
be := &Local{Config: cfg, Layout: l}
// create the repository directories. MkdirAll does nothing if the directory already exists.
for _, d := range be.Paths() {
err := fs.MkdirAll(d, backend.Modes.Dir)
if err != nil {
return nil, errors.Wrap(err, "MkdirAll")
}
}
return be, nil
}
// Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created.
func Create(cfg Config) (*Local, error) {
debug.Log("create local backend at %v (layout %q)", cfg.Path, cfg.Layout)
l, err := backend.ParseLayout(&backend.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}
be := &Local{
Config: cfg,
Layout: l,
}
// test if config file already exists
_, err = fs.Lstat(be.Filename(restic.Handle{Type: restic.ConfigFile}))
if err == nil {
return nil, errors.New("config file already exists")
}
// create the repository directories
for _, d := range be.Paths() {
err := fs.MkdirAll(d, backend.Modes.Dir)
if err != nil {
return nil, errors.Wrap(err, "MkdirAll")
}
}
return be, nil
}
// Location returns this backend's location (the directory name).
func (b *Local) Location() string {
return b.Path
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (b *Local) IsNotExist(err error) bool {
return os.IsNotExist(errors.Cause(err))
}
// Save stores data in the backend at the handle.
func (b *Local) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
debug.Log("Save %v", h)
if err := h.Valid(); err != nil {
return err
}
filename := b.Filename(h)
// create new file
f, err := fs.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, backend.Modes.File)
if err != nil {
return errors.Wrap(err, "OpenFile")
}
// save data, then sync
_, err = io.Copy(f, rd)
if err != nil {
_ = f.Close()
return errors.Wrap(err, "Write")
}
if err = f.Sync(); err != nil {
_ = f.Close()
return errors.Wrap(err, "Sync")
}
err = f.Close()
if err != nil {
return errors.Wrap(err, "Close")
}
// set mode to read-only
fi, err := fs.Stat(filename)
if err != nil {
return errors.Wrap(err, "Stat")
}
return setNewFileMode(filename, fi)
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (b *Local) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
f, err := fs.Open(b.Filename(h))
if err != nil {
return nil, err
}
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
_ = f.Close()
return nil, err
}
}
if length > 0 {
return backend.LimitReadCloser(f, int64(length)), nil
}
return f, nil
}
// Stat returns information about a blob.
func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
debug.Log("Stat %v", h)
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
fi, err := fs.Stat(b.Filename(h))
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: fi.Size()}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(ctx context.Context, h restic.Handle) (bool, error) {
debug.Log("Test %v", h)
_, err := fs.Stat(b.Filename(h))
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}
return false, errors.Wrap(err, "Stat")
}
return true, nil
}
// Remove removes the blob with the given name and type.
func (b *Local) Remove(ctx context.Context, h restic.Handle) error {
debug.Log("Remove %v", h)
fn := b.Filename(h)
// reset read-only flag
err := fs.Chmod(fn, 0666)
if err != nil {
return errors.Wrap(err, "Chmod")
}
return fs.Remove(fn)
}
func isFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this.
func (b *Local) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("List %v", t)
ch := make(chan string)
go func() {
defer close(ch)
fs.Walk(b.Basedir(t), func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !isFile(fi) {
return err
}
select {
case ch <- filepath.Base(path):
case <-ctx.Done():
return err
}
return err
})
}()
return ch
}
// Delete removes the repository and all files.
func (b *Local) Delete() error {
debug.Log("Delete()")
return fs.RemoveAll(b.Path)
}
// Close closes all open files.
func (b *Local) Close() error {
debug.Log("Close()")
// this does not need to do anything, all open files are closed within the
// same function.
return nil
}
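A minimal sketch of creating a local backend and storing the config file; the directory is hypothetical, and Create fails if a config file already exists there:

package main

import (
	"context"
	"strings"

	"restic"
	"restic/backend/local"
)

func main() {
	be, err := local.Create(local.Config{Path: "/tmp/restic-demo"})
	if err != nil {
		panic(err)
	}
	// a handle of type ConfigFile is valid without a name
	h := restic.Handle{Type: restic.ConfigFile}
	if err := be.Save(context.TODO(), h, strings.NewReader("demo")); err != nil {
		panic(err)
	}
	_ = be.Close()
}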

View File

@@ -0,0 +1,61 @@
package local_test
import (
"io/ioutil"
"restic"
"testing"
"restic/backend/local"
"restic/backend/test"
. "restic/test"
)
func newTestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
dir, err := ioutil.TempDir(TestTempDir, "restic-test-local-")
if err != nil {
t.Fatal(err)
}
t.Logf("create new backend at %v", dir)
cfg := local.Config{
Path: dir,
}
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(local.Config)
return local.Create(cfg)
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(local.Config)
return local.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(local.Config)
if !TestCleanupTempDirs {
t.Logf("leaving test backend dir at %v", cfg.Path)
}
RemoveAll(t, cfg.Path)
return nil
},
}
}
func TestBackend(t *testing.T) {
newTestSuite(t).RunTests(t)
}
func BenchmarkBackend(t *testing.B) {
newTestSuite(t).RunBenchmarks(t)
}

View File

@@ -0,0 +1,13 @@
// +build !windows
package local
import (
"os"
"restic/fs"
)
// setNewFileMode sets the file to read-only by clearing all write permission bits.
func setNewFileMode(f string, fi os.FileInfo) error {
return fs.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
}

View File

@@ -0,0 +1,12 @@
package local
import (
"os"
)
// We don't set the read-only flag on Windows, since doing so would
// prevent us from deleting the file later, and it isn't common
// practice on this platform.
func setNewFileMode(f string, fi os.FileInfo) error {
return nil
}

View File

@@ -0,0 +1,107 @@
// Package location implements parsing the restic repository location from a string.
package location
import (
"strings"
"restic/backend/b2"
"restic/backend/local"
"restic/backend/rest"
"restic/backend/s3"
"restic/backend/sftp"
"restic/backend/swift"
"restic/errors"
)
// Location specifies the location of a repository, including the method of
// access and (possibly) credentials needed for access.
type Location struct {
Scheme string
Config interface{}
}
type parser struct {
scheme string
parse func(string) (interface{}, error)
}
// parsers is a list of valid config parsers for the backends. If no parser
// matches, Parse falls back to the local backend.
var parsers = []parser{
{"b2", b2.ParseConfig},
{"local", local.ParseConfig},
{"sftp", sftp.ParseConfig},
{"s3", s3.ParseConfig},
{"swift", swift.ParseConfig},
{"rest", rest.ParseConfig},
}
func isPath(s string) bool {
if strings.HasPrefix(s, "../") || strings.HasPrefix(s, `..\`) {
return true
}
if strings.HasPrefix(s, "/") || strings.HasPrefix(s, `\`) {
return true
}
if len(s) < 3 {
return false
}
// check for drive paths
drive := s[0]
if !(drive >= 'a' && drive <= 'z') && !(drive >= 'A' && drive <= 'Z') {
return false
}
if s[1] != ':' {
return false
}
if s[2] != '\\' && s[2] != '/' {
return false
}
return true
}
// Parse extracts repository location information from the string s. If s
// starts with a backend name followed by a colon, that backend's Parse()
// function is called. Otherwise, the local backend is used which interprets s
// as the name of a directory.
func Parse(s string) (u Location, err error) {
scheme := extractScheme(s)
u.Scheme = scheme
for _, parser := range parsers {
if parser.scheme != scheme {
continue
}
u.Config, err = parser.parse(s)
if err != nil {
return Location{}, err
}
return u, nil
}
// if s is not a path but contains ":", an unknown scheme was given and the location is ambiguous
if !isPath(s) && strings.ContainsRune(s, ':') {
return Location{}, errors.New("invalid backend\nIf the repo is in a local directory, you need to add a `local:` prefix")
}
u.Scheme = "local"
u.Config, err = local.ParseConfig("local:" + s)
if err != nil {
return Location{}, err
}
return u, nil
}
func extractScheme(s string) string {
data := strings.SplitN(s, ":", 2)
return data[0]
}
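A sketch of the two ways Parse resolves a location, an explicit scheme and the local-directory fallback:

package main

import (
	"fmt"

	"restic/backend/location"
)

func main() {
	loc, err := location.Parse("s3://eu-central-1/bucketname")
	if err != nil {
		panic(err)
	}
	fmt.Println(loc.Scheme) // s3

	// a plain directory falls back to the local backend
	loc, err = location.Parse("/srv/repo")
	if err != nil {
		panic(err)
	}
	fmt.Println(loc.Scheme) // local
}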

View File

@@ -0,0 +1,339 @@
package location
import (
"net/url"
"reflect"
"testing"
"restic/backend/b2"
"restic/backend/local"
"restic/backend/rest"
"restic/backend/s3"
"restic/backend/sftp"
"restic/backend/swift"
)
func parseURL(s string) *url.URL {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
return u
}
var parseTests = []struct {
s string
u Location
}{
{
"local:/srv/repo",
Location{Scheme: "local",
Config: local.Config{
Path: "/srv/repo",
},
},
},
{
"local:dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "dir1/dir2",
},
},
},
{
"local:dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "dir1/dir2",
},
},
},
{
"dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "dir1/dir2",
},
},
},
{
"/dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "/dir1/dir2",
},
},
},
{
"local:../dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "../dir1/dir2",
},
},
},
{
"/dir1/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "/dir1/dir2",
},
},
},
{
"/dir1:foobar/dir2",
Location{Scheme: "local",
Config: local.Config{
Path: "/dir1:foobar/dir2",
},
},
},
{
`\dir1\foobar\dir2`,
Location{Scheme: "local",
Config: local.Config{
Path: `\dir1\foobar\dir2`,
},
},
},
{
`c:\dir1\foobar\dir2`,
Location{Scheme: "local",
Config: local.Config{
Path: `c:\dir1\foobar\dir2`,
},
},
},
{
`C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
Location{Scheme: "local",
Config: local.Config{
Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
},
},
},
{
`c:/dir1/foobar/dir2`,
Location{Scheme: "local",
Config: local.Config{
Path: `c:/dir1/foobar/dir2`,
},
},
},
{
"sftp:user@host:/srv/repo",
Location{Scheme: "sftp",
Config: sftp.Config{
User: "user",
Host: "host",
Path: "/srv/repo",
},
},
},
{
"sftp:host:/srv/repo",
Location{Scheme: "sftp",
Config: sftp.Config{
User: "",
Host: "host",
Path: "/srv/repo",
},
},
},
{
"sftp://user@host/srv/repo",
Location{Scheme: "sftp",
Config: sftp.Config{
User: "user",
Host: "host",
Path: "srv/repo",
},
},
},
{
"sftp://user@host//srv/repo",
Location{Scheme: "sftp",
Config: sftp.Config{
User: "user",
Host: "host",
Path: "/srv/repo",
},
},
},
{
"s3://eu-central-1/bucketname",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 5,
},
},
},
{
"s3://hostname.foo/bucketname",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "hostname.foo",
Bucket: "bucketname",
Prefix: "restic",
Connections: 5,
},
},
},
{
"s3://hostname.foo/bucketname/prefix/directory",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "hostname.foo",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
},
},
},
{
"s3:eu-central-1/repo",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "eu-central-1",
Bucket: "repo",
Prefix: "restic",
Connections: 5,
},
},
},
{
"s3:eu-central-1/repo/prefix/directory",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "eu-central-1",
Bucket: "repo",
Prefix: "prefix/directory",
Connections: 5,
},
},
},
{
"s3:https://hostname.foo/repo",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "hostname.foo",
Bucket: "repo",
Prefix: "restic",
Connections: 5,
},
},
},
{
"s3:https://hostname.foo/repo/prefix/directory",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "hostname.foo",
Bucket: "repo",
Prefix: "prefix/directory",
Connections: 5,
},
},
},
{
"s3:http://hostname.foo/repo",
Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "hostname.foo",
Bucket: "repo",
Prefix: "restic",
UseHTTP: true,
Connections: 5,
},
},
},
{
"swift:container17:/",
Location{Scheme: "swift",
Config: swift.Config{
Container: "container17",
Prefix: "",
Connections: 5,
},
},
},
{
"swift:container17:/prefix97",
Location{Scheme: "swift",
Config: swift.Config{
Container: "container17",
Prefix: "prefix97",
Connections: 5,
},
},
},
{
"rest:http://hostname.foo:1234/",
Location{Scheme: "rest",
Config: rest.Config{
URL: parseURL("http://hostname.foo:1234/"),
Connections: 5,
},
},
},
{
"b2:bucketname:/prefix", Location{Scheme: "b2",
Config: b2.Config{
Bucket: "bucketname",
Prefix: "prefix",
Connections: 5,
},
},
},
{
"b2:bucketname", Location{Scheme: "b2",
Config: b2.Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
},
},
},
}
func TestParse(t *testing.T) {
for i, test := range parseTests {
t.Run(test.s, func(t *testing.T) {
u, err := Parse(test.s)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test.u.Scheme != u.Scheme {
t.Errorf("test %d: scheme does not match, want %q, got %q",
i, test.u.Scheme, u.Scheme)
}
if !reflect.DeepEqual(test.u.Config, u.Config) {
t.Errorf("test %d: cfg map does not match, want:\n %#v\ngot: \n %#v",
i, test.u.Config, u.Config)
}
})
}
}
func TestInvalidScheme(t *testing.T) {
var invalidSchemes = []string{
"foobar:xxx",
"foobar:/dir/dir2",
}
for _, s := range invalidSchemes {
t.Run(s, func(t *testing.T) {
_, err := Parse(s)
if err == nil {
t.Fatalf("error for invalid location %q not found", s)
}
})
}
}

View File

@@ -0,0 +1,213 @@
package mem
import (
"bytes"
"context"
"io"
"io/ioutil"
"restic"
"sync"
"restic/errors"
"restic/debug"
)
type memMap map[restic.Handle][]byte
// make sure that MemoryBackend implements restic.Backend
var _ restic.Backend = &MemoryBackend{}
var errNotFound = errors.New("not found")
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
data memMap
m sync.Mutex
}
// New returns a new backend that saves all data in a map in memory.
func New() *MemoryBackend {
be := &MemoryBackend{
data: make(memMap),
}
debug.Log("created new memory backend")
return be
}
// Test returns whether a file exists.
func (be *MemoryBackend) Test(ctx context.Context, h restic.Handle) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
debug.Log("Test %v", h)
if _, ok := be.data[h]; ok {
return true, nil
}
return false, nil
}
// IsNotExist returns true if the file does not exist.
func (be *MemoryBackend) IsNotExist(err error) bool {
return errors.Cause(err) == errNotFound
}
// Save adds new Data to the backend.
func (be *MemoryBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
if err := h.Valid(); err != nil {
return err
}
be.m.Lock()
defer be.m.Unlock()
if h.Type == restic.ConfigFile {
h.Name = ""
}
if _, ok := be.data[h]; ok {
return errors.New("file already exists")
}
buf, err := ioutil.ReadAll(rd)
if err != nil {
return err
}
be.data[h] = buf
debug.Log("saved %v bytes at %v", len(buf), h)
return nil
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *MemoryBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
if err := h.Valid(); err != nil {
return nil, err
}
be.m.Lock()
defer be.m.Unlock()
if h.Type == restic.ConfigFile {
h.Name = ""
}
debug.Log("Load %v offset %v len %v", h, offset, length)
if offset < 0 {
return nil, errors.New("offset is negative")
}
if _, ok := be.data[h]; !ok {
return nil, errNotFound
}
buf := be.data[h]
if offset > int64(len(buf)) {
return nil, errors.New("offset beyond end of file")
}
buf = buf[offset:]
if length > 0 && len(buf) > length {
buf = buf[:length]
}
return ioutil.NopCloser(bytes.NewReader(buf)), nil
}
// Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
be.m.Lock()
defer be.m.Unlock()
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
if h.Type == restic.ConfigFile {
h.Name = ""
}
debug.Log("stat %v", h)
e, ok := be.data[h]
if !ok {
return restic.FileInfo{}, errNotFound
}
return restic.FileInfo{Size: int64(len(e))}, nil
}
// Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(ctx context.Context, h restic.Handle) error {
be.m.Lock()
defer be.m.Unlock()
debug.Log("Remove %v", h)
if _, ok := be.data[h]; !ok {
return errNotFound
}
delete(be.data, h)
return nil
}
// List returns a channel which yields entries from the backend.
func (be *MemoryBackend) List(ctx context.Context, t restic.FileType) <-chan string {
be.m.Lock()
defer be.m.Unlock()
ch := make(chan string)
var ids []string
for entry := range be.data {
if entry.Type != t {
continue
}
ids = append(ids, entry.Name)
}
debug.Log("list %v: %v", t, ids)
go func() {
defer close(ch)
for _, id := range ids {
select {
case ch <- id:
case <-ctx.Done():
return
}
}
}()
return ch
}
// Location returns the location of the backend (RAM).
func (be *MemoryBackend) Location() string {
return "RAM"
}
// Delete removes all data in the backend.
func (be *MemoryBackend) Delete(ctx context.Context) error {
be.m.Lock()
defer be.m.Unlock()
be.data = make(memMap)
return nil
}
// Close closes the backend.
func (be *MemoryBackend) Close() error {
return nil
}
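A round-trip sketch with the in-memory backend; the handle name is illustrative:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"

	"restic"
	"restic/backend/mem"
)

func main() {
	be := mem.New()
	h := restic.Handle{Type: restic.DataFile, Name: "ab12"}
	if err := be.Save(context.TODO(), h, bytes.NewReader([]byte("hello"))); err != nil {
		panic(err)
	}
	rd, err := be.Load(context.TODO(), h, 0, 0) // length 0: read everything
	if err != nil {
		panic(err)
	}
	buf, _ := ioutil.ReadAll(rd)
	_ = rd.Close()
	fmt.Printf("%s\n", buf) // hello
}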

View File

@@ -0,0 +1,66 @@
package mem_test
import (
"context"
"restic"
"testing"
"restic/errors"
"restic/backend/mem"
"restic/backend/test"
)
type memConfig struct {
be restic.Backend
}
func newTestSuite() *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
return &memConfig{}, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(cfg interface{}) (restic.Backend, error) {
c := cfg.(*memConfig)
if c.be != nil {
ok, err := c.be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if ok {
return nil, errors.New("config already exists")
}
}
c.be = mem.New()
return c.be, nil
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(cfg interface{}) (restic.Backend, error) {
c := cfg.(*memConfig)
if c.be == nil {
c.be = mem.New()
}
return c.be, nil
},
// CleanupFn removes data created during the tests.
Cleanup: func(cfg interface{}) error {
// no cleanup needed
return nil
},
}
}
func TestSuiteBackendMem(t *testing.T) {
newTestSuite().RunTests(t)
}
func BenchmarkSuiteBackendMem(t *testing.B) {
newTestSuite().RunBenchmarks(t)
}

internal/backend/paths.go
View File

@@ -0,0 +1,26 @@
package backend
import "os"
// Paths contains the default paths for file-based backends (e.g. local).
var Paths = struct {
Data string
Snapshots string
Index string
Locks string
Keys string
Temp string
Config string
}{
"data",
"snapshots",
"index",
"locks",
"keys",
"tmp",
"config",
}
// Modes holds the default modes for directories and files for file-based
// backends.
var Modes = struct{ Dir, File os.FileMode }{0700, 0600}

View File

@@ -0,0 +1,44 @@
package rest
import (
"net/url"
"strings"
"restic/errors"
"restic/options"
)
// Config contains all configuration necessary to connect to a REST server.
type Config struct {
URL *url.URL
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
func init() {
options.Register("rest", Config{})
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
// ParseConfig parses the string s and extracts the REST server URL.
func ParseConfig(s string) (interface{}, error) {
if !strings.HasPrefix(s, "rest:") {
return nil, errors.New("invalid REST backend specification")
}
s = s[5:]
u, err := url.Parse(s)
if err != nil {
return nil, errors.Wrap(err, "url.Parse")
}
cfg := NewConfig()
cfg.URL = u
return cfg, nil
}
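A sketch of parsing a REST backend string; the server URL is made up:

package main

import (
	"fmt"

	"restic/backend/rest"
)

func main() {
	cfg, err := rest.ParseConfig("rest:http://localhost:8000/")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.(rest.Config).URL) // http://localhost:8000/
}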

View File

@@ -0,0 +1,42 @@
package rest
import (
"net/url"
"reflect"
"testing"
)
func parseURL(s string) *url.URL {
u, err := url.Parse(s)
if err != nil {
panic(err)
}
return u
}
var configTests = []struct {
s string
cfg Config
}{
{"rest:http://localhost:1234", Config{
URL: parseURL("http://localhost:1234"),
Connections: 5,
}},
}
func TestParseConfig(t *testing.T) {
for i, test := range configTests {
cfg, err := ParseConfig(test.s)
if err != nil {
t.Errorf("test %d:%s failed: %v", i, test.s, err)
continue
}
if !reflect.DeepEqual(cfg, test.cfg) {
t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
i, test.s, test.cfg, cfg)
continue
}
}
}

View File

@@ -0,0 +1,351 @@
package rest
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"restic"
"strings"
"golang.org/x/net/context/ctxhttp"
"restic/debug"
"restic/errors"
"restic/backend"
)
// make sure the rest backend implements restic.Backend
var _ restic.Backend = &restBackend{}
type restBackend struct {
url *url.URL
sem *backend.Semaphore
client *http.Client
backend.Layout
}
// Open opens the REST backend with the given config.
func Open(cfg Config) (restic.Backend, error) {
client := &http.Client{Transport: backend.Transport()}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
// use url without trailing slash for layout
url := cfg.URL.String()
if url[len(url)-1] == '/' {
url = url[:len(url)-1]
}
be := &restBackend{
url: cfg.URL,
client: client,
Layout: &backend.RESTLayout{URL: url, Join: path.Join},
sem: sem,
}
return be, nil
}
// Create creates a new repository on the REST server configured in config.
func Create(cfg Config) (restic.Backend, error) {
be, err := Open(cfg)
if err != nil {
return nil, err
}
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err == nil {
return nil, errors.Fatal("config file already exists")
}
url := *cfg.URL
values := url.Query()
values.Set("create", "true")
url.RawQuery = values.Encode()
resp, err := http.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
return nil, err
}
err = resp.Body.Close()
if err != nil {
return nil, err
}
return be, nil
}
// Location returns this backend's location (the server's URL).
func (b *restBackend) Location() string {
return b.url.String()
}
// Save stores data in the backend at the handle.
func (b *restBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// make sure that client.Post() cannot close the reader by wrapping it
rd = ioutil.NopCloser(rd)
b.sem.GetToken()
resp, err := ctxhttp.Post(ctx, b.client, b.Filename(h), "binary/octet-stream", rd)
b.sem.ReleaseToken()
if resp != nil {
defer func() {
_, _ = io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {
return errors.Wrap(err, "client.Post")
}
if resp.StatusCode != 200 {
return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
return nil
}
// ErrIsNotExist is returned whenever the requested file does not exist on the
// server.
type ErrIsNotExist struct {
restic.Handle
}
func (e ErrIsNotExist) Error() string {
return fmt.Sprintf("%v does not exist", e.Handle)
}
// IsNotExist returns true if the error was caused by a non-existing file.
func (b *restBackend) IsNotExist(err error) bool {
err = errors.Cause(err)
_, ok := err.(ErrIsNotExist)
return ok
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (b *restBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
req, err := http.NewRequest("GET", b.Filename(h), nil)
if err != nil {
return nil, errors.Wrap(err, "http.NewRequest")
}
byteRange := fmt.Sprintf("bytes=%d-", offset)
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
req.Header.Add("Range", byteRange)
debug.Log("Load(%v) send range %v", h, byteRange)
b.sem.GetToken()
resp, err := ctxhttp.Do(ctx, b.client, req)
b.sem.ReleaseToken()
if err != nil {
if resp != nil {
_, _ = io.Copy(ioutil.Discard, resp.Body)
_ = resp.Body.Close()
}
return nil, errors.Wrap(err, "client.Do")
}
if resp.StatusCode == http.StatusNotFound {
_ = resp.Body.Close()
return nil, ErrIsNotExist{h}
}
if resp.StatusCode != 200 && resp.StatusCode != 206 {
_ = resp.Body.Close()
return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
return resp.Body, nil
}
// Stat returns information about a blob.
func (b *restBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
b.sem.GetToken()
resp, err := ctxhttp.Head(ctx, b.client, b.Filename(h))
b.sem.ReleaseToken()
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "client.Head")
}
_, _ = io.Copy(ioutil.Discard, resp.Body)
if err = resp.Body.Close(); err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Close")
}
if resp.StatusCode == http.StatusNotFound {
// the response body has already been closed above
return restic.FileInfo{}, ErrIsNotExist{h}
}
if resp.StatusCode != 200 {
return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
if resp.ContentLength < 0 {
return restic.FileInfo{}, errors.New("negative content length")
}
bi := restic.FileInfo{
Size: resp.ContentLength,
}
return bi, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *restBackend) Test(ctx context.Context, h restic.Handle) (bool, error) {
_, err := b.Stat(ctx, h)
if err != nil {
return false, nil
}
return true, nil
}
// Remove removes the blob with the given name and type.
func (b *restBackend) Remove(ctx context.Context, h restic.Handle) error {
if err := h.Valid(); err != nil {
return err
}
req, err := http.NewRequest("DELETE", b.Filename(h), nil)
if err != nil {
return errors.Wrap(err, "http.NewRequest")
}
b.sem.GetToken()
resp, err := ctxhttp.Do(ctx, b.client, req)
b.sem.ReleaseToken()
if err != nil {
return errors.Wrap(err, "client.Do")
}
if resp.StatusCode == http.StatusNotFound {
_ = resp.Body.Close()
return ErrIsNotExist{h}
}
if resp.StatusCode != 200 {
return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
}
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
return errors.Wrap(err, "Copy")
}
return errors.Wrap(resp.Body.Close(), "Close")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the context is cancelled, sending
// stops.
func (b *restBackend) List(ctx context.Context, t restic.FileType) <-chan string {
ch := make(chan string)
url := b.Dirname(restic.Handle{Type: t})
if !strings.HasSuffix(url, "/") {
url += "/"
}
b.sem.GetToken()
resp, err := ctxhttp.Get(ctx, b.client, url)
b.sem.ReleaseToken()
if resp != nil {
defer func() {
_, _ = io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {
close(ch)
return ch
}
dec := json.NewDecoder(resp.Body)
var list []string
if err = dec.Decode(&list); err != nil {
close(ch)
return ch
}
go func() {
defer close(ch)
for _, m := range list {
select {
case ch <- m:
case <-ctx.Done():
return
}
}
}()
return ch
}
// Close closes all open files.
func (b *restBackend) Close() error {
// this does not need to do anything, all open files are closed within the
// same function.
return nil
}
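A sketch of a partial read against a REST server; it assumes a rest-server is reachable at the given URL and that the (illustrative) data file exists:

package main

import (
	"context"
	"fmt"
	"io/ioutil"

	"restic"
	"restic/backend/rest"
)

func main() {
	cfg, err := rest.ParseConfig("rest:http://localhost:8000/")
	if err != nil {
		panic(err)
	}
	be, err := rest.Open(cfg.(rest.Config))
	if err != nil {
		panic(err)
	}
	// length 100 at offset 50 is sent as the header "Range: bytes=50-149"
	h := restic.Handle{Type: restic.DataFile, Name: "ab12"}
	rd, err := be.Load(context.TODO(), h, 100, 50)
	if err != nil {
		panic(err)
	}
	buf, _ := ioutil.ReadAll(rd)
	_ = rd.Close()
	fmt.Println(len(buf))
}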

View File

@@ -0,0 +1,133 @@
package rest_test
import (
"context"
"io/ioutil"
"net"
"net/url"
"os"
"os/exec"
"restic"
"testing"
"time"
"restic/backend/rest"
"restic/backend/test"
. "restic/test"
)
func runRESTServer(ctx context.Context, t testing.TB, dir string) func() {
srv, err := exec.LookPath("rest-server")
if err != nil {
t.Skip(err)
}
cmd := exec.CommandContext(ctx, srv, "--path", dir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// wait until the TCP port is reachable
var success bool
for i := 0; i < 10; i++ {
time.Sleep(200 * time.Millisecond)
c, err := net.Dial("tcp", "localhost:8000")
if err != nil {
continue
}
success = true
if err := c.Close(); err != nil {
t.Fatal(err)
}
break
}
if !success {
t.Fatal("unable to connect to rest server")
return nil
}
return func() {
if err := cmd.Process.Kill(); err != nil {
t.Fatal(err)
}
// ignore errors, we've killed the process
_ = cmd.Wait()
}
}
func newTestSuite(ctx context.Context, t testing.TB) *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
dir, err := ioutil.TempDir(TestTempDir, "restic-test-rest-")
if err != nil {
t.Fatal(err)
}
t.Logf("create new backend at %v", dir)
url, err := url.Parse("http://localhost:8000/restic-test")
if err != nil {
t.Fatal(err)
}
cfg := rest.NewConfig()
cfg.URL = url
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(rest.Config)
return rest.Create(cfg)
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(rest.Config)
return rest.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
return nil
},
}
}
func TestBackendREST(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
}
}()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dir, cleanup := TempDir(t)
defer cleanup()
cleanup = runRESTServer(ctx, t, dir)
defer cleanup()
newTestSuite(ctx, t).RunTests(t)
}
func BenchmarkBackendREST(t *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dir, cleanup := TempDir(t)
defer cleanup()
cleanup = runRESTServer(ctx, t, dir)
defer cleanup()
newTestSuite(ctx, t).RunBenchmarks(t)
}

View File

@@ -0,0 +1,89 @@
package s3
import (
"net/url"
"path"
"strings"
"restic/errors"
"restic/options"
)
// Config contains all configuration necessary to connect to an s3 compatible
// server.
type Config struct {
Endpoint string
UseHTTP bool
KeyID, Secret string
Bucket string
Prefix string
Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
MaxRetries uint `option:"retries" help:"set the number of retries attempted"`
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
func init() {
options.Register("s3", Config{})
}
const defaultPrefix = "restic"
// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host:bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
func ParseConfig(s string) (interface{}, error) {
switch {
case strings.HasPrefix(s, "s3:http"):
// assume that a URL has been specified, parse it and
// use the host as the endpoint and the path as the
// bucket name and prefix
url, err := url.Parse(s[3:])
if err != nil {
return nil, errors.Wrap(err, "url.Parse")
}
if url.Path == "" {
return nil, errors.New("s3: bucket name not found")
}
path := strings.SplitN(url.Path[1:], "/", 2)
return createConfig(url.Host, path, url.Scheme == "http")
case strings.HasPrefix(s, "s3://"):
s = s[5:]
case strings.HasPrefix(s, "s3:"):
s = s[3:]
default:
return nil, errors.New("s3: invalid format")
}
// use the first entry of the path as the endpoint and the
// remainder as bucket name and prefix
path := strings.SplitN(s, "/", 3)
return createConfig(path[0], path[1:], false)
}
func createConfig(endpoint string, p []string, useHTTP bool) (interface{}, error) {
var prefix string
switch {
case len(p) < 1:
return nil, errors.New("s3: invalid format, host/region or bucket name not found")
case len(p) == 1 || p[1] == "":
prefix = defaultPrefix
default:
prefix = path.Clean(p[1])
}
cfg := NewConfig()
cfg.Endpoint = endpoint
cfg.UseHTTP = useHTTP
cfg.Bucket = p[0]
cfg.Prefix = prefix
return cfg, nil
}
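A sketch of the two accepted s3 config formats; hosts and bucket names are made up. The prefix defaults to "restic" when none is given:

package main

import (
	"fmt"

	"restic/backend/s3"
)

func main() {
	for _, s := range []string{
		"s3://eu-central-1/bucketname",
		"s3:http://hostname:9999/bucketname/prefix",
	} {
		cfg, err := s3.ParseConfig(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%#v\n", cfg)
	}
}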

View File

@@ -0,0 +1,113 @@
package s3
import "testing"
var configTests = []struct {
s string
cfg Config
}{
{"s3://eu-central-1/bucketname", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 5,
}},
{"s3://eu-central-1/bucketname/", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 5,
}},
{"s3://eu-central-1/bucketname/prefix/directory", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
{"s3://eu-central-1/bucketname/prefix/directory/", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
{"s3:eu-central-1/foobar", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "restic",
Connections: 5,
}},
{"s3:eu-central-1/foobar/", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "restic",
Connections: 5,
}},
{"s3:eu-central-1/foobar/prefix/directory", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 5,
}},
{"s3:eu-central-1/foobar/prefix/directory/", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 5,
}},
{"s3:https://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
Connections: 5,
}},
{"s3:https://hostname:9999/foobar/", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
Connections: 5,
}},
{"s3:http://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
UseHTTP: true,
Connections: 5,
}},
{"s3:http://hostname:9999/foobar/", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
UseHTTP: true,
Connections: 5,
}},
{"s3:http://hostname:9999/bucket/prefix/directory", Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
Connections: 5,
}},
{"s3:http://hostname:9999/bucket/prefix/directory/", Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
Connections: 5,
}},
}
func TestParseConfig(t *testing.T) {
for i, test := range configTests {
cfg, err := ParseConfig(test.s)
if err != nil {
t.Errorf("test %d:%s failed: %v", i, test.s, err)
continue
}
if cfg != test.cfg {
t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
i, test.s, test.cfg, cfg)
continue
}
}
}

internal/backend/s3/s3.go
View File

@@ -0,0 +1,447 @@
package s3
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"restic"
"strings"
"time"
"restic/backend"
"restic/errors"
"github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
"restic/debug"
)
// Backend stores data on an S3 endpoint.
type Backend struct {
client *minio.Client
sem *backend.Semaphore
cfg Config
backend.Layout
}
// make sure that *Backend implements restic.Backend
var _ restic.Backend = &Backend{}
const defaultLayout = "default"
func open(cfg Config) (*Backend, error) {
debug.Log("open, config %#v", cfg)
if cfg.MaxRetries > 0 {
minio.MaxRetry = int(cfg.MaxRetries)
}
var client *minio.Client
var err error
if cfg.KeyID == "" || cfg.Secret == "" {
debug.Log("key/secret not found, trying to get them from IAM")
creds := credentials.NewIAM("")
client, err = minio.NewWithCredentials(cfg.Endpoint, creds, !cfg.UseHTTP, "")
if err != nil {
return nil, errors.Wrap(err, "minio.NewWithCredentials")
}
} else {
debug.Log("key/secret found")
client, err = minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
if err != nil {
return nil, errors.Wrap(err, "minio.New")
}
}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
be := &Backend{
client: client,
sem: sem,
cfg: cfg,
}
client.SetCustomTransport(backend.Transport())
l, err := backend.ParseLayout(be, cfg.Layout, defaultLayout, cfg.Prefix)
if err != nil {
return nil, err
}
be.Layout = l
return be, nil
}
// Open opens the S3 backend at bucket and region.
func Open(cfg Config) (restic.Backend, error) {
return open(cfg)
}
// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(cfg Config) (restic.Backend, error) {
be, err := open(cfg)
if err != nil {
return nil, errors.Wrap(err, "open")
}
found, err := be.client.BucketExists(cfg.Bucket)
if err != nil {
debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
if !found {
// create new bucket with default ACL in default region
err = be.client.MakeBucket(cfg.Bucket, "")
if err != nil {
return nil, errors.Wrap(err, "client.MakeBucket")
}
}
return be, nil
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (be *Backend) IsNotExist(err error) bool {
debug.Log("IsNotExist(%T, %#v)", err, err)
if os.IsNotExist(errors.Cause(err)) {
return true
}
if e, ok := errors.Cause(err).(minio.ErrorResponse); ok && e.Code == "NoSuchKey" {
return true
}
return false
}
// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}
type fileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
func (fi fileInfo) Name() string { return fi.name } // base name of the file
func (fi fileInfo) Size() int64 { return fi.size } // length in bytes for regular files; system-dependent for others
func (fi fileInfo) Mode() os.FileMode { return fi.mode } // file mode bits
func (fi fileInfo) ModTime() time.Time { return fi.modTime } // modification time
func (fi fileInfo) IsDir() bool { return fi.isDir } // abbreviation for Mode().IsDir()
func (fi fileInfo) Sys() interface{} { return nil } // underlying data source (can return nil)
// ReadDir returns the entries for a directory.
func (be *Backend) ReadDir(dir string) (list []os.FileInfo, err error) {
debug.Log("ReadDir(%v)", dir)
// make sure dir ends with a slash
if dir[len(dir)-1] != '/' {
dir += "/"
}
done := make(chan struct{})
defer close(done)
for obj := range be.client.ListObjects(be.cfg.Bucket, dir, false, done) {
if obj.Key == "" {
continue
}
name := strings.TrimPrefix(obj.Key, dir)
if name == "" {
return nil, errors.Errorf("invalid key name %v, removing prefix %v yielded empty string", obj.Key, dir)
}
entry := fileInfo{
name: name,
size: obj.Size,
modTime: obj.LastModified,
}
if name[len(name)-1] == '/' {
entry.isDir = true
entry.mode = os.ModeDir | 0755
entry.name = name[:len(name)-1]
} else {
entry.mode = 0644
}
list = append(list, entry)
}
return list, nil
}
// Location returns this backend's location (the bucket name).
func (be *Backend) Location() string {
return be.Join(be.cfg.Bucket, be.cfg.Prefix)
}
// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
return be.cfg.Prefix
}
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
debug.Log("Save %v", h)
if err := h.Valid(); err != nil {
return err
}
objName := be.Filename(h)
// Check key does not already exist
_, err = be.client.StatObject(be.cfg.Bucket, objName)
if err == nil {
debug.Log("%v already exists", h)
return errors.New("key already exists")
}
// prevent the HTTP client from closing a file
rd = ioutil.NopCloser(rd)
be.sem.GetToken()
debug.Log("PutObject(%v, %v)", be.cfg.Bucket, objName)
n, err := be.client.PutObject(be.cfg.Bucket, objName, rd, "application/octet-stream")
be.sem.ReleaseToken()
debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err)
return errors.Wrap(err, "client.PutObject")
}
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
io.ReadCloser
f func()
}
func (wr wrapReader) Close() error {
err := wr.ReadCloser.Close()
wr.f()
return err
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
objName := be.Filename(h)
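// HTTP Range headers are inclusive on both ends, hence the -1 below when a
// length is given; with length == 0 everything from offset onwards is read.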
byteRange := fmt.Sprintf("bytes=%d-", offset)
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
headers := minio.NewGetReqHeaders()
headers.Add("Range", byteRange)
be.sem.GetToken()
debug.Log("Load(%v) send range %v", h, byteRange)
coreClient := minio.Core{Client: be.client}
rd, _, err := coreClient.GetObject(be.cfg.Bucket, objName, headers)
if err != nil {
be.sem.ReleaseToken()
return nil, err
}
closeRd := wrapReader{
ReadCloser: rd,
f: func() {
debug.Log("Close()")
be.sem.ReleaseToken()
},
}
return closeRd, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
objName := be.Filename(h)
var obj *minio.Object
obj, err = be.client.GetObject(be.cfg.Bucket, objName)
if err != nil {
debug.Log("GetObject() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
fi, err := obj.Stat()
if err != nil {
debug.Log("Stat() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: fi.Size}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
found := false
objName := be.Filename(h)
_, err := be.client.StatObject(be.cfg.Bucket, objName)
if err == nil {
found = true
}
// any error from StatObject is treated as "object does not exist"
return found, nil
}
// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
err := be.client.RemoveObject(be.cfg.Bucket, objName)
debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
if be.IsNotExist(err) {
err = nil
}
return errors.Wrap(err, "client.RemoveObject")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If ctx is cancelled, sending stops.
func (be *Backend) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("listing %v", t)
ch := make(chan string)
prefix := be.Dirname(restic.Handle{Type: t})
// make sure prefix ends with a slash
if prefix[len(prefix)-1] != '/' {
prefix += "/"
}
listresp := be.client.ListObjects(be.cfg.Bucket, prefix, true, ctx.Done())
go func() {
defer close(ch)
for obj := range listresp {
m := strings.TrimPrefix(obj.Key, prefix)
if m == "" {
continue
}
select {
case ch <- path.Base(m):
case <-ctx.Done():
return
}
}
}()
return ch
}
// removeKeys removes all blobs of the given file type.
func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
for key := range be.List(ctx, t) {
err := be.Remove(ctx, restic.Handle{Type: t, Name: key})
if err != nil {
return err
}
}
return nil
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(ctx, t)
if err != nil {
return err
}
}
return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
}
// Close does nothing
func (be *Backend) Close() error { return nil }
// Rename moves a file based on the new layout l.
func (be *Backend) Rename(h restic.Handle, l backend.Layout) error {
debug.Log("Rename %v to %v", h, l)
oldname := be.Filename(h)
newname := l.Filename(h)
if oldname == newname {
debug.Log(" %v is already renamed", newname)
return nil
}
debug.Log(" %v -> %v", oldname, newname)
src := minio.NewSourceInfo(be.cfg.Bucket, oldname, nil)
dst, err := minio.NewDestinationInfo(be.cfg.Bucket, newname, nil, nil)
if err != nil {
return errors.Wrap(err, "NewDestinationInfo")
}
err = be.client.CopyObject(dst, src)
if err != nil && be.IsNotExist(err) {
debug.Log("copy failed: %v, seems to already have been renamed", err)
return nil
}
if err != nil {
debug.Log("copy failed: %v", err)
return err
}
return be.client.RemoveObject(be.cfg.Bucket, oldname)
}
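As a usage sketch (not part of this commit), the backend can be created from code like this; the import path follows the repository's vendored layout used by the tests below, and the endpoint, bucket and credentials are placeholders:

package main

import (
	"log"

	"restic/backend/s3"
)

func main() {
	cfg := s3.NewConfig() // defaults, e.g. Connections: 5
	cfg.Endpoint = "s3.amazonaws.com" // placeholder
	cfg.Bucket = "my-bucket"          // placeholder
	cfg.Prefix = "restic"
	cfg.KeyID = "..." // placeholder credentials
	cfg.Secret = "..."

	be, err := s3.Create(cfg) // creates the bucket if it does not exist yet
	if err != nil {
		log.Fatal(err)
	}
	defer be.Close()
}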


@@ -0,0 +1,320 @@
package s3_test
import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"restic"
"testing"
"time"
"restic/backend/s3"
"restic/backend/test"
. "restic/test"
)
func mkdir(t testing.TB, dir string) {
err := os.MkdirAll(dir, 0700)
if err != nil {
t.Fatal(err)
}
}
func runMinio(ctx context.Context, t testing.TB, dir, key, secret string) func() {
mkdir(t, filepath.Join(dir, "config"))
mkdir(t, filepath.Join(dir, "root"))
cmd := exec.CommandContext(ctx, "minio",
"server",
"--address", "127.0.0.1:9000",
"--config-dir", filepath.Join(dir, "config"),
filepath.Join(dir, "root"))
cmd.Env = append(os.Environ(),
"MINIO_ACCESS_KEY="+key,
"MINIO_SECRET_KEY="+secret,
)
cmd.Stderr = os.Stderr
err := cmd.Start()
if err != nil {
t.Fatal(err)
}
// wait until the TCP port is reachable
var success bool
for i := 0; i < 100; i++ {
time.Sleep(200 * time.Millisecond)
c, err := net.Dial("tcp", "localhost:9000")
if err == nil {
success = true
if err := c.Close(); err != nil {
t.Fatal(err)
}
break
}
}
if !success {
t.Fatal("unable to connect to minio server")
return nil
}
return func() {
err = cmd.Process.Kill()
if err != nil {
t.Fatal(err)
}
// ignore errors, we've killed the process
_ = cmd.Wait()
}
}
func newRandomCredentials(t testing.TB) (key, secret string) {
buf := make([]byte, 10)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
t.Fatal(err)
}
key = hex.EncodeToString(buf)
_, err = io.ReadFull(rand.Reader, buf)
if err != nil {
t.Fatal(err)
}
secret = hex.EncodeToString(buf)
return key, secret
}
type MinioTestConfig struct {
s3.Config
tempdir string
removeTempdir func()
stopServer func()
}
func createS3(t testing.TB, cfg MinioTestConfig) (be restic.Backend, err error) {
for i := 0; i < 10; i++ {
be, err = s3.Create(cfg.Config)
if err != nil {
t.Logf("s3 open: try %d: error %v", i, err)
time.Sleep(500 * time.Millisecond)
continue
}
break
}
return be, err
}
func newMinioTestSuite(ctx context.Context, t testing.TB) *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
cfg := MinioTestConfig{}
cfg.tempdir, cfg.removeTempdir = TempDir(t)
key, secret := newRandomCredentials(t)
cfg.stopServer = runMinio(ctx, t, cfg.tempdir, key, secret)
cfg.Config = s3.NewConfig()
cfg.Config.Endpoint = "localhost:9000"
cfg.Config.Bucket = "restictestbucket"
cfg.Config.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
cfg.Config.UseHTTP = true
cfg.Config.KeyID = key
cfg.Config.Secret = secret
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(MinioTestConfig)
be, err := createS3(t, cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if exists {
return nil, errors.New("config already exists")
}
return be, nil
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(MinioTestConfig)
return s3.Open(cfg.Config)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(MinioTestConfig)
if cfg.stopServer != nil {
cfg.stopServer()
}
if cfg.removeTempdir != nil {
cfg.removeTempdir()
}
return nil
},
}
}
func TestBackendMinio(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/s3.TestBackendMinio")
}
}()
// try to find a minio binary
_, err := exec.LookPath("minio")
if err != nil {
t.Skip(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
newMinioTestSuite(ctx, t).RunTests(t)
}
func BenchmarkBackendMinio(t *testing.B) {
// try to find a minio binary
_, err := exec.LookPath("minio")
if err != nil {
t.Skip(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
newMinioTestSuite(ctx, t).RunBenchmarks(t)
}
func newS3TestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
s3cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY"))
if err != nil {
return nil, err
}
cfg := s3cfg.(s3.Config)
cfg.KeyID = os.Getenv("RESTIC_TEST_S3_KEY")
cfg.Secret = os.Getenv("RESTIC_TEST_S3_SECRET")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(s3.Config)
be, err := s3.Create(cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if exists {
return nil, errors.New("config already exists")
}
return be, nil
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(s3.Config)
return s3.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(s3.Config)
be, err := s3.Open(cfg)
if err != nil {
return err
}
if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
return err
}
return nil
},
}
}
func TestBackendS3(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/s3.TestBackendS3")
}
}()
vars := []string{
"RESTIC_TEST_S3_KEY",
"RESTIC_TEST_S3_SECRET",
"RESTIC_TEST_S3_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newS3TestSuite(t).RunTests(t)
}
func BenchmarkBackendS3(t *testing.B) {
vars := []string{
"RESTIC_TEST_S3_KEY",
"RESTIC_TEST_S3_SECRET",
"RESTIC_TEST_S3_REPOSITORY",
}
for _, v := range vars {
if os.Getenv(v) == "" {
t.Skipf("environment variable %v not set", v)
return
}
}
t.Logf("run tests")
newS3TestSuite(t).RunBenchmarks(t)
}


@@ -0,0 +1,28 @@
package backend
import "restic/errors"
// Semaphore limits access to a restricted resource.
type Semaphore struct {
ch chan struct{}
}
// NewSemaphore returns a new semaphore with capacity n.
func NewSemaphore(n uint) (*Semaphore, error) {
if n == 0 {
return nil, errors.New("capacity must be greater than zero")
}
return &Semaphore{
ch: make(chan struct{}, n),
}, nil
}
// GetToken blocks until a Token is available.
func (s *Semaphore) GetToken() {
s.ch <- struct{}{}
}
// ReleaseToken returns a token.
func (s *Semaphore) ReleaseToken() {
<-s.ch
}
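A short usage sketch (illustrative only), mirroring the GetToken/ReleaseToken pattern the s3 backend above wraps around PutObject:

package main

import (
	"fmt"
	"sync"

	"restic/backend"
)

func main() {
	// allow at most two concurrent workers
	sem, err := backend.NewSemaphore(2)
	if err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		sem.GetToken() // blocks while two workers hold tokens
		go func(n int) {
			defer wg.Done()
			defer sem.ReleaseToken()
			fmt.Println("working on item", n)
		}(i)
	}
	wg.Wait()
}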


@@ -0,0 +1,72 @@
package sftp
import (
"net/url"
"path"
"strings"
"restic/errors"
"restic/options"
)
// Config collects all information required to connect to an sftp server.
type Config struct {
User, Host, Path string
Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"`
Command string `option:"command" help:"specify command to create sftp connection"`
}
func init() {
options.Register("sftp", Config{})
}
// ParseConfig parses the string s and extracts the sftp config. The
// supported configuration formats are sftp://user@host/directory
// and sftp:user@host:directory. The directory is cleaned with path.Clean and can
// be an absolute path if it starts with a '/' (e.g.
// sftp://user@host//absolute and sftp:user@host:/absolute).
func ParseConfig(s string) (interface{}, error) {
var user, host, dir string
switch {
case strings.HasPrefix(s, "sftp://"):
// parse the "sftp://user@host/path" url format
url, err := url.Parse(s)
if err != nil {
return nil, errors.Wrap(err, "url.Parse")
}
if url.User != nil {
user = url.User.Username()
}
host = url.Host
dir = url.Path
if dir == "" {
return nil, errors.Errorf("invalid backend %q, no directory specified", s)
}
dir = dir[1:]
case strings.HasPrefix(s, "sftp:"):
// parse the sftp:user@host:path format, which means we'll get
// "user@host:path" in s
s = s[5:]
// split user@host and path at the colon
data := strings.SplitN(s, ":", 2)
if len(data) < 2 {
return nil, errors.New("sftp: invalid format, hostname or path not found")
}
host = data[0]
dir = data[1]
// split user and host at the "@"
data = strings.SplitN(host, "@", 2)
if len(data) == 2 {
user = data[0]
host = data[1]
}
default:
return nil, errors.New(`invalid format, does not start with "sftp:"`)
}
return Config{
User: user,
Host: host,
Path: path.Clean(dir),
}, nil
}


@@ -0,0 +1,90 @@
package sftp
import "testing"
var configTests = []struct {
in string
cfg Config
}{
// first form, user specified sftp://user@host/dir
{
"sftp://user@host/dir/subdir",
Config{User: "user", Host: "host", Path: "dir/subdir"},
},
{
"sftp://host/dir/subdir",
Config{Host: "host", Path: "dir/subdir"},
},
{
"sftp://host//dir/subdir",
Config{Host: "host", Path: "/dir/subdir"},
},
{
"sftp://host:10022//dir/subdir",
Config{Host: "host:10022", Path: "/dir/subdir"},
},
{
"sftp://user@host:10022//dir/subdir",
Config{User: "user", Host: "host:10022", Path: "/dir/subdir"},
},
{
"sftp://user@host/dir/subdir/../other",
Config{User: "user", Host: "host", Path: "dir/other"},
},
{
"sftp://user@host/dir///subdir",
Config{User: "user", Host: "host", Path: "dir/subdir"},
},
// second form, user specified sftp:user@host:/dir
{
"sftp:user@host:/dir/subdir",
Config{User: "user", Host: "host", Path: "/dir/subdir"},
},
{
"sftp:host:../dir/subdir",
Config{Host: "host", Path: "../dir/subdir"},
},
{
"sftp:user@host:dir/subdir:suffix",
Config{User: "user", Host: "host", Path: "dir/subdir:suffix"},
},
{
"sftp:user@host:dir/subdir/../other",
Config{User: "user", Host: "host", Path: "dir/other"},
},
{
"sftp:user@host:dir///subdir",
Config{User: "user", Host: "host", Path: "dir/subdir"},
},
}
func TestParseConfig(t *testing.T) {
for i, test := range configTests {
cfg, err := ParseConfig(test.in)
if err != nil {
t.Errorf("test %d:%s failed: %v", i, test.in, err)
continue
}
if cfg != test.cfg {
t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
i, test.in, test.cfg, cfg)
continue
}
}
}
var configTestsInvalid = []string{
"sftp://host:dir",
}
func TestParseConfigInvalid(t *testing.T) {
for i, test := range configTestsInvalid {
_, err := ParseConfig(test)
if err == nil {
t.Errorf("test %d: invalid config %s did not return an error", i, test)
continue
}
}
}


@@ -0,0 +1,3 @@
// Package sftp implements repository storage in a directory on a remote server
// via the sftp protocol.
package sftp


@@ -0,0 +1,87 @@
package sftp_test
import (
"context"
"fmt"
"path/filepath"
"restic"
"restic/backend/sftp"
. "restic/test"
"testing"
)
func TestLayout(t *testing.T) {
if sftpServer == "" {
t.Skip("sftp server binary not available")
}
path, cleanup := TempDir(t)
defer cleanup()
var tests = []struct {
filename string
layout string
failureExpected bool
datafiles map[string]bool
}{
{"repo-layout-default.tar.gz", "", false, map[string]bool{
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
}},
{"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
}},
}
for _, test := range tests {
t.Run(test.filename, func(t *testing.T) {
SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))
repo := filepath.Join(path, "repo")
be, err := sftp.Open(sftp.Config{
Command: fmt.Sprintf("%q -e", sftpServer),
Path: repo,
Layout: test.layout,
})
if err != nil {
t.Fatal(err)
}
if be == nil {
t.Fatalf("Open() returned nil but no error")
}
datafiles := make(map[string]bool)
for id := range be.List(context.TODO(), restic.DataFile) {
datafiles[id] = false
}
if len(datafiles) == 0 {
t.Errorf("List() returned zero data files")
}
for id := range test.datafiles {
if _, ok := datafiles[id]; !ok {
t.Errorf("datafile with id %v not found", id)
}
datafiles[id] = true
}
for id, v := range datafiles {
if !v {
t.Errorf("unexpected id %v found", id)
}
}
if err = be.Close(); err != nil {
t.Errorf("Close() returned error %v", err)
}
RemoveAll(t, filepath.Join(path, "repo"))
})
}
}


@@ -0,0 +1,497 @@
package sftp
import (
"bufio"
"context"
"fmt"
"io"
"os"
"os/exec"
"path"
"restic"
"strings"
"time"
"restic/errors"
"restic/backend"
"restic/debug"
"github.com/pkg/sftp"
)
// SFTP is a backend in a directory accessed via SFTP.
type SFTP struct {
c *sftp.Client
p string
cmd *exec.Cmd
result <-chan error
backend.Layout
Config
}
var _ restic.Backend = &SFTP{}
const defaultLayout = "default"
func startClient(program string, args ...string) (*SFTP, error) {
debug.Log("start client %v %v", program, args)
// Connect to a remote host and request the sftp subsystem via the 'ssh'
// command. This assumes that passwordless login is correctly configured.
cmd := exec.Command(program, args...)
// prefix the errors with the program name
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StderrPipe")
}
go func() {
sc := bufio.NewScanner(stderr)
for sc.Scan() {
fmt.Fprintf(os.Stderr, "subprocess %v: %v\n", program, sc.Text())
}
}()
// ignore signals sent to the parent (e.g. SIGINT)
cmd.SysProcAttr = ignoreSigIntProcAttr()
// get stdin and stdout
wr, err := cmd.StdinPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StdinPipe")
}
rd, err := cmd.StdoutPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StdoutPipe")
}
// start the process
if err := cmd.Start(); err != nil {
return nil, errors.Wrap(err, "cmd.Start")
}
// wait in a different goroutine
ch := make(chan error, 1)
go func() {
err := cmd.Wait()
debug.Log("ssh command exited, err %v", err)
ch <- errors.Wrap(err, "cmd.Wait")
}()
// open the SFTP session
client, err := sftp.NewClientPipe(rd, wr)
if err != nil {
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
}
return &SFTP{c: client, cmd: cmd, result: ch}, nil
}
// clientError returns an error if the client has exited. Otherwise, nil is
// returned immediately.
func (r *SFTP) clientError() error {
select {
case err := <-r.result:
debug.Log("client has exited with err %v", err)
return err
default:
}
return nil
}
// Open opens an sftp backend as described by the config by running
// "ssh" with the appropriate arguments (or cfg.Command, if set).
func Open(cfg Config) (*SFTP, error) {
debug.Log("open backend with config %#v", cfg)
cmd, args, err := buildSSHCommand(cfg)
if err != nil {
return nil, err
}
sftp, err := startClient(cmd, args...)
if err != nil {
debug.Log("unable to start program: %v", err)
return nil, err
}
sftp.Layout, err = backend.ParseLayout(sftp, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}
debug.Log("layout: %v\n", sftp.Layout)
if err := sftp.checkDataSubdirs(); err != nil {
debug.Log("checkDataSubdirs returned %v", err)
return nil, err
}
sftp.Config = cfg
sftp.p = cfg.Path
return sftp, nil
}
func (r *SFTP) checkDataSubdirs() error {
datadir := r.Dirname(restic.Handle{Type: restic.DataFile})
// check if all paths for data/ exist
entries, err := r.c.ReadDir(datadir)
if err != nil {
return err
}
subdirs := make(map[string]struct{}, len(entries))
for _, entry := range entries {
subdirs[entry.Name()] = struct{}{}
}
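// data files are spread over 256 subdirectories named 00..ff (two hex digits)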
for i := 0; i < 256; i++ {
subdir := fmt.Sprintf("%02x", i)
if _, ok := subdirs[subdir]; !ok {
debug.Log("subdir %v is missing, creating", subdir)
err := r.mkdirAll(path.Join(datadir, subdir), backend.Modes.Dir)
if err != nil {
return err
}
}
}
return nil
}
func (r *SFTP) mkdirAllDataSubdirs() error {
for _, d := range r.Paths() {
err := r.mkdirAll(d, backend.Modes.Dir)
debug.Log("mkdirAll %v -> %v", d, err)
if err != nil {
return err
}
}
return nil
}
// Join combines path components with slashes (according to the sftp spec).
func (r *SFTP) Join(p ...string) string {
return path.Join(p...)
}
// ReadDir returns the entries for a directory.
func (r *SFTP) ReadDir(dir string) ([]os.FileInfo, error) {
return r.c.ReadDir(dir)
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (r *SFTP) IsNotExist(err error) bool {
if os.IsNotExist(err) {
return true
}
statusError, ok := err.(*sftp.StatusError)
if !ok {
return false
}
return statusError.Error() == `sftp: "No such file" (SSH_FX_NO_SUCH_FILE)`
}
func buildSSHCommand(cfg Config) (cmd string, args []string, err error) {
if cfg.Command != "" {
return SplitShellArgs(cfg.Command)
}
cmd = "ssh"
hostport := strings.Split(cfg.Host, ":")
args = []string{hostport[0]}
if len(hostport) > 1 {
args = append(args, "-p", hostport[1])
}
if cfg.User != "" {
args = append(args, "-l")
args = append(args, cfg.User)
}
args = append(args, "-s")
args = append(args, "sftp")
return cmd, args, nil
}
// Create creates an sftp backend as described by the config by running
// "ssh" with the appropriate arguments (or cfg.Command, if set).
func Create(cfg Config) (*SFTP, error) {
cmd, args, err := buildSSHCommand(cfg)
if err != nil {
return nil, err
}
sftp, err := startClient(cmd, args...)
if err != nil {
debug.Log("unable to start program: %v", err)
return nil, err
}
sftp.Layout, err = backend.ParseLayout(sftp, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}
// test if config file already exists
_, err = sftp.c.Lstat(Join(cfg.Path, backend.Paths.Config))
if err == nil {
return nil, errors.New("config file already exists")
}
// create paths for data and refs
if err = sftp.mkdirAllDataSubdirs(); err != nil {
return nil, err
}
err = sftp.Close()
if err != nil {
return nil, errors.Wrap(err, "Close")
}
// open backend
return Open(cfg)
}
// Location returns this backend's location (the directory name).
func (r *SFTP) Location() string {
return r.p
}
func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
// check if directory already exists
fi, err := r.c.Lstat(dir)
if err == nil {
if fi.IsDir() {
return nil
}
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
}
// create parent directories
errMkdirAll := r.mkdirAll(path.Dir(dir), backend.Modes.Dir)
// create directory
errMkdir := r.c.Mkdir(dir)
// test if directory was created successfully
fi, err = r.c.Lstat(dir)
if err != nil {
// return previous errors
return errors.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir)
}
if !fi.IsDir() {
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
}
// set mode
return r.c.Chmod(dir, mode)
}
// Join joins the given paths and cleans them afterwards. This always uses
// forward slashes, which is required by sftp.
func Join(parts ...string) string {
return path.Clean(path.Join(parts...))
}
// Save stores data in the backend at the handle.
func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
debug.Log("Save %v", h)
if err := r.clientError(); err != nil {
return err
}
if err := h.Valid(); err != nil {
return err
}
filename := r.Filename(h)
// create new file
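// O_EXCL makes the open fail if the file already exists, so an existing blob is never overwritten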
f, err := r.c.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
if r.IsNotExist(errors.Cause(err)) {
// create the locks dir, then try again
err = r.mkdirAll(r.Dirname(h), backend.Modes.Dir)
if err != nil {
return errors.Wrap(err, "MkdirAll")
}
return r.Save(ctx, h, rd)
}
if err != nil {
return errors.Wrap(err, "OpenFile")
}
// save data
_, err = io.Copy(f, rd)
if err != nil {
_ = f.Close()
return errors.Wrap(err, "Write")
}
err = f.Close()
if err != nil {
return errors.Wrap(err, "Close")
}
// set mode to read-only
fi, err := r.c.Lstat(filename)
if err != nil {
return errors.Wrap(err, "Lstat")
}
err = r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
return errors.Wrap(err, "Chmod")
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (r *SFTP) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
f, err := r.c.Open(r.Filename(h))
if err != nil {
return nil, err
}
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
_ = f.Close()
return nil, err
}
}
if length > 0 {
return backend.LimitReadCloser(f, int64(length)), nil
}
return f, nil
}
// Stat returns information about a blob.
func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
debug.Log("Stat(%v)", h)
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
fi, err := r.c.Lstat(r.Filename(h))
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Lstat")
}
return restic.FileInfo{Size: fi.Size()}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(ctx context.Context, h restic.Handle) (bool, error) {
debug.Log("Test(%v)", h)
if err := r.clientError(); err != nil {
return false, err
}
_, err := r.c.Lstat(r.Filename(h))
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}
if err != nil {
return false, errors.Wrap(err, "Lstat")
}
return true, nil
}
// Remove removes the content stored at name.
func (r *SFTP) Remove(ctx context.Context, h restic.Handle) error {
debug.Log("Remove(%v)", h)
if err := r.clientError(); err != nil {
return err
}
return r.c.Remove(r.Filename(h))
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If ctx is cancelled, sending stops.
func (r *SFTP) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("List %v", t)
ch := make(chan string)
go func() {
defer close(ch)
walker := r.c.Walk(r.Basedir(t))
for walker.Step() {
if walker.Err() != nil {
continue
}
if !walker.Stat().Mode().IsRegular() {
continue
}
select {
case ch <- path.Base(walker.Path()):
case <-ctx.Done():
return
}
}
}()
return ch
}
var closeTimeout = 2 * time.Second
// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
debug.Log("")
if r == nil {
return nil
}
err := r.c.Close()
debug.Log("Close returned error %v", err)
// wait for closeTimeout before killing the process
select {
case err := <-r.result:
return err
case <-time.After(closeTimeout):
}
if err := r.cmd.Process.Kill(); err != nil {
return err
}
// get the error, but ignore it
<-r.result
return nil
}
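A usage sketch with a hypothetical host and repository path; buildSSHCommand above turns this config into the "ssh host -p 2222 -l user -s sftp" command line:

package main

import (
	"log"

	"restic/backend/sftp"
)

func main() {
	be, err := sftp.Open(sftp.Config{
		User: "user",      // placeholder
		Host: "host:2222", // the port is split off and passed as -p
		Path: "restic-repo",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer be.Close()
}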


@@ -0,0 +1,95 @@
package sftp_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"restic"
"restic/backend/sftp"
"restic/backend/test"
"restic/errors"
"strings"
"testing"
. "restic/test"
)
func findSFTPServerBinary() string {
for _, dir := range strings.Split(TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
_, err := os.Stat(testpath)
if !os.IsNotExist(errors.Cause(err)) {
return testpath
}
}
return ""
}
var sftpServer = findSFTPServerBinary()
func newTestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
dir, err := ioutil.TempDir(TestTempDir, "restic-test-sftp-")
if err != nil {
t.Fatal(err)
}
t.Logf("create new backend at %v", dir)
cfg := sftp.Config{
Path: dir,
Command: fmt.Sprintf("%q -e", sftpServer),
}
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(sftp.Config)
return sftp.Create(cfg)
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(sftp.Config)
return sftp.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(sftp.Config)
if !TestCleanupTempDirs {
t.Logf("leaving test backend dir at %v", cfg.Path)
}
RemoveAll(t, cfg.Path)
return nil
},
}
}
func TestBackendSFTP(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/sftp.TestBackendSFTP")
}
}()
if sftpServer == "" {
t.Skip("sftp server binary not found")
}
newTestSuite(t).RunTests(t)
}
func BenchmarkBackendSFTP(t *testing.B) {
if sftpServer == "" {
t.Skip("sftp server binary not found")
}
newTestSuite(t).RunBenchmarks(t)
}


@@ -0,0 +1,13 @@
// +build !windows
package sftp
import (
"syscall"
)
// ignoreSigIntProcAttr returns a syscall.SysProcAttr that
// disables SIGINT on parent.
func ignoreSigIntProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{Setsid: true}
}


@@ -0,0 +1,11 @@
package sftp
import (
"syscall"
)
// ignoreSigIntProcAttr returns a default syscall.SysProcAttr
// on Windows.
func ignoreSigIntProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{}
}


@@ -0,0 +1,77 @@
package sftp
import (
"restic/errors"
"unicode"
)
// shellSplitter splits a command string into separate arguments. It supports
// single and double quoted strings.
type shellSplitter struct {
quote rune
lastChar rune
}
func (s *shellSplitter) isSplitChar(c rune) bool {
// only test for quotes if the last char was not a backslash
if s.lastChar != '\\' {
// quote ended
if s.quote != 0 && c == s.quote {
s.quote = 0
return true
}
// quote starts
if s.quote == 0 && (c == '"' || c == '\'') {
s.quote = c
return true
}
}
s.lastChar = c
// within quote
if s.quote != 0 {
return false
}
// outside quote
return c == '\\' || unicode.IsSpace(c)
}
// SplitShellArgs returns the list of arguments from a shell command string.
func SplitShellArgs(data string) (cmd string, args []string, err error) {
s := &shellSplitter{}
// derived from strings.SplitFunc
fieldStart := -1 // Set to -1 when looking for start of field.
for i, r := range data {
if s.isSplitChar(r) {
if fieldStart >= 0 {
args = append(args, data[fieldStart:i])
fieldStart = -1
}
} else if fieldStart == -1 {
fieldStart = i
}
}
if fieldStart >= 0 { // Last field might end at EOF.
args = append(args, data[fieldStart:])
}
switch s.quote {
case '\'':
return "", nil, errors.New("single-quoted string not terminated")
case '"':
return "", nil, errors.New("double-quoted string not terminated")
}
if len(args) == 0 {
return "", nil, errors.New("command string is empty")
}
cmd, args = args[0], args[1:]
return cmd, args, nil
}
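For illustration, splitting the kind of command string the sftp tests below pass via Config.Command; the server path is a placeholder:

package main

import (
	"fmt"

	"restic/backend/sftp"
)

func main() {
	cmd, args, err := sftp.SplitShellArgs(`"/usr/lib/ssh/sftp-server" -e`)
	if err != nil {
		panic(err)
	}
	fmt.Println(cmd)  // /usr/lib/ssh/sftp-server
	fmt.Println(args) // [-e]
}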


@@ -0,0 +1,115 @@
package sftp
import (
"reflect"
"testing"
)
func TestShellSplitter(t *testing.T) {
var tests = []struct {
data string
cmd string
args []string
}{
{
`foo`,
"foo", []string{},
},
{
`'foo'`,
"foo", []string{},
},
{
`foo bar baz`,
"foo", []string{"bar", "baz"},
},
{
`foo 'bar' baz`,
"foo", []string{"bar", "baz"},
},
{
`'bar box' baz`,
"bar box", []string{"baz"},
},
{
`"bar 'box'" baz`,
"bar 'box'", []string{"baz"},
},
{
`'bar "box"' baz`,
`bar "box"`, []string{"baz"},
},
{
`\"bar box baz`,
`"bar`, []string{"box", "baz"},
},
{
`"bar/foo/x" "box baz"`,
"bar/foo/x", []string{"box baz"},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
cmd, args, err := SplitShellArgs(test.data)
if err != nil {
t.Fatal(err)
}
if cmd != test.cmd {
t.Fatalf("wrong cmd returned, want:\n %#v\ngot:\n %#v",
test.cmd, cmd)
}
if !reflect.DeepEqual(args, test.args) {
t.Fatalf("wrong args returned, want:\n %#v\ngot:\n %#v",
test.args, args)
}
})
}
}
func TestShellSplitterInvalid(t *testing.T) {
var tests = []struct {
data string
err string
}{
{
"foo'",
"single-quoted string not terminated",
},
{
`foo"`,
"double-quoted string not terminated",
},
{
"foo 'bar",
"single-quoted string not terminated",
},
{
`foo "bar`,
"double-quoted string not terminated",
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
cmd, args, err := SplitShellArgs(test.data)
if err == nil {
t.Fatalf("expected error not found: %v", test.err)
}
if err.Error() != test.err {
t.Fatalf("expected error not found, want:\n %q\ngot:\n %q", test.err, err.Error())
}
if cmd != "" {
t.Fatalf("splitter returned cmd from invalid data: %v", cmd)
}
if len(args) > 0 {
t.Fatalf("splitter returned fields from invalid data: %v", args)
}
})
}
}


@@ -0,0 +1,52 @@
package sftp
import (
"reflect"
"testing"
)
var sshcmdTests = []struct {
cfg Config
cmd string
args []string
}{
{
Config{User: "user", Host: "host", Path: "dir/subdir"},
"ssh",
[]string{"host", "-l", "user", "-s", "sftp"},
},
{
Config{Host: "host", Path: "dir/subdir"},
"ssh",
[]string{"host", "-s", "sftp"},
},
{
Config{Host: "host:10022", Path: "/dir/subdir"},
"ssh",
[]string{"host", "-p", "10022", "-s", "sftp"},
},
{
Config{User: "user", Host: "host:10022", Path: "/dir/subdir"},
"ssh",
[]string{"host", "-p", "10022", "-l", "user", "-s", "sftp"},
},
}
func TestBuildSSHCommand(t *testing.T) {
for _, test := range sshcmdTests {
t.Run("", func(t *testing.T) {
cmd, args, err := buildSSHCommand(test.cfg)
if err != nil {
t.Fatal(err)
}
if cmd != test.cmd {
t.Fatalf("cmd: want %v, got %v", test.cmd, cmd)
}
if !reflect.DeepEqual(test.args, args) {
t.Fatalf("wrong args, want:\n %v\ngot:\n %v", test.args, args)
}
})
}
}


@@ -0,0 +1,109 @@
package swift
import (
"os"
"restic/errors"
"restic/options"
"strings"
)
// Config contains the configuration needed to access a container on a swift server.
type Config struct {
UserName string
Domain string
APIKey string
AuthURL string
Region string
Tenant string
TenantID string
TenantDomain string
TrustID string
StorageURL string
AuthToken string
Container string
Prefix string
DefaultContainerPolicy string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
func init() {
options.Register("swift", Config{})
}
// NewConfig returns a new config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
}
}
// ParseConfig parses the string s and extracts the swift container name and prefix.
func ParseConfig(s string) (interface{}, error) {
data := strings.SplitN(s, ":", 3)
if len(data) != 3 {
return nil, errors.New("invalid URL, expected: swift:container-name:/[prefix]")
}
scheme, container, prefix := data[0], data[1], data[2]
if scheme != "swift" {
return nil, errors.Errorf("unexpected prefix: %s", data[0])
}
if len(prefix) == 0 {
return nil, errors.Errorf("prefix is empty")
}
if prefix[0] != '/' {
return nil, errors.Errorf("prefix does not start with slash (/)")
}
prefix = prefix[1:]
cfg := NewConfig()
cfg.Container = container
cfg.Prefix = prefix
return cfg, nil
}
// ApplyEnvironment saves values from the environment to the config.
func ApplyEnvironment(prefix string, cfg interface{}) error {
c := cfg.(*Config)
for _, val := range []struct {
s *string
env string
}{
// v2/v3 specific
{&c.UserName, prefix + "OS_USERNAME"},
{&c.APIKey, prefix + "OS_PASSWORD"},
{&c.Region, prefix + "OS_REGION_NAME"},
{&c.AuthURL, prefix + "OS_AUTH_URL"},
// v3 specific
{&c.Domain, prefix + "OS_USER_DOMAIN_NAME"},
{&c.Tenant, prefix + "OS_PROJECT_NAME"},
{&c.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"},
// v2 specific
{&c.TenantID, prefix + "OS_TENANT_ID"},
{&c.Tenant, prefix + "OS_TENANT_NAME"},
// v1 specific
{&c.AuthURL, prefix + "ST_AUTH"},
{&c.UserName, prefix + "ST_USER"},
{&c.APIKey, prefix + "ST_KEY"},
// Manual authentication
{&c.StorageURL, prefix + "OS_STORAGE_URL"},
{&c.AuthToken, prefix + "OS_AUTH_TOKEN"},
{&c.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"},
} {
if *val.s == "" {
*val.s = os.Getenv(val.env)
}
}
return nil
}
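A parsing sketch; the container name is illustrative, and credentials are taken from the usual OS_* environment variables before opening the backend:

package main

import (
	"fmt"
	"log"

	"restic/backend/swift"
)

func main() {
	v, err := swift.ParseConfig("swift:mycontainer:/restic")
	if err != nil {
		log.Fatal(err)
	}
	cfg := v.(swift.Config)
	// fill UserName, APIKey, AuthURL etc. from the environment
	if err := swift.ApplyEnvironment("", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Container, cfg.Prefix) // mycontainer restic
}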


@@ -0,0 +1,72 @@
package swift
import "testing"
var configTests = []struct {
s string
cfg Config
}{
{
"swift:cnt1:/",
Config{
Container: "cnt1",
Prefix: "",
Connections: 5,
},
},
{
"swift:cnt2:/prefix",
Config{Container: "cnt2",
Prefix: "prefix",
Connections: 5,
},
},
{
"swift:cnt3:/prefix/longer",
Config{Container: "cnt3",
Prefix: "prefix/longer",
Connections: 5,
},
},
}
func TestParseConfig(t *testing.T) {
for _, test := range configTests {
t.Run("", func(t *testing.T) {
v, err := ParseConfig(test.s)
if err != nil {
t.Fatalf("parsing %q failed: %v", test.s, err)
}
cfg, ok := v.(Config)
if !ok {
t.Fatalf("wrong type returned, want Config, got %T", cfg)
}
if cfg != test.cfg {
t.Fatalf("wrong output for %q, want:\n %#v\ngot:\n %#v",
test.s, test.cfg, cfg)
}
})
}
}
var configTestsInvalid = []string{
"swift://hostname/container",
"swift:////",
"swift://",
"swift:////prefix",
"swift:container",
"swift:container:",
"swift:container/prefix",
}
func TestParseConfigInvalid(t *testing.T) {
for i, test := range configTestsInvalid {
_, err := ParseConfig(test)
if err == nil {
t.Errorf("test %d: invalid config %s did not return an error", i, test)
continue
}
}
}


@@ -0,0 +1,321 @@
package swift
import (
"context"
"fmt"
"io"
"net/http"
"path"
"path/filepath"
"restic"
"restic/backend"
"restic/debug"
"restic/errors"
"strings"
"time"
"github.com/ncw/swift"
)
const connLimit = 10
// beSwift is a backend which stores the data on a swift endpoint.
type beSwift struct {
conn *swift.Connection
sem *backend.Semaphore
container string // Container name
prefix string // Prefix of object names in the container
backend.Layout
}
// ensure statically that *beSwift implements restic.Backend.
var _ restic.Backend = &beSwift{}
// Open opens the swift backend at a container in region. The container is
// created if it does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
debug.Log("config %#v", cfg)
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
be := &beSwift{
conn: &swift.Connection{
UserName: cfg.UserName,
Domain: cfg.Domain,
ApiKey: cfg.APIKey,
AuthUrl: cfg.AuthURL,
Region: cfg.Region,
Tenant: cfg.Tenant,
TenantId: cfg.TenantID,
TenantDomain: cfg.TenantDomain,
TrustId: cfg.TrustID,
StorageUrl: cfg.StorageURL,
AuthToken: cfg.AuthToken,
ConnectTimeout: time.Minute,
Timeout: time.Minute,
Transport: backend.Transport(),
},
sem: sem,
container: cfg.Container,
prefix: cfg.Prefix,
Layout: &backend.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
},
}
// Authenticate if needed
if !be.conn.Authenticated() {
if err := be.conn.Authenticate(); err != nil {
return nil, errors.Wrap(err, "conn.Authenticate")
}
}
// Ensure container exists
switch _, _, err := be.conn.Container(be.container); err {
case nil:
// Container exists
case swift.ContainerNotFound:
err = be.createContainer(cfg.DefaultContainerPolicy)
if err != nil {
return nil, errors.Wrap(err, "beSwift.createContainer")
}
default:
return nil, errors.Wrap(err, "conn.Container")
}
return be, nil
}
func (be *beSwift) createContainer(policy string) error {
var h swift.Headers
if policy != "" {
h = swift.Headers{
"X-Storage-Policy": policy,
}
}
return be.conn.ContainerCreate(be.container, h)
}
// Location returns this backend's location (the container name).
func (be *beSwift) Location() string {
return be.container
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
objName := be.Filename(h)
be.sem.GetToken()
defer func() {
be.sem.ReleaseToken()
}()
headers := swift.Headers{}
if offset > 0 {
headers["Range"] = fmt.Sprintf("bytes=%d-", offset)
}
if length > 0 {
headers["Range"] = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
if _, ok := headers["Range"]; ok {
debug.Log("Load(%v) send range %v", h, headers["Range"])
}
obj, _, err := be.conn.ObjectOpen(be.container, objName, false, headers)
if err != nil {
debug.Log(" err %v", err)
return nil, errors.Wrap(err, "conn.ObjectOpen")
}
return obj, nil
}
// Save stores data in the backend at the handle.
func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
if err = h.Valid(); err != nil {
return err
}
objName := be.Filename(h)
debug.Log("Save %v at %v", h, objName)
// Check key does not already exist
switch _, _, err = be.conn.Object(be.container, objName); err {
case nil:
debug.Log("%v already exists", h)
return errors.New("key already exists")
case swift.ObjectNotFound:
// Ok, that's what we want
default:
return errors.Wrap(err, "conn.Object")
}
be.sem.GetToken()
defer func() {
be.sem.ReleaseToken()
}()
encoding := "binary/octet-stream"
debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)
_, err = be.conn.ObjectPut(be.container, objName, rd, true, "", encoding, nil)
debug.Log("%v, err %#v", objName, err)
return errors.Wrap(err, "client.PutObject")
}
// Stat returns information about a blob.
func (be *beSwift) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
objName := be.Filename(h)
obj, _, err := be.conn.Object(be.container, objName)
if err != nil {
debug.Log("Object() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "conn.Object")
}
return restic.FileInfo{Size: obj.Bytes}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *beSwift) Test(ctx context.Context, h restic.Handle) (bool, error) {
objName := be.Filename(h)
switch _, _, err := be.conn.Object(be.container, objName); err {
case nil:
return true, nil
case swift.ObjectNotFound:
return false, nil
default:
return false, errors.Wrap(err, "conn.Object")
}
}
// Remove removes the blob with the given name and type.
func (be *beSwift) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
err := be.conn.ObjectDelete(be.container, objName)
debug.Log("Remove(%v) -> err %v", h, err)
return errors.Wrap(err, "conn.ObjectDelete")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If ctx is cancelled, sending stops.
func (be *beSwift) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("listing %v", t)
ch := make(chan string)
prefix := be.Filename(restic.Handle{Type: t}) + "/"
go func() {
defer close(ch)
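// ObjectsWalk pages through the container, calling the function below once
// per page; returning an error (io.EOF here when ctx is cancelled) aborts the walk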
err := be.conn.ObjectsWalk(be.container, &swift.ObjectsOpts{Prefix: prefix},
func(opts *swift.ObjectsOpts) (interface{}, error) {
newObjects, err := be.conn.ObjectNames(be.container, opts)
if err != nil {
return nil, errors.Wrap(err, "conn.ObjectNames")
}
for _, obj := range newObjects {
m := filepath.Base(strings.TrimPrefix(obj, prefix))
if m == "" {
continue
}
select {
case ch <- m:
case <-ctx.Done():
return nil, io.EOF
}
}
return newObjects, nil
})
if err != nil {
debug.Log("ObjectsWalk returned error: %v", err)
}
}()
return ch
}
// removeKeys removes all blobs of the given file type.
func (be *beSwift) removeKeys(ctx context.Context, t restic.FileType) error {
for key := range be.List(ctx, t) {
err := be.Remove(ctx, restic.Handle{Type: t, Name: key})
if err != nil {
return err
}
}
return nil
}
// IsNotExist returns true if the error is caused by a non-existing file.
func (be *beSwift) IsNotExist(err error) bool {
if e, ok := errors.Cause(err).(*swift.Error); ok {
return e.StatusCode == http.StatusNotFound
}
return false
}
// Delete removes all restic objects in the container.
// It will not remove the container itself.
func (be *beSwift) Delete(ctx context.Context) error {
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(ctx, t)
if err != nil {
return err
}
}
err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil && !be.IsNotExist(err) {
return err
}
return nil
}
// Close does nothing
func (be *beSwift) Close() error { return nil }


@@ -0,0 +1,111 @@
package swift_test
import (
"context"
"fmt"
"os"
"restic"
"testing"
"time"
"restic/errors"
. "restic/test"
"restic/backend/swift"
"restic/backend/test"
)
func newSwiftTestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// do not use excessive data
MinimalData: true,
// wait for removals for at least 60s
WaitForDelayedRemoval: 60 * time.Second,
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
swiftcfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT"))
if err != nil {
return nil, err
}
cfg := swiftcfg.(swift.Config)
if err = swift.ApplyEnvironment("RESTIC_TEST_", &cfg); err != nil {
return nil, err
}
cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano())
t.Logf("using prefix %v", cfg.Prefix)
return cfg, nil
},
// CreateFn is a function that creates a temporary repository for the tests.
Create: func(config interface{}) (restic.Backend, error) {
cfg := config.(swift.Config)
be, err := swift.Open(cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if exists {
return nil, errors.New("config already exists")
}
return be, nil
},
// OpenFn is a function that opens a previously created temporary repository.
Open: func(config interface{}) (restic.Backend, error) {
cfg := config.(swift.Config)
return swift.Open(cfg)
},
// CleanupFn removes data created during the tests.
Cleanup: func(config interface{}) error {
cfg := config.(swift.Config)
be, err := swift.Open(cfg)
if err != nil {
return err
}
if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
return err
}
return nil
},
}
}
func TestBackendSwift(t *testing.T) {
defer func() {
if t.Skipped() {
SkipDisallowed(t, "restic/backend/swift.TestBackendSwift")
}
}()
if os.Getenv("RESTIC_TEST_SWIFT") == "" {
t.Skip("RESTIC_TEST_SWIFT unset, skipping test")
return
}
t.Logf("run tests")
newSwiftTestSuite(t).RunTests(t)
}
func BenchmarkBackendSwift(t *testing.B) {
if os.Getenv("RESTIC_TEST_SWIFT") == "" {
t.Skip("RESTIC_TEST_SWIFT unset, skipping test")
return
}
t.Logf("run tests")
newSwiftTestSuite(t).RunBenchmarks(t)
}


@@ -0,0 +1,183 @@
package test
import (
"bytes"
"context"
"io"
"restic"
"restic/test"
"testing"
)
func saveRandomFile(t testing.TB, be restic.Backend, length int) ([]byte, restic.Handle) {
data := test.Random(23, length)
id := restic.Hash(data)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
if err := be.Save(context.TODO(), handle, bytes.NewReader(data)); err != nil {
t.Fatalf("Save() error: %+v", err)
}
return data, handle
}
func remove(t testing.TB, be restic.Backend, h restic.Handle) {
if err := be.Remove(context.TODO(), h); err != nil {
t.Fatalf("Remove() returned error: %v", err)
}
}
// BenchmarkLoadFile benchmarks the Load() method of a backend by
// loading a complete file.
func (s *Suite) BenchmarkLoadFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
length := 1<<24 + 2123
data, handle := saveRandomFile(t, be, length)
defer remove(t, be, handle)
buf := make([]byte, length)
t.SetBytes(int64(length))
t.ResetTimer()
for i := 0; i < t.N; i++ {
rd, err := be.Load(context.TODO(), handle, 0, 0)
if err != nil {
t.Fatal(err)
}
n, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatal(err)
}
if err = rd.Close(); err != nil {
t.Fatalf("Close() returned error: %v", err)
}
if n != length {
t.Fatalf("wrong number of bytes read: want %v, got %v", length, n)
}
if !bytes.Equal(data, buf) {
t.Fatalf("wrong bytes returned")
}
}
}
// BenchmarkLoadPartialFile benchmarks the Load() method of a backend by
// loading the remainder of a file starting at a given offset.
func (s *Suite) BenchmarkLoadPartialFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
datalength := 1<<24 + 2123
data, handle := saveRandomFile(t, be, datalength)
defer remove(t, be, handle)
testLength := datalength/4 + 555
buf := make([]byte, testLength)
t.SetBytes(int64(testLength))
t.ResetTimer()
for i := 0; i < t.N; i++ {
rd, err := be.Load(context.TODO(), handle, testLength, 0)
if err != nil {
t.Fatal(err)
}
n, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatal(err)
}
if err = rd.Close(); err != nil {
t.Fatalf("Close() returned error: %v", err)
}
if n != testLength {
t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
}
if !bytes.Equal(data[:testLength], buf) {
t.Fatalf("wrong bytes returned")
}
}
}
// BenchmarkLoadPartialFileOffset benchmarks the Load() method of a
// backend by loading a number of bytes of a file starting at a given offset.
func (s *Suite) BenchmarkLoadPartialFileOffset(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
datalength := 1<<24 + 2123
data, handle := saveRandomFile(t, be, datalength)
defer remove(t, be, handle)
testLength := datalength/4 + 555
testOffset := 8273
buf := make([]byte, testLength)
t.SetBytes(int64(testLength))
t.ResetTimer()
for i := 0; i < t.N; i++ {
rd, err := be.Load(context.TODO(), handle, testLength, int64(testOffset))
if err != nil {
t.Fatal(err)
}
n, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatal(err)
}
if err = rd.Close(); err != nil {
t.Fatalf("Close() returned error: %v", err)
}
if n != testLength {
t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
}
if !bytes.Equal(data[testOffset:testOffset+testLength], buf) {
t.Fatalf("wrong bytes returned")
}
}
}
// BenchmarkSave benchmarks the Save() method of a backend.
func (s *Suite) BenchmarkSave(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
length := 1<<24 + 2123
data := test.Random(23, length)
id := restic.Hash(data)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
rd := bytes.NewReader(data)
t.SetBytes(int64(length))
t.ResetTimer()
for i := 0; i < t.N; i++ {
if _, err := rd.Seek(0, 0); err != nil {
t.Fatal(err)
}
if err := be.Save(context.TODO(), handle, rd); err != nil {
t.Fatal(err)
}
if err := be.Remove(context.TODO(), handle); err != nil {
t.Fatal(err)
}
}
}


@@ -0,0 +1,42 @@
// Package test contains a test suite with benchmarks for restic backends.
//
// Overview
//
// For the test suite to work a few functions need to be implemented to create
// new config, create a backend, open it and run cleanup tasks afterwards. The
// Suite struct has fields for each function.
//
// So for a new backend, a Suite needs to be built with callback functions,
// then the methods RunTests() and RunBenchmarks() can be used to run the
// individual tests and benchmarks as subtests/subbenchmarks.
//
// Example
//
// Assuming a *Suite is returned by newTestSuite(), the tests and benchmarks
// can be run like this:
// func newTestSuite(t testing.TB) *test.Suite {
// return &test.Suite{
// Create: func(cfg interface{}) (restic.Backend, error) {
// [...]
// },
// [...]
// }
// }
//
// func TestSuiteBackendMem(t *testing.T) {
// newTestSuite(t).RunTests(t)
// }
//
// func BenchmarkSuiteBackendMem(b *testing.B) {
// newTestSuite(b).RunBenchmarks(b)
// }
//
// The functions are run in alphabetical order.
//
// Add new tests
//
// A new test or benchmark can be added by implementing a method on *Suite
// whose name starts with "Test" and which takes a single *testing.T parameter.
// For benchmarks, the name must start with "Benchmark" and the parameter
// is a *testing.B.
package test


@@ -0,0 +1,181 @@
package test
import (
"reflect"
"restic"
"restic/test"
"strings"
"testing"
"time"
)
// Suite implements a test suite for restic backends.
type Suite struct {
// Config should be used to configure the backend.
Config interface{}
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig func() (interface{}, error)
// Create creates a temporary repository for the tests.
Create func(cfg interface{}) (restic.Backend, error)
// Open opens a previously created temporary repository.
Open func(cfg interface{}) (restic.Backend, error)
// Cleanup removes data created during the tests.
Cleanup func(cfg interface{}) error
// MinimalData instructs the tests to not use excessive data.
MinimalData bool
// WaitForDelayedRemoval is set to a non-zero value to instruct the test
// suite to wait for this amount of time until a file that was removed
// has really disappeared.
WaitForDelayedRemoval time.Duration
}
// RunTests executes all defined tests as subtests of t.
func (s *Suite) RunTests(t *testing.T) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
t.Fatal(err)
}
// test create/open functions first
be := s.create(t)
s.close(t, be)
for _, test := range s.testFuncs(t) {
t.Run(test.Name, test.Fn)
}
if !test.TestCleanupTempDirs {
t.Logf("not cleaning up backend")
return
}
if err = s.Cleanup(s.Config); err != nil {
t.Fatal(err)
}
}
type testFunction struct {
Name string
Fn func(*testing.T)
}
func (s *Suite) testFuncs(t testing.TB) (funcs []testFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
for i := 0; i < tpe.NumMethod(); i++ {
methodType := tpe.Method(i)
name := methodType.Name
// discard functions which do not have the right name
if !strings.HasPrefix(name, "Test") {
continue
}
iface := v.Method(i).Interface()
f, ok := iface.(func(*testing.T))
if !ok {
t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T",
name, iface)
continue
}
funcs = append(funcs, testFunction{
Name: name,
Fn: f,
})
}
return funcs
}
type benchmarkFunction struct {
Name string
Fn func(*testing.B)
}
func (s *Suite) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
for i := 0; i < tpe.NumMethod(); i++ {
methodType := tpe.Method(i)
name := methodType.Name
// discard functions which do not have the right name
if !strings.HasPrefix(name, "Benchmark") {
continue
}
iface := v.Method(i).Interface()
f, ok := iface.(func(*testing.B))
if !ok {
t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T",
name, iface)
continue
}
funcs = append(funcs, benchmarkFunction{
Name: name,
Fn: f,
})
}
return funcs
}
// RunBenchmarks executes all defined benchmarks as subtests of b.
func (s *Suite) RunBenchmarks(b *testing.B) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
b.Fatal(err)
}
// test create/open functions first
be := s.create(b)
s.close(b, be)
for _, test := range s.benchmarkFuncs(b) {
b.Run(test.Name, test.Fn)
}
if !test.TestCleanupTempDirs {
b.Logf("not cleaning up backend")
return
}
if err = s.Cleanup(s.Config); err != nil {
b.Fatal(err)
}
}
func (s *Suite) create(t testing.TB) restic.Backend {
be, err := s.Create(s.Config)
if err != nil {
t.Fatal(err)
}
return be
}
func (s *Suite) open(t testing.TB) restic.Backend {
be, err := s.Open(s.Config)
if err != nil {
t.Fatal(err)
}
return be
}
func (s *Suite) close(t testing.TB, be restic.Backend) {
err := be.Close()
if err != nil {
t.Fatal(err)
}
}
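
Because RunTests and RunBenchmarks register each method as a subtest or subbenchmark, a single one can be selected with go test's -run and -bench patterns. For example (the package path is illustrative):

go test -run 'TestSuiteBackendMem/TestConfig' restic/backend/test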

View File

@@ -0,0 +1,638 @@
package test
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"restic"
"restic/errors"
"sort"
"strings"
"testing"
"time"
"restic/test"
"restic/backend"
)
func seedRand(t testing.TB) {
seed := time.Now().UnixNano()
rand.Seed(seed)
t.Logf("rand initialized with seed %d", seed)
}
// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func (s *Suite) TestCreateWithConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
// remove a config if present
cfgHandle := restic.Handle{Type: restic.ConfigFile}
cfgPresent, err := b.Test(context.TODO(), cfgHandle)
if err != nil {
t.Fatalf("unable to test for config: %+v", err)
}
if cfgPresent {
remove(t, b, cfgHandle)
}
// save a config
store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail
_, err = s.Create(s.Config)
if err == nil {
t.Fatalf("expected error not found for creating a backend with an existing config file")
}
// remove config
err = b.Remove(context.TODO(), restic.Handle{Type: restic.ConfigFile, Name: ""})
if err != nil {
t.Fatalf("unexpected error removing config: %+v", err)
}
}
// TestLocation tests that a location string is returned.
func (s *Suite) TestLocation(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
l := b.Location()
if l == "" {
t.Fatalf("invalid location string %q", l)
}
}
// TestConfig saves and loads a config from the backend.
func (s *Suite) TestConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
var testString = "Config"
// create config and read it back
_, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.ConfigFile})
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}
err = b.Save(context.TODO(), restic.Handle{Type: restic.ConfigFile}, strings.NewReader(testString))
if err != nil {
t.Fatalf("Save() error: %+v", err)
}
// try accessing the config with different names, should all return the
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(context.TODO(), b, h)
if err != nil {
t.Fatalf("unable to read config with name %q: %+v", name, err)
}
if string(buf) != testString {
t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
}
}
// remove the config
remove(t, b, restic.Handle{Type: restic.ConfigFile})
}
// TestLoad tests the backend's Load function.
func (s *Suite) TestLoad(t *testing.T) {
seedRand(t)
b := s.open(t)
defer s.close(t, b)
rd, err := b.Load(context.TODO(), restic.Handle{}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for invalid handle")
}
if rd != nil {
_ = rd.Close()
}
err = testLoad(b, restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
length := rand.Intn(1<<24) + 2000
data := test.Random(23, length)
id := restic.Hash(data)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(context.TODO(), handle, bytes.NewReader(data))
if err != nil {
t.Fatalf("Save() error: %+v", err)
}
t.Logf("saved %d bytes as %v", length, handle)
rd, err = b.Load(context.TODO(), handle, 100, -1)
if err == nil {
t.Fatalf("Load() returned no error for negative offset!")
}
if rd != nil {
t.Fatalf("Load() returned a non-nil reader for negative offset!")
}
loadTests := 50
if s.MinimalData {
loadTests = 10
}
for i := 0; i < loadTests; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
d := data
if o < len(d) {
d = d[o:]
} else {
t.Logf("offset == length, skipping test")
continue
}
getlen := l
if l >= len(d) && rand.Float32() >= 0.5 {
getlen = 0
}
if l > 0 && l < len(d) {
d = d[:l]
}
rd, err := b.Load(context.TODO(), handle, getlen, int64(o))
if err != nil {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err)
continue
}
buf, err := ioutil.ReadAll(rd)
if err != nil {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) ReadAll() returned unexpected error: %+v", l, o, err)
if err = rd.Close(); err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned error: %+v", l, o, err)
}
continue
}
if l == 0 && len(buf) != len(d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf))
if err = rd.Close(); err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned error: %+v", l, o, err)
}
continue
}
if l > 0 && l <= len(d) && len(buf) != l {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
if err = rd.Close(); err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned error: %+v", l, o, err)
}
continue
}
if l > len(d) && len(buf) != len(d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
if err = rd.Close(); err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned error: %+v", l, o, err)
}
continue
}
if !bytes.Equal(buf, d) {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
if err = rd.Close(); err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned error: %+v", l, o, err)
}
continue
}
err = rd.Close()
if err != nil {
t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
t.Errorf("Load(%d, %d) rd.Close() returned unexpected error: %+v", l, o, err)
continue
}
}
test.OK(t, b.Remove(context.TODO(), handle))
}
type errorCloser struct {
io.Reader
l int
t testing.TB
}
func (ec errorCloser) Close() error {
ec.t.Error("forbidden method close was called")
return errors.New("forbidden method close was called")
}
func (ec errorCloser) Len() int {
return ec.l
}
// TestSave tests saving data in the backend.
func (s *Suite) TestSave(t *testing.T) {
seedRand(t)
b := s.open(t)
defer s.close(t, b)
var id restic.ID
saveTests := 10
if s.MinimalData {
saveTests = 2
}
for i := 0; i < saveTests; i++ {
length := rand.Intn(1<<23) + 200000
data := test.Random(23, length)
// use the first 32 bytes as the ID
copy(id[:], data)
h := restic.Handle{
Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i),
}
err := b.Save(context.TODO(), h, bytes.NewReader(data))
test.OK(t, err)
buf, err := backend.LoadAll(context.TODO(), b, h)
test.OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
}
if !bytes.Equal(buf, data) {
t.Fatalf("data not equal")
}
fi, err := b.Stat(context.TODO(), h)
test.OK(t, err)
if fi.Size != int64(len(data)) {
t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
}
err = b.Remove(context.TODO(), h)
if err != nil {
t.Fatalf("error removing item: %+v", err)
}
}
// test saving from a tempfile
tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-")
if err != nil {
t.Fatal(err)
}
length := rand.Intn(1<<23) + 200000
data := test.Random(23, length)
copy(id[:], data)
if _, err = tmpfile.Write(data); err != nil {
t.Fatal(err)
}
if _, err = tmpfile.Seek(0, io.SeekStart); err != nil {
t.Fatal(err)
}
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
// wrap the tempfile in an errorCloser, so we can detect if the backend
// closes the reader
err = b.Save(context.TODO(), h, errorCloser{t: t, l: length, Reader: tmpfile})
if err != nil {
t.Fatal(err)
}
err = delayedRemove(t, b, s.WaitForDelayedRemoval, h)
if err != nil {
t.Fatalf("error removing item: %+v", err)
}
// try again directly with the temp file
if _, err = tmpfile.Seek(588, io.SeekStart); err != nil {
t.Fatal(err)
}
err = b.Save(context.TODO(), h, tmpfile)
if err != nil {
t.Fatal(err)
}
if err = tmpfile.Close(); err != nil {
t.Fatal(err)
}
err = b.Remove(context.TODO(), h)
if err != nil {
t.Fatalf("error removing item: %+v", err)
}
if err = os.Remove(tmpfile.Name()); err != nil {
t.Fatal(err)
}
}
var filenameTests = []struct {
name string
data string
}{
{"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e", "x"},
{"f00b4r", "foobar"},
{
"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e4bf8f2d9144cc5420a80f04a4880ad6155fc58903a4fb6457c476c43541dcaa6-5",
"foobar content of data blob",
},
}
// TestSaveFilenames tests saving data with various file names in the backend.
func (s *Suite) TestSaveFilenames(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
for i, test := range filenameTests {
h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(context.TODO(), h, strings.NewReader(test.data))
if err != nil {
t.Errorf("test %d failed: Save() returned %+v", i, err)
continue
}
buf, err := backend.LoadAll(context.TODO(), b, h)
if err != nil {
t.Errorf("test %d failed: Load() returned %+v", i, err)
continue
}
if !bytes.Equal(buf, []byte(test.data)) {
t.Errorf("test %d: returned wrong bytes", i)
}
err = b.Remove(context.TODO(), h)
if err != nil {
t.Errorf("test %d failed: Remove() returned %+v", i, err)
continue
}
}
}
var testStrings = []struct {
id string
data string
}{
{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle {
id := restic.Hash(data)
h := restic.Handle{Name: id.String(), Type: tpe}
err := b.Save(context.TODO(), h, bytes.NewReader(data))
test.OK(t, err)
return h
}
// testLoad loads a blob (but discards its contents).
func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
rd, err := b.Load(context.TODO(), h, length, offset)
if err != nil {
return err
}
_, err = io.Copy(ioutil.Discard, rd)
cerr := rd.Close()
if err == nil {
err = cerr
}
return err
}
func delayedRemove(t testing.TB, be restic.Backend, maxwait time.Duration, handles ...restic.Handle) error {
// Some backends (swift, I'm looking at you) may implement delayed
// removal of data. Let's wait a bit if this happens.
for _, h := range handles {
err := be.Remove(context.TODO(), h)
if err != nil {
return err
}
}
for _, h := range handles {
start := time.Now()
attempt := 0
var found bool
var err error
for time.Since(start) <= maxwait {
found, err = be.Test(context.TODO(), h)
if err != nil {
return err
}
if !found {
break
}
time.Sleep(2 * time.Second)
attempt++
}
if found {
t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt)
}
}
return nil
}
func delayedList(t testing.TB, b restic.Backend, tpe restic.FileType, max int, maxwait time.Duration) restic.IDs {
list := restic.NewIDSet()
start := time.Now()
for i := 0; i < max; i++ {
for s := range b.List(context.TODO(), tpe) {
id := restic.TestParseID(s)
list.Insert(id)
}
if len(list) < max && time.Since(start) < maxwait {
time.Sleep(500 * time.Millisecond)
}
}
return list.List()
}
// TestBackend tests all functions of the backend.
func (s *Suite) TestBackend(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
for _, tpe := range []restic.FileType{
restic.DataFile, restic.KeyFile, restic.LockFile,
restic.SnapshotFile, restic.IndexFile,
} {
// detect non-existing files
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
// test if blob is already in repository
h := restic.Handle{Type: tpe, Name: id.String()}
ret, err := b.Test(context.TODO(), h)
test.OK(t, err)
test.Assert(t, !ret, "blob was found to exist before creating")
// try to stat a non-existing blob
_, err = b.Stat(context.TODO(), h)
test.Assert(t, err != nil, "blob data could be extracted before creation")
// try to read a non-existing blob
err = testLoad(b, h, 0, 0)
test.Assert(t, err != nil, "blob could be read before creation")
// try to get string out, should fail
ret, err = b.Test(context.TODO(), h)
test.OK(t, err)
test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
}
// add files
for _, ts := range testStrings {
store(t, b, tpe, []byte(ts.data))
// test Load()
h := restic.Handle{Type: tpe, Name: ts.id}
buf, err := backend.LoadAll(context.TODO(), b, h)
test.OK(t, err)
test.Equals(t, ts.data, string(buf))
// try to read it out with an offset and a length
start := 1
end := len(ts.data) - 2
length := end - start
buf2 := make([]byte, length)
rd, err := b.Load(context.TODO(), h, len(buf2), int64(start))
test.OK(t, err)
n, err := io.ReadFull(rd, buf2)
test.OK(t, err)
test.Equals(t, len(buf2), n)
remaining, err := io.Copy(ioutil.Discard, rd)
test.OK(t, err)
test.Equals(t, int64(0), remaining)
test.OK(t, rd.Close())
test.Equals(t, ts.data[start:end], string(buf2))
}
// test adding the first file again
ts := testStrings[0]
// create blob
h := restic.Handle{Type: tpe, Name: ts.id}
err := b.Save(context.TODO(), h, strings.NewReader(ts.data))
test.Assert(t, err != nil, "expected error for %v, got %v", h, err)
// remove and recreate
err = delayedRemove(t, b, s.WaitForDelayedRemoval, h)
test.OK(t, err)
// test that the blob is gone
ok, err := b.Test(context.TODO(), h)
test.OK(t, err)
test.Assert(t, !ok, "removed blob still present")
// create blob
err = b.Save(context.TODO(), h, strings.NewReader(ts.data))
test.OK(t, err)
// list items
IDs := restic.IDs{}
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
IDs = append(IDs, id)
}
list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval)
if len(IDs) != len(list) {
t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
}
sort.Sort(IDs)
sort.Sort(list)
if !reflect.DeepEqual(IDs, list) {
t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list)
}
// remove content if requested
if test.TestCleanupTempDirs {
var handles []restic.Handle
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
h := restic.Handle{Type: tpe, Name: id.String()}
found, err := b.Test(context.TODO(), h)
test.OK(t, err)
test.Assert(t, found, fmt.Sprintf("id %q not found", id))
handles = append(handles, h)
}
test.OK(t, delayedRemove(t, b, s.WaitForDelayedRemoval, handles...))
}
}
}
// TestDelete tests the Delete function.
func (s *Suite) TestDelete(t *testing.T) {
if !test.TestCleanupTempDirs {
t.Skipf("not removing backend, TestCleanupTempDirs is false")
}
b := s.open(t)
defer s.close(t, b)
be, ok := b.(restic.Deleter)
if !ok {
return
}
err := be.Delete(context.TODO())
if err != nil {
t.Fatalf("error deleting backend: %+v", err)
}
}

View File

@@ -0,0 +1,67 @@
package test_test
import (
"context"
"restic"
"restic/errors"
"testing"
"restic/backend/mem"
"restic/backend/test"
)
//go:generate go run generate_test_list.go
type memConfig struct {
be restic.Backend
}
func newTestSuite(t testing.TB) *test.Suite {
return &test.Suite{
// NewConfig returns a config for a new temporary backend that will be used in tests.
NewConfig: func() (interface{}, error) {
return &memConfig{}, nil
},
// Create is a function that creates a temporary backend for the tests.
Create: func(cfg interface{}) (restic.Backend, error) {
c := cfg.(*memConfig)
if c.be != nil {
ok, err := c.be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
if ok {
return nil, errors.New("config already exists")
}
}
c.be = mem.New()
return c.be, nil
},
// Open is a function that opens a previously created temporary backend.
Open: func(cfg interface{}) (restic.Backend, error) {
c := cfg.(*memConfig)
if c.be == nil {
c.be = mem.New()
}
return c.be, nil
},
// Cleanup removes data created during the tests.
Cleanup: func(cfg interface{}) error {
// no cleanup needed
return nil
},
}
}
func TestSuiteBackendMem(t *testing.T) {
newTestSuite(t).RunTests(t)
}
func BenchmarkSuiteBackendMem(b *testing.B) {
newTestSuite(b).RunBenchmarks(b)
}

Binary file not shown.

Binary file not shown.

47
internal/backend/utils.go Normal file
View File

@@ -0,0 +1,47 @@
package backend
import (
"context"
"io"
"io/ioutil"
"restic"
)
// LoadAll reads all data stored in the backend for the handle.
func LoadAll(ctx context.Context, be restic.Backend, h restic.Handle) (buf []byte, err error) {
rd, err := be.Load(ctx, h, 0, 0)
if err != nil {
return nil, err
}
defer func() {
_, e := io.Copy(ioutil.Discard, rd)
if err == nil {
err = e
}
e = rd.Close()
if err == nil {
err = e
}
}()
return ioutil.ReadAll(rd)
}
// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
type LimitedReadCloser struct {
io.ReadCloser
io.Reader
}
// Read reads data from the limited reader.
func (l *LimitedReadCloser) Read(p []byte) (int, error) {
return l.Reader.Read(p)
}
// LimitReadCloser returns a new reader that wraps r in an io.LimitReader, but
// also exposes the Close() method.
func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
return &LimitedReadCloser{ReadCloser: r, Reader: io.LimitReader(r, n)}
}
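
As a hedged sketch of where LimitReadCloser fits: a backend's Load implementation that has already positioned the underlying reader at the requested offset could honor a length parameter like this (loadAt is a hypothetical helper, not part of this file):

// loadAt limits rc to length bytes when length > 0, mirroring the
// Load(ctx, h, length, offset) contract implemented by the backends.
func loadAt(rc io.ReadCloser, length int) io.ReadCloser {
	if length > 0 {
		return LimitReadCloser(rc, int64(length))
	}
	return rc
}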

View File

@@ -0,0 +1,91 @@
package backend_test
import (
"bytes"
"context"
"math/rand"
"restic"
"testing"
"restic/backend"
"restic/backend/mem"
. "restic/test"
)
const KiB = 1 << 10
const MiB = 1 << 20
func TestLoadAll(t *testing.T) {
b := mem.New()
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
err := b.Save(context.TODO(), restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
continue
}
if !bytes.Equal(buf, data) {
t.Errorf("wrong data returned")
continue
}
}
}
func TestLoadSmallBuffer(t *testing.T) {
b := mem.New()
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
err := b.Save(context.TODO(), restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
continue
}
if !bytes.Equal(buf, data) {
t.Errorf("wrong data returned")
continue
}
}
}
func TestLoadLargeBuffer(t *testing.T) {
b := mem.New()
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := restic.Hash(data)
err := b.Save(context.TODO(), restic.Handle{Name: id.String(), Type: restic.DataFile}, bytes.NewReader(data))
OK(t, err)
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
OK(t, err)
if len(buf) != len(data) {
t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
continue
}
if !bytes.Equal(buf, data) {
t.Errorf("wrong data returned")
continue
}
}
}

68
internal/backend_find.go Normal file
View File

@@ -0,0 +1,68 @@
package restic
import (
"context"
"restic/errors"
)
// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
// could be found.
var ErrNoIDPrefixFound = errors.New("no ID found")
// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given
// prefix are found.
var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")
// Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, an empty string and ErrNoIDPrefixFound
// are returned. If more than one is found, an empty string and
// ErrMultipleIDMatches are returned.
func Find(be Lister, t FileType, prefix string) (string, error) {
match := ""
// TODO: optimize by sorting list etc.
for name := range be.List(context.TODO(), t) {
if len(name) >= len(prefix) && prefix == name[:len(prefix)] {
if match == "" {
match = name
} else {
return "", ErrMultipleIDMatches
}
}
}
if match != "" {
return match, nil
}
return "", ErrNoIDPrefixFound
}
const minPrefixLength = 8
// PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique.
func PrefixLength(be Lister, t FileType) (int, error) {
// load all IDs of the given type
list := make([]string, 0, 100)
for name := range be.List(context.TODO(), t) {
list = append(list, name)
}
// select prefixes of length l, test if the last one is the same as the current one
id := ID{}
outer:
for l := minPrefixLength; l < len(id); l++ {
var last string
for _, name := range list {
if last == name[:l] {
continue outer
}
last = name[:l]
}
return l, nil
}
return len(id), nil
}
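
A short usage sketch for Find, resolving an ID prefix typed by a user (be and prefix are assumed to be in scope):

name, err := Find(be, SnapshotFile, prefix)
switch err {
case nil:
	// exactly one file name starts with the prefix; use name
case ErrNoIDPrefixFound:
	// no match, report an unknown ID
case ErrMultipleIDMatches:
	// ambiguous prefix, ask for more characters
}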

View File

@@ -0,0 +1,71 @@
package restic
import (
"context"
"testing"
)
type mockBackend struct {
list func(context.Context, FileType) <-chan string
}
func (m mockBackend) List(ctx context.Context, t FileType) <-chan string {
return m.list(ctx, t)
}
var samples = IDs{
TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}
func TestPrefixLength(t *testing.T) {
list := samples
m := mockBackend{}
m.list = func(ctx context.Context, t FileType) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for _, id := range list {
select {
case ch <- id.String():
case <-ctx.Done():
return
}
}
}()
return ch
}
l, err := PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}
list = samples[:3]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}
list = samples[3:]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 8 {
t.Errorf("wrong prefix length returned, want %d, got %d", 8, l)
}
}

120
internal/blob.go Normal file
View File

@@ -0,0 +1,120 @@
package restic
import (
"fmt"
"restic/errors"
)
// Blob is one part of a file or a tree.
type Blob struct {
Type BlobType
Length uint
ID ID
Offset uint
}
func (b Blob) String() string {
return fmt.Sprintf("<Blob (%v) %v, offset %v, length %v>",
b.Type, b.ID.Str(), b.Offset, b.Length)
}
// PackedBlob is a blob stored within a file.
type PackedBlob struct {
Blob
PackID ID
}
// BlobHandle identifies a blob of a given type.
type BlobHandle struct {
ID ID
Type BlobType
}
func (h BlobHandle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8
// These are the blob types that can be stored in a pack.
const (
InvalidBlob BlobType = iota
DataBlob
TreeBlob
)
func (t BlobType) String() string {
switch t {
case DataBlob:
return "data"
case TreeBlob:
return "tree"
case InvalidBlob:
return "invalid"
}
return fmt.Sprintf("<BlobType %d>", t)
}
// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) {
switch t {
case DataBlob:
return []byte(`"data"`), nil
case TreeBlob:
return []byte(`"tree"`), nil
}
return nil, errors.New("unknown blob type")
}
// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) {
case `"data"`:
*t = DataBlob
case `"tree"`:
*t = TreeBlob
default:
return errors.New("unknown blob type")
}
return nil
}
// BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
type BlobHandles []BlobHandle
func (h BlobHandles) Len() int {
return len(h)
}
func (h BlobHandles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}
if b < h[j].ID[k] {
return true
}
return false
}
return h[i].Type < h[j].Type
}
func (h BlobHandles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h BlobHandles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}

109
internal/blob_set.go Normal file
View File

@@ -0,0 +1,109 @@
package restic
import "sort"
// BlobSet is a set of blobs.
type BlobSet map[BlobHandle]struct{}
// NewBlobSet returns a new BlobSet, populated with the given handles.
func NewBlobSet(handles ...BlobHandle) BlobSet {
m := make(BlobSet)
for _, h := range handles {
m[h] = struct{}{}
}
return m
}
// Has returns true iff h is contained in the set.
func (s BlobSet) Has(h BlobHandle) bool {
_, ok := s[h]
return ok
}
// Insert adds h to the set.
func (s BlobSet) Insert(h BlobHandle) {
s[h] = struct{}{}
}
// Delete removes h from the set.
func (s BlobSet) Delete(h BlobHandle) {
delete(s, h)
}
// Equals returns true iff s equals other.
func (s BlobSet) Equals(other BlobSet) bool {
if len(s) != len(other) {
return false
}
for h := range s {
if _, ok := other[h]; !ok {
return false
}
}
return true
}
// Merge adds the blobs in other to the current set.
func (s BlobSet) Merge(other BlobSet) {
for h := range other {
s.Insert(h)
}
}
// Intersect returns a new set containing the handles that are present in both sets.
func (s BlobSet) Intersect(other BlobSet) (result BlobSet) {
result = NewBlobSet()
set1 := s
set2 := other
// iterate over the smaller set
if len(set2) < len(set1) {
set1, set2 = set2, set1
}
for h := range set1 {
if set2.Has(h) {
result.Insert(h)
}
}
return result
}
// Sub returns a new set containing all handles that are present in s but not in
// other.
func (s BlobSet) Sub(other BlobSet) (result BlobSet) {
result = NewBlobSet()
for h := range s {
if !other.Has(h) {
result.Insert(h)
}
}
return result
}
// List returns a sorted slice of all BlobHandle in the set.
func (s BlobSet) List() BlobHandles {
list := make(BlobHandles, 0, len(s))
for h := range s {
list = append(list, h)
}
sort.Sort(list)
return list
}
func (s BlobSet) String() string {
str := s.List().String()
if len(str) < 2 {
return "{}"
}
return "{" + str[1:len(str)-1] + "}"
}
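
A brief sketch of the set algebra above; the handles h1, h2 and h3 are hypothetical:

a := NewBlobSet(h1, h2)
b := NewBlobSet(h2, h3)
_ = a.Intersect(b) // {h2}: present in both sets
_ = a.Sub(b)       // {h1}: only in a
a.Merge(b)         // a now contains h1, h2 and h3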

41
internal/blob_test.go Normal file
View File

@@ -0,0 +1,41 @@
package restic
import (
"encoding/json"
"testing"
)
var blobTypeJSON = []struct {
t BlobType
res string
}{
{DataBlob, `"data"`},
{TreeBlob, `"tree"`},
}
func TestBlobTypeJSON(t *testing.T) {
for _, test := range blobTypeJSON {
// test serialize
buf, err := json.Marshal(test.t)
if err != nil {
t.Error(err)
continue
}
if test.res != string(buf) {
t.Errorf("want %q, got %q", test.res, string(buf))
continue
}
// test unserialize
var v BlobType
err = json.Unmarshal([]byte(test.res), &v)
if err != nil {
t.Error(err)
continue
}
if test.t != v {
t.Errorf("want %v, got %v", test.t, v)
continue
}
}
}

21
internal/buffer.go Normal file
View File

@@ -0,0 +1,21 @@
package restic
import "restic/crypto"
// NewBlobBuffer returns a buffer that is large enough to hold a blob of size
// plaintext bytes, including the crypto overhead.
func NewBlobBuffer(size int) []byte {
return make([]byte, size, size+crypto.Extension)
}
// PlaintextLength returns the plaintext length of a blob with ciphertextSize
// bytes.
func PlaintextLength(ciphertextSize int) int {
return ciphertextSize - crypto.Extension
}
// CiphertextLength returns the encrypted length of a blob with plaintextSize
// bytes.
func CiphertextLength(plaintextSize int) int {
return plaintextSize + crypto.Extension
}
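
A quick sketch of the arithmetic above: the two length helpers are inverses of each other, and NewBlobBuffer reserves the encryption overhead in the buffer's capacity up front:

buf := NewBlobBuffer(100)   // len(buf) == 100, cap(buf) == 100+crypto.Extension
ct := CiphertextLength(100) // 100 + crypto.Extension
pt := PlaintextLength(ct)   // back to 100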

795
internal/checker/checker.go Normal file
View File

@@ -0,0 +1,795 @@
package checker
import (
"context"
"crypto/sha256"
"fmt"
"io"
"os"
"sync"
"restic/errors"
"restic/fs"
"restic/hashing"
"restic"
"restic/debug"
"restic/pack"
"restic/repository"
)
// Checker runs various checks on a repository. It is advisable to create an
// exclusive Lock in the repository before running any checks.
//
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
packs restic.IDSet
blobs restic.IDSet
blobRefs struct {
sync.Mutex
M map[restic.ID]uint
}
indexes map[restic.ID]*repository.Index
orphanedPacks restic.IDs
masterIndex *repository.MasterIndex
repo restic.Repository
}
// New returns a new checker which runs on repo.
func New(repo restic.Repository) *Checker {
c := &Checker{
packs: restic.NewIDSet(),
blobs: restic.NewIDSet(),
masterIndex: repository.NewMasterIndex(),
indexes: make(map[restic.ID]*repository.Index),
repo: repo,
}
c.blobRefs.M = make(map[restic.ID]uint)
return c
}
const defaultParallelism = 40
// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
PackID restic.ID
Indexes restic.IDSet
}
func (e ErrDuplicatePacks) Error() string {
return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes)
}
// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
restic.ID
}
func (err ErrOldIndexFormat) Error() string {
return fmt.Sprintf("index %v has old format", err.ID.Str())
}
// LoadIndex loads all index files.
func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
debug.Log("Start")
type indexRes struct {
Index *repository.Index
err error
ID string
}
indexCh := make(chan indexRes)
worker := func(ctx context.Context, id restic.ID) error {
debug.Log("worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeIndex)
if errors.Cause(err) == repository.ErrOldIndexFormat {
debug.Log("index %v has old format", id.Str())
hints = append(hints, ErrOldIndexFormat{id})
idx, err = repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeOldIndex)
}
err = errors.Wrapf(err, "error loading index %v", id.Str())
select {
case indexCh <- indexRes{Index: idx, ID: id.String(), err: err}:
case <-ctx.Done():
}
return nil
}
go func() {
defer close(indexCh)
debug.Log("start loading indexes in parallel")
err := repository.FilesInParallel(ctx, c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker))
debug.Log("loading indexes finished, error: %v", err)
if err != nil {
panic(err)
}
}()
done := make(chan struct{})
defer close(done)
packToIndex := make(map[restic.ID]restic.IDSet)
for res := range indexCh {
debug.Log("process index %v, err %v", res.ID, res.err)
if res.err != nil {
errs = append(errs, res.err)
continue
}
idxID, err := restic.ParseID(res.ID)
if err != nil {
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
continue
}
c.indexes[idxID] = res.Index
c.masterIndex.Insert(res.Index)
debug.Log("process blobs")
cnt := 0
for blob := range res.Index.Each(ctx) {
c.packs.Insert(blob.PackID)
c.blobs.Insert(blob.ID)
c.blobRefs.M[blob.ID] = 0
cnt++
if _, ok := packToIndex[blob.PackID]; !ok {
packToIndex[blob.PackID] = restic.NewIDSet()
}
packToIndex[blob.PackID].Insert(idxID)
}
debug.Log("%d blobs processed", cnt)
}
debug.Log("checking for duplicate packs")
for packID := range c.packs {
debug.Log(" check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
if len(packToIndex[packID]) > 1 {
hints = append(hints, ErrDuplicatePacks{
PackID: packID,
Indexes: packToIndex[packID],
})
}
}
c.repo.SetIndex(c.masterIndex)
return hints, errs
}
// PackError describes an error with a specific pack.
type PackError struct {
ID restic.ID
Orphaned bool
Err error
}
func (e PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error()
}
func packIDTester(ctx context.Context, repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup) {
debug.Log("worker start")
defer debug.Log("worker done")
defer wg.Done()
for id := range inChan {
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
ok, err := repo.Backend().Test(ctx, h)
if err != nil {
err = PackError{ID: id, Err: err}
} else {
if !ok {
err = PackError{ID: id, Err: errors.New("does not exist")}
}
}
if err != nil {
debug.Log("error checking for pack %s: %v", id.Str(), err)
select {
case <-ctx.Done():
return
case errChan <- err:
}
continue
}
debug.Log("pack %s exists", id.Str())
}
}
// Packs checks that all packs referenced in the index are still available and
// there are no packs that aren't in an index. errChan is closed after all
// packs have been checked.
func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
defer close(errChan)
debug.Log("checking for %d packs", len(c.packs))
seenPacks := restic.NewIDSet()
var workerWG sync.WaitGroup
IDChan := make(chan restic.ID)
for i := 0; i < defaultParallelism; i++ {
workerWG.Add(1)
go packIDTester(ctx, c.repo, IDChan, errChan, &workerWG)
}
for id := range c.packs {
seenPacks.Insert(id)
IDChan <- id
}
close(IDChan)
debug.Log("waiting for %d workers to terminate", defaultParallelism)
workerWG.Wait()
debug.Log("workers terminated")
for id := range c.repo.List(ctx, restic.DataFile) {
debug.Log("check data blob %v", id.Str())
if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id)
select {
case <-ctx.Done():
return
case errChan <- PackError{ID: id, Orphaned: true, Err: errors.New("not referenced in any index")}:
}
}
}
}
// Error is an error that occurred while checking a repository.
type Error struct {
TreeID restic.ID
BlobID restic.ID
Err error
}
func (e Error) Error() string {
if !e.BlobID.IsNull() && !e.TreeID.IsNull() {
msg := "tree " + e.TreeID.Str()
msg += ", blob " + e.BlobID.Str()
msg += ": " + e.Err.Error()
return msg
}
if !e.TreeID.IsNull() {
return "tree " + e.TreeID.Str() + ": " + e.Err.Error()
}
return e.Err.Error()
}
func loadTreeFromSnapshot(ctx context.Context, repo restic.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(ctx, repo, id)
if err != nil {
debug.Log("error loading snapshot %v: %v", id.Str(), err)
return restic.ID{}, err
}
if sn.Tree == nil {
debug.Log("snapshot %v has no tree", id.Str())
return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
}
return *sn.Tree, nil
}
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(ctx context.Context, repo restic.Repository) (restic.IDs, []error) {
var trees struct {
IDs restic.IDs
sync.Mutex
}
var errs struct {
errs []error
sync.Mutex
}
snapshotWorker := func(ctx context.Context, strID string) error {
id, err := restic.ParseID(strID)
if err != nil {
return err
}
debug.Log("load snapshot %v", id.Str())
treeID, err := loadTreeFromSnapshot(ctx, repo, id)
if err != nil {
errs.Lock()
errs.errs = append(errs.errs, err)
errs.Unlock()
return nil
}
debug.Log("snapshot %v has tree %v", id.Str(), treeID.Str())
trees.Lock()
trees.IDs = append(trees.IDs, treeID)
trees.Unlock()
return nil
}
err := repository.FilesInParallel(ctx, repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
if err != nil {
errs.errs = append(errs.errs, err)
}
return trees.IDs, errs.errs
}
// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
ID restic.ID
Errors []error
}
func (e TreeError) Error() string {
return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors)
}
type treeJob struct {
restic.ID
error
*restic.Tree
}
// loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(ctx context.Context, repo restic.Repository,
in <-chan restic.ID, out chan<- treeJob,
wg *sync.WaitGroup) {
defer func() {
debug.Log("exiting")
wg.Done()
}()
var (
inCh = in
outCh = out
job treeJob
)
outCh = nil
for {
select {
case <-ctx.Done():
return
case treeID, ok := <-inCh:
if !ok {
return
}
debug.Log("load tree %v", treeID.Str())
tree, err := repo.LoadTree(ctx, treeID)
debug.Log("load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
job = treeJob{ID: treeID, error: err, Tree: tree}
outCh = out
inCh = nil
case outCh <- job:
debug.Log("sent tree %v", job.ID.Str())
outCh = nil
inCh = in
}
}
}
// checkTreeWorker checks the trees received and sends out errors to errChan.
func (c *Checker) checkTreeWorker(ctx context.Context, in <-chan treeJob, out chan<- error, wg *sync.WaitGroup) {
defer func() {
debug.Log("exiting")
wg.Done()
}()
var (
inCh = in
outCh = out
treeError TreeError
)
outCh = nil
for {
select {
case <-ctx.Done():
debug.Log("done channel closed, exiting")
return
case job, ok := <-inCh:
if !ok {
debug.Log("input channel closed, exiting")
return
}
id := job.ID
alreadyChecked := false
c.blobRefs.Lock()
if c.blobRefs.M[id] > 0 {
alreadyChecked = true
}
c.blobRefs.M[id]++
debug.Log("tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
c.blobRefs.Unlock()
if alreadyChecked {
continue
}
debug.Log("check tree %v (tree %v, err %v)", job.ID.Str(), job.Tree, job.error)
var errs []error
if job.error != nil {
errs = append(errs, job.error)
} else {
errs = c.checkTree(job.ID, job.Tree)
}
if len(errs) > 0 {
debug.Log("checked tree %v: %v errors", job.ID.Str(), len(errs))
treeError = TreeError{ID: job.ID, Errors: errs}
outCh = out
inCh = nil
}
case outCh <- treeError:
debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
outCh = nil
inCh = in
}
}
}
func filterTrees(ctx context.Context, backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob) {
defer func() {
debug.Log("closing output channels")
close(loaderChan)
close(out)
}()
var (
inCh = in
outCh = out
loadCh = loaderChan
job treeJob
nextTreeID restic.ID
outstandingLoadTreeJobs = 0
)
outCh = nil
loadCh = nil
for {
if loadCh == nil && len(backlog) > 0 {
loadCh = loaderChan
nextTreeID, backlog = backlog[0], backlog[1:]
}
if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
debug.Log("backlog is empty, all channels nil, exiting")
return
}
select {
case <-ctx.Done():
return
case loadCh <- nextTreeID:
outstandingLoadTreeJobs++
loadCh = nil
case j, ok := <-inCh:
if !ok {
debug.Log("input channel closed")
inCh = nil
in = nil
continue
}
outstandingLoadTreeJobs--
debug.Log("input job tree %v", j.ID.Str())
var err error
if j.error != nil {
debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID.Str())
} else if j.Tree == nil {
debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID.Str())
err = errors.New("tree is nil and error is nil")
} else {
debug.Log("subtrees for tree %v: %v", j.ID.Str(), j.Tree.Subtrees())
for _, id := range j.Tree.Subtrees() {
if id.IsNull() {
// We do not need to raise this error here, it is
// checked when the tree is checked. Just make sure
// that we do not add any null IDs to the backlog.
debug.Log("tree %v has nil subtree", j.ID.Str())
continue
}
backlog = append(backlog, id)
}
}
if err != nil {
// send a new job with the new error instead of the old one
j = treeJob{ID: j.ID, error: err}
}
job = j
outCh = out
inCh = nil
case outCh <- job:
debug.Log("tree sent to check: %v", job.ID.Str())
outCh = nil
inCh = in
}
}
}
// Structure checks that for all snapshots all referenced data blobs and
// subtrees are available in the index. errChan is closed after all trees have
// been traversed.
func (c *Checker) Structure(ctx context.Context, errChan chan<- error) {
defer close(errChan)
trees, errs := loadSnapshotTreeIDs(ctx, c.repo)
debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
for _, err := range errs {
select {
case <-ctx.Done():
return
case errChan <- err:
}
}
treeIDChan := make(chan restic.ID)
treeJobChan1 := make(chan treeJob)
treeJobChan2 := make(chan treeJob)
var wg sync.WaitGroup
for i := 0; i < defaultParallelism; i++ {
wg.Add(2)
go loadTreeWorker(ctx, c.repo, treeIDChan, treeJobChan1, &wg)
go c.checkTreeWorker(ctx, treeJobChan2, errChan, &wg)
}
filterTrees(ctx, trees, treeIDChan, treeJobChan1, treeJobChan2)
wg.Wait()
}
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
debug.Log("checking tree %v", id.Str())
var blobs []restic.ID
for _, node := range tree.Nodes {
switch node.Type {
case "file":
if node.Content == nil {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
}
for b, blobID := range node.Content {
if blobID.IsNull() {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
continue
}
blobs = append(blobs, blobID)
}
case "dir":
if node.Subtree == nil {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
continue
}
if node.Subtree.IsNull() {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
continue
}
case "symlink", "socket", "chardev", "dev", "fifo":
// nothing to check
default:
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
}
if node.Name == "" {
errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
}
}
for _, blobID := range blobs {
c.blobRefs.Lock()
c.blobRefs.M[blobID]++
debug.Log("blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
c.blobRefs.Unlock()
if !c.blobs.Has(blobID) {
debug.Log("tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
}
}
return errs
}
// UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
c.blobRefs.Lock()
defer c.blobRefs.Unlock()
debug.Log("checking %d blobs", len(c.blobs))
for id := range c.blobs {
if c.blobRefs.M[id] == 0 {
debug.Log("blob %v not referenced", id.Str())
blobs = append(blobs, id)
}
}
return blobs
}
// CountPacks returns the number of packs in the repository.
func (c *Checker) CountPacks() uint64 {
return uint64(len(c.packs))
}
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(ctx context.Context, r restic.Repository, id restic.ID) error {
debug.Log("checking pack %v", id.Str())
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
rd, err := r.Backend().Load(ctx, h, 0, 0)
if err != nil {
return err
}
packfile, err := fs.TempFile("", "restic-temp-check-")
if err != nil {
return errors.Wrap(err, "TempFile")
}
defer func() {
packfile.Close()
os.Remove(packfile.Name())
}()
hrd := hashing.NewReader(rd, sha256.New())
size, err := io.Copy(packfile, hrd)
if err != nil {
return errors.Wrap(err, "Copy")
}
if err = rd.Close(); err != nil {
return err
}
hash := restic.IDFromHash(hrd.Sum(nil))
debug.Log("hash for pack %v is %v", id.Str(), hash.Str())
if !hash.Equal(id) {
debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
}
blobs, err := pack.List(r.Key(), packfile, size)
if err != nil {
return err
}
var errs []error
var buf []byte
for i, blob := range blobs {
debug.Log(" check blob %d: %v", i, blob)
buf = buf[:cap(buf)]
if uint(len(buf)) < blob.Length {
buf = make([]byte, blob.Length)
}
buf = buf[:blob.Length]
_, err := packfile.Seek(int64(blob.Offset), 0)
if err != nil {
return errors.Errorf("Seek(%v): %v", blob.Offset, err)
}
_, err = io.ReadFull(packfile, buf)
if err != nil {
debug.Log(" error loading blob %v: %v", blob.ID.Str(), err)
errs = append(errs, errors.Errorf("blob %v: %v", i, err))
continue
}
n, err := r.Key().Decrypt(buf, buf)
if err != nil {
debug.Log(" error decrypting blob %v: %v", blob.ID.Str(), err)
errs = append(errs, errors.Errorf("blob %v: %v", i, err))
continue
}
buf = buf[:n]
hash := restic.Hash(buf)
if !hash.Equal(blob.ID) {
debug.Log(" Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
continue
}
}
if len(errs) > 0 {
return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
}
return nil
}
// ReadData loads all data from the repository and checks the integrity.
func (c *Checker) ReadData(ctx context.Context, p *restic.Progress, errChan chan<- error) {
defer close(errChan)
p.Start()
defer p.Done()
worker := func(wg *sync.WaitGroup, in <-chan restic.ID) {
defer wg.Done()
for {
var id restic.ID
var ok bool
select {
case <-ctx.Done():
return
case id, ok = <-in:
if !ok {
return
}
}
err := checkPack(ctx, c.repo, id)
p.Report(restic.Stat{Blobs: 1})
if err == nil {
continue
}
select {
case <-ctx.Done():
return
case errChan <- err:
}
}
}
ch := c.repo.List(ctx, restic.DataFile)
var wg sync.WaitGroup
for i := 0; i < defaultParallelism; i++ {
wg.Add(1)
go worker(&wg, ch)
}
wg.Wait()
}

View File

@@ -0,0 +1,370 @@
package checker_test
import (
"context"
"io"
"math/rand"
"path/filepath"
"sort"
"testing"
"restic"
"restic/archiver"
"restic/checker"
"restic/repository"
"restic/test"
)
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func collectErrors(ctx context.Context, f func(context.Context, chan<- error)) (errs []error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
errChan := make(chan error)
go f(ctx, errChan)
for err := range errChan {
errs = append(errs, err)
}
return errs
}
func checkPacks(chkr *checker.Checker) []error {
return collectErrors(context.TODO(), chkr.Packs)
}
func checkStruct(chkr *checker.Checker) []error {
return collectErrors(context.TODO(), chkr.Structure)
}
func checkData(chkr *checker.Checker) []error {
return collectErrors(
context.TODO(),
func(ctx context.Context, errCh chan<- error) {
chkr.ReadData(ctx, nil, errCh)
},
)
}
func TestCheckRepo(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))
}
func TestMissingPack(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
packHandle := restic.Handle{
Type: restic.DataFile,
Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6",
}
test.OK(t, repo.Backend().Remove(context.TODO(), packHandle))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
errs = checkPacks(chkr)
test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok {
test.Equals(t, packHandle.Name, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
}
func TestUnreferencedPack(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
// index 3f1a only references pack 60e0
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
indexHandle := restic.Handle{
Type: restic.IndexFile,
Name: "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44",
}
test.OK(t, repo.Backend().Remove(context.TODO(), indexHandle))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
errs = checkPacks(chkr)
test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok {
test.Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
}
func TestUnreferencedBlobs(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
snapshotHandle := restic.Handle{
Type: restic.SnapshotFile,
Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02",
}
test.OK(t, repo.Backend().Remove(context.TODO(), snapshotHandle))
unusedBlobsBySnapshot := restic.IDs{
restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
}
sort.Sort(unusedBlobsBySnapshot)
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))
blobs := chkr.UnusedBlobs()
sort.Sort(blobs)
test.Equals(t, unusedBlobsBySnapshot, blobs)
}
func TestModifiedIndex(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
done := make(chan struct{})
defer close(done)
h := restic.Handle{
Type: restic.IndexFile,
Name: "90f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
}
f, err := repo.Backend().Load(context.TODO(), h, 0, 0)
test.OK(t, err)
// save the index again with a modified name so that the hash doesn't match
// the content any more
h2 := restic.Handle{
Type: restic.IndexFile,
Name: "80f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
}
err = repo.Backend().Save(context.TODO(), h2, f)
test.OK(t, err)
test.OK(t, f.Close())
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) == 0 {
t.Fatalf("expected errors not found")
}
for _, err := range errs {
t.Logf("found expected error %v", err)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
}
var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")
func TestDuplicatePacksInIndex(t *testing.T) {
repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(hints) == 0 {
t.Fatalf("did not get expected checker hints for duplicate packs in indexes")
}
found := false
for _, hint := range hints {
if _, ok := hint.(checker.ErrDuplicatePacks); ok {
found = true
} else {
t.Errorf("got unexpected hint: %v", hint)
}
}
if !found {
t.Fatalf("did not find hint ErrDuplicatePacks")
}
if len(errs) > 0 {
t.Errorf("expected no errors, got %v: %v", len(errs), errs)
}
}
// errorBackend randomly modifies data after reading.
type errorBackend struct {
restic.Backend
ProduceErrors bool
}
func (b errorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
rd, err := b.Backend.Load(ctx, h, length, offset)
if err != nil {
return rd, err
}
if b.ProduceErrors {
return errorReadCloser{rd}, err
}
return rd, nil
}
type errorReadCloser struct {
io.ReadCloser
}
func (erd errorReadCloser) Read(p []byte) (int, error) {
n, err := erd.ReadCloser.Read(p)
if n > 0 {
induceError(p[:n])
}
return n, err
}
func (erd errorReadCloser) Close() error {
return erd.ReadCloser.Close()
}
// induceError flips a random bit in the slice (with 80% probability).
func induceError(data []byte) {
if rand.Float32() < 0.2 {
return
}
pos := rand.Intn(len(data))
data[pos] ^= 1
}
func TestCheckerModifiedData(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := archiver.New(repo)
_, id, err := arch.Snapshot(context.TODO(), nil, []string{"."}, nil, "localhost", nil)
test.OK(t, err)
t.Logf("archived as %v", id.Str())
beError := &errorBackend{Backend: repo.Backend()}
checkRepo := repository.New(beError)
test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5))
chkr := checker.New(checkRepo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
beError.ProduceErrors = true
errFound := false
for _, err := range checkPacks(chkr) {
t.Logf("pack error: %v", err)
}
for _, err := range checkStruct(chkr) {
t.Logf("struct error: %v", err)
}
for _, err := range checkData(chkr) {
t.Logf("data error: %v", err)
errFound = true
}
if !errFound {
t.Fatal("no error found, checker is broken")
}
}
func BenchmarkChecker(t *testing.B) {
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
t.ResetTimer()
for i := 0; i < t.N; i++ {
test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))
test.OKs(t, checkData(chkr))
}
}

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,51 @@
package checker
import (
"context"
"restic"
"testing"
)
// TestCheckRepo runs the checker on repo.
func TestCheckRepo(t testing.TB, repo restic.Repository) {
chkr := New(repo)
hints, errs := chkr.LoadIndex(context.TODO())
if len(errs) != 0 {
t.Fatalf("errors loading index: %v", errs)
}
if len(hints) != 0 {
t.Fatalf("errors loading index: %v", hints)
}
// packs
errChan := make(chan error)
go chkr.Packs(context.TODO(), errChan)
for err := range errChan {
t.Error(err)
}
// structure
errChan = make(chan error)
go chkr.Structure(context.TODO(), errChan)
for err := range errChan {
t.Error(err)
}
// unused blobs
blobs := chkr.UnusedBlobs()
if len(blobs) > 0 {
t.Errorf("unused blobs found: %v", blobs)
}
// read data
errChan = make(chan error)
go chkr.ReadData(context.TODO(), nil, errChan)
for err := range errChan {
t.Error(err)
}
}

80
internal/config.go Normal file
View File

@@ -0,0 +1,80 @@
package restic
import (
"context"
"testing"
"restic/errors"
"restic/debug"
"github.com/restic/chunker"
)
// Config contains the configuration for a repository.
type Config struct {
Version uint `json:"version"`
ID string `json:"id"`
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}
// RepoVersion is the version that is written to the config when a repository
// is newly created with Init().
const RepoVersion = 1
// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
LoadJSONUnpacked(context.Context, FileType, ID, interface{}) error
}
// CreateConfig creates a config file with a randomly selected polynomial and
// ID.
func CreateConfig() (Config, error) {
var (
err error
cfg Config
)
cfg.ChunkerPolynomial, err = chunker.RandomPolynomial()
if err != nil {
return Config{}, errors.Wrap(err, "chunker.RandomPolynomial")
}
cfg.ID = NewRandomID().String()
cfg.Version = RepoVersion
debug.Log("New config: %#v", cfg)
return cfg, nil
}
// TestCreateConfig creates a config for use within tests.
func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) {
cfg.ChunkerPolynomial = pol
cfg.ID = NewRandomID().String()
cfg.Version = RepoVersion
return cfg
}
// LoadConfig loads, checks and returns the config for a repository.
func LoadConfig(ctx context.Context, r JSONUnpackedLoader) (Config, error) {
var (
cfg Config
)
err := r.LoadJSONUnpacked(ctx, ConfigFile, ID{}, &cfg)
if err != nil {
return Config{}, err
}
if cfg.Version != RepoVersion {
return Config{}, errors.New("unsupported repository version")
}
if !cfg.ChunkerPolynomial.Irreducible() {
return Config{}, errors.New("invalid chunker polynomial")
}
return cfg, nil
}

internal/config_test.go (new file)

@@ -0,0 +1,55 @@
package restic_test
import (
"context"
"restic"
"testing"
. "restic/test"
)
type saver func(restic.FileType, interface{}) (restic.ID, error)
func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) {
return s(t, arg)
}
type loader func(context.Context, restic.FileType, restic.ID, interface{}) error
func (l loader) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, arg interface{}) error {
return l(ctx, t, id, arg)
}
func TestConfig(t *testing.T) {
resultConfig := restic.Config{}
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(restic.Config)
resultConfig = cfg
return restic.ID{}, nil
}
cfg1, err := restic.CreateConfig()
OK(t, err)
_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)
OK(t, err)
load := func(ctx context.Context, tpe restic.FileType, id restic.ID, arg interface{}) error {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(*restic.Config)
*cfg = resultConfig
return nil
}
cfg2, err := restic.LoadConfig(context.TODO(), loader(load))
OK(t, err)
Assert(t, cfg1 == cfg2,
"configs aren't equal: %v != %v", cfg1, cfg2)
}

internal/crypto/crypto.go (new file)

@@ -0,0 +1,331 @@
package crypto
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/json"
"fmt"
"restic/errors"
"golang.org/x/crypto/poly1305"
)
const (
aesKeySize = 32 // for AES-256
macKeySizeK = 16 // for AES-128
macKeySizeR = 16 // for Poly1305
macKeySize = macKeySizeK + macKeySizeR // for Poly1305-AES128
ivSize = aes.BlockSize
macSize = poly1305.TagSize
// Extension is the number of bytes a plaintext is enlarged by encrypting it.
Extension = ivSize + macSize
)
var (
// ErrUnauthenticated is returned when ciphertext verification has failed.
ErrUnauthenticated = errors.New("ciphertext verification failed")
)
// Key holds encryption and message authentication keys for a repository. It is stored
// encrypted and authenticated as a JSON data structure in the Data field of the Key
// structure.
type Key struct {
MACKey `json:"mac"`
EncryptionKey `json:"encrypt"`
}
// EncryptionKey is the key used for encryption.
type EncryptionKey [32]byte
// MACKey is used to sign (authenticate) data.
type MACKey struct {
K [16]byte // for AES-128
R [16]byte // for Poly1305
masked bool // remember if the MAC key has already been masked
}
// mask for the Poly1305 key (cf. http://cr.yp.to/mac/poly1305-20050329.pdf)
var poly1305KeyMask = [16]byte{
0xff,
0xff,
0xff,
0x0f, // 3: top four bits zero
0xfc, // 4: bottom two bits zero
0xff,
0xff,
0x0f, // 7: top four bits zero
0xfc, // 8: bottom two bits zero
0xff,
0xff,
0x0f, // 11: top four bits zero
0xfc, // 12: bottom two bits zero
0xff,
0xff,
0x0f, // 15: top four bits zero
}
func poly1305MAC(msg []byte, nonce []byte, key *MACKey) []byte {
k := poly1305PrepareKey(nonce, key)
var out [16]byte
poly1305.Sum(&out, msg, &k)
return out[:]
}
// mask poly1305 key
func maskKey(k *MACKey) {
if k == nil || k.masked {
return
}
for i := 0; i < poly1305.TagSize; i++ {
k.R[i] = k.R[i] & poly1305KeyMask[i]
}
k.masked = true
}
// construct mac key from slice (k||r), with masking
func macKeyFromSlice(mk *MACKey, data []byte) {
copy(mk.K[:], data[:16])
copy(mk.R[:], data[16:32])
maskKey(mk)
}
// prepare key for low-level poly1305.Sum(): r||n
func poly1305PrepareKey(nonce []byte, key *MACKey) [32]byte {
var k [32]byte
maskKey(key)
cipher, err := aes.NewCipher(key.K[:])
if err != nil {
panic(err)
}
cipher.Encrypt(k[16:], nonce[:])
copy(k[:16], key.R[:])
return k
}
func poly1305Verify(msg []byte, nonce []byte, key *MACKey, mac []byte) bool {
k := poly1305PrepareKey(nonce, key)
var m [16]byte
copy(m[:], mac)
return poly1305.Verify(&m, msg, &k)
}
// NewRandomKey returns new encryption and message authentication keys.
func NewRandomKey() *Key {
k := &Key{}
n, err := rand.Read(k.EncryptionKey[:])
if n != aesKeySize || err != nil {
panic("unable to read enough random bytes for encryption key")
}
n, err = rand.Read(k.MACKey.K[:])
if n != macKeySizeK || err != nil {
panic("unable to read enough random bytes for MAC encryption key")
}
n, err = rand.Read(k.MACKey.R[:])
if n != macKeySizeR || err != nil {
panic("unable to read enough random bytes for MAC key")
}
maskKey(&k.MACKey)
return k
}
func newIV() []byte {
iv := make([]byte, ivSize)
n, err := rand.Read(iv)
if n != ivSize || err != nil {
panic("unable to read enough random bytes for iv")
}
return iv
}
type jsonMACKey struct {
K []byte `json:"k"`
R []byte `json:"r"`
}
// MarshalJSON converts the MACKey to JSON.
func (m *MACKey) MarshalJSON() ([]byte, error) {
return json.Marshal(jsonMACKey{K: m.K[:], R: m.R[:]})
}
// UnmarshalJSON fills the key m with data from the JSON representation.
func (m *MACKey) UnmarshalJSON(data []byte) error {
j := jsonMACKey{}
err := json.Unmarshal(data, &j)
if err != nil {
return errors.Wrap(err, "Unmarshal")
}
copy(m.K[:], j.K)
copy(m.R[:], j.R)
return nil
}
// Valid tests whether the MAC key m is valid (i.e. not zero).
func (m *MACKey) Valid() bool {
nonzeroK := false
for i := 0; i < len(m.K); i++ {
if m.K[i] != 0 {
nonzeroK = true
}
}
if !nonzeroK {
return false
}
for i := 0; i < len(m.R); i++ {
if m.R[i] != 0 {
return true
}
}
return false
}
// MarshalJSON converts the EncryptionKey to JSON.
func (k *EncryptionKey) MarshalJSON() ([]byte, error) {
return json.Marshal(k[:])
}
// UnmarshalJSON fills the key k with data from the JSON representation.
func (k *EncryptionKey) UnmarshalJSON(data []byte) error {
d := make([]byte, aesKeySize)
err := json.Unmarshal(data, &d)
if err != nil {
return errors.Wrap(err, "Unmarshal")
}
copy(k[:], d)
return nil
}
// Valid tests whether the key k is valid (i.e. not zero).
func (k *EncryptionKey) Valid() bool {
for i := 0; i < len(k); i++ {
if k[i] != 0 {
return true
}
}
return false
}
// ErrInvalidCiphertext is returned when trying to encrypt into the slice that
// holds the plaintext.
var ErrInvalidCiphertext = errors.New("invalid ciphertext, same slice used for plaintext")
// Encrypt encrypts and authenticates data. Stored in ciphertext is IV ||
// Ciphertext || MAC. Encrypt returns the new ciphertext slice, which is
// extended when necessary. ciphertext and plaintext must not point to the
// same memory; overlapping slices are not supported.
func (k *Key) Encrypt(ciphertext []byte, plaintext []byte) ([]byte, error) {
if !k.Valid() {
return nil, errors.New("invalid key")
}
ciphertext = ciphertext[:cap(ciphertext)]
// test for same slice, if possible
if len(plaintext) > 0 && len(ciphertext) > 0 && &plaintext[0] == &ciphertext[0] {
return nil, ErrInvalidCiphertext
}
// extend ciphertext slice if necessary
if len(ciphertext) < len(plaintext)+Extension {
ext := len(plaintext) + Extension - cap(ciphertext)
ciphertext = append(ciphertext, make([]byte, ext)...)
ciphertext = ciphertext[:cap(ciphertext)]
}
iv := newIV()
c, err := aes.NewCipher(k.EncryptionKey[:])
if err != nil {
panic(fmt.Sprintf("unable to create cipher: %v", err))
}
e := cipher.NewCTR(c, iv[:])
e.XORKeyStream(ciphertext[ivSize:], plaintext)
copy(ciphertext, iv[:])
// truncate to only cover iv and actual ciphertext
ciphertext = ciphertext[:ivSize+len(plaintext)]
mac := poly1305MAC(ciphertext[ivSize:], ciphertext[:ivSize], &k.MACKey)
ciphertext = append(ciphertext, mac...)
return ciphertext, nil
}
// Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form
// IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the
// same slice.
func (k *Key) Decrypt(plaintext []byte, ciphertextWithMac []byte) (int, error) {
if !k.Valid() {
return 0, errors.New("invalid key")
}
// check for plausible length
if len(ciphertextWithMac) < ivSize+macSize {
panic("trying to decrypt invalid data: ciphertext too small")
}
// check buffer length for plaintext
plaintextLength := len(ciphertextWithMac) - ivSize - macSize
if len(plaintext) < plaintextLength {
return 0, errors.Errorf("plaintext buffer too small, %d < %d", len(plaintext), plaintextLength)
}
// extract mac
l := len(ciphertextWithMac) - macSize
ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:]
// verify mac
if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &k.MACKey, mac) {
return 0, ErrUnauthenticated
}
// extract iv
iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:]
if len(ciphertext) != plaintextLength {
return 0, errors.Errorf("plaintext and ciphertext lengths do not match: %d != %d", len(ciphertext), plaintextLength)
}
// decrypt data
c, err := aes.NewCipher(k.EncryptionKey[:])
if err != nil {
panic(fmt.Sprintf("unable to create cipher: %v", err))
}
// decrypt
e := cipher.NewCTR(c, iv)
plaintext = plaintext[:len(ciphertext)]
e.XORKeyStream(plaintext, ciphertext)
return plaintextLength, nil
}
// Valid tests if the key is valid.
func (k *Key) Valid() bool {
return k.EncryptionKey.Valid() && k.MACKey.Valid()
}
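
For orientation, a short round-trip sketch (not part of this commit) that uses only the API defined above; the "restic/crypto" import path follows the convention used elsewhere in this commit:

package main

import (
	"bytes"
	"fmt"

	"restic/crypto"
)

func main() {
	k := crypto.NewRandomKey()
	plaintext := []byte("some data")

	// Encrypt stores IV || Ciphertext || MAC and extends the slice as needed.
	ciphertext, err := k.Encrypt(make([]byte, 0, len(plaintext)+crypto.Extension), plaintext)
	if err != nil {
		panic(err)
	}

	// Decrypt needs a buffer of at least len(ciphertext)-crypto.Extension
	// bytes and returns the number of plaintext bytes written.
	buf := make([]byte, len(ciphertext))
	n, err := k.Decrypt(buf, ciphertext)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(buf[:n], plaintext)) // prints: true
}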

@@ -0,0 +1,139 @@
package crypto
import (
"bytes"
"encoding/hex"
"testing"
)
// test vectors from http://cr.yp.to/mac/poly1305-20050329.pdf
var poly1305Tests = []struct {
msg []byte
r []byte
k []byte
nonce []byte
mac []byte
}{
{
[]byte("\xf3\xf6"),
[]byte("\x85\x1f\xc4\x0c\x34\x67\xac\x0b\xe0\x5c\xc2\x04\x04\xf3\xf7\x00"),
[]byte("\xec\x07\x4c\x83\x55\x80\x74\x17\x01\x42\x5b\x62\x32\x35\xad\xd6"),
[]byte("\xfb\x44\x73\x50\xc4\xe8\x68\xc5\x2a\xc3\x27\x5c\xf9\xd4\x32\x7e"),
[]byte("\xf4\xc6\x33\xc3\x04\x4f\xc1\x45\xf8\x4f\x33\x5c\xb8\x19\x53\xde"),
},
{
[]byte(""),
[]byte("\xa0\xf3\x08\x00\x00\xf4\x64\x00\xd0\xc7\xe9\x07\x6c\x83\x44\x03"),
[]byte("\x75\xde\xaa\x25\xc0\x9f\x20\x8e\x1d\xc4\xce\x6b\x5c\xad\x3f\xbf"),
[]byte("\x61\xee\x09\x21\x8d\x29\xb0\xaa\xed\x7e\x15\x4a\x2c\x55\x09\xcc"),
[]byte("\xdd\x3f\xab\x22\x51\xf1\x1a\xc7\x59\xf0\x88\x71\x29\xcc\x2e\xe7"),
},
{
[]byte("\x66\x3c\xea\x19\x0f\xfb\x83\xd8\x95\x93\xf3\xf4\x76\xb6\xbc\x24\xd7\xe6\x79\x10\x7e\xa2\x6a\xdb\x8c\xaf\x66\x52\xd0\x65\x61\x36"),
[]byte("\x48\x44\x3d\x0b\xb0\xd2\x11\x09\xc8\x9a\x10\x0b\x5c\xe2\xc2\x08"),
[]byte("\x6a\xcb\x5f\x61\xa7\x17\x6d\xd3\x20\xc5\xc1\xeb\x2e\xdc\xdc\x74"),
[]byte("\xae\x21\x2a\x55\x39\x97\x29\x59\x5d\xea\x45\x8b\xc6\x21\xff\x0e"),
[]byte("\x0e\xe1\xc1\x6b\xb7\x3f\x0f\x4f\xd1\x98\x81\x75\x3c\x01\xcd\xbe"),
}, {
[]byte("\xab\x08\x12\x72\x4a\x7f\x1e\x34\x27\x42\xcb\xed\x37\x4d\x94\xd1\x36\xc6\xb8\x79\x5d\x45\xb3\x81\x98\x30\xf2\xc0\x44\x91\xfa\xf0\x99\x0c\x62\xe4\x8b\x80\x18\xb2\xc3\xe4\xa0\xfa\x31\x34\xcb\x67\xfa\x83\xe1\x58\xc9\x94\xd9\x61\xc4\xcb\x21\x09\x5c\x1b\xf9"),
[]byte("\x12\x97\x6a\x08\xc4\x42\x6d\x0c\xe8\xa8\x24\x07\xc4\xf4\x82\x07"),
[]byte("\xe1\xa5\x66\x8a\x4d\x5b\x66\xa5\xf6\x8c\xc5\x42\x4e\xd5\x98\x2d"),
[]byte("\x9a\xe8\x31\xe7\x43\x97\x8d\x3a\x23\x52\x7c\x71\x28\x14\x9e\x3a"),
[]byte("\x51\x54\xad\x0d\x2c\xb2\x6e\x01\x27\x4f\xc5\x11\x48\x49\x1f\x1b"),
},
}
func TestPoly1305(t *testing.T) {
for _, test := range poly1305Tests {
key := &MACKey{}
copy(key.K[:], test.k)
copy(key.R[:], test.r)
mac := poly1305MAC(test.msg, test.nonce, key)
if !bytes.Equal(mac, test.mac) {
t.Fatalf("wrong mac calculated, want: %02x, got: %02x", test.mac, mac)
}
if !poly1305Verify(test.msg, test.nonce, key, test.mac) {
t.Fatalf("mac does not verify: mac: %02x", test.mac)
}
}
}
var testValues = []struct {
ekey EncryptionKey
skey MACKey
ciphertext []byte
plaintext []byte
}{
{
ekey: EncryptionKey([...]byte{
0x30, 0x3e, 0x86, 0x87, 0xb1, 0xd7, 0xdb, 0x18, 0x42, 0x1b, 0xdc, 0x6b, 0xb8, 0x58, 0x8c, 0xca,
0xda, 0xc4, 0xd5, 0x9e, 0xe8, 0x7b, 0x8f, 0xf7, 0x0c, 0x44, 0xe6, 0x35, 0x79, 0x0c, 0xaf, 0xef,
}),
skey: MACKey{
K: [...]byte{0xef, 0x4d, 0x88, 0x24, 0xcb, 0x80, 0xb2, 0xbc, 0xc5, 0xfb, 0xff, 0x8a, 0x9b, 0x12, 0xa4, 0x2c},
R: [...]byte{0xcc, 0x8d, 0x4b, 0x94, 0x8e, 0xe0, 0xeb, 0xfe, 0x1d, 0x41, 0x5d, 0xe9, 0x21, 0xd1, 0x03, 0x53},
},
ciphertext: decodeHex("69fb41c62d12def4593bd71757138606338f621aeaeb39da0fe4f99233f8037a54ea63338a813bcf3f75d8c3cc75dddf8750"),
plaintext: []byte("Dies ist ein Test!"),
},
}
func decodeHex(s string) []byte {
d, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return d
}
func TestCrypto(t *testing.T) {
msg := make([]byte, 0, 8*1024*1024) // use 8MiB for now
for _, tv := range testValues {
// test encryption
k := &Key{
EncryptionKey: tv.ekey,
MACKey: tv.skey,
}
msg, err := k.Encrypt(msg, tv.plaintext)
if err != nil {
t.Fatal(err)
}
// decrypt message
buf := make([]byte, len(tv.plaintext))
n, err := k.Decrypt(buf, msg)
if err != nil {
t.Fatal(err)
}
buf = buf[:n]
// change mac, this must fail
msg[len(msg)-8] ^= 0x23
if _, err = k.Decrypt(buf, msg); err != ErrUnauthenticated {
t.Fatal("wrong MAC value not detected")
}
// reset mac
msg[len(msg)-8] ^= 0x23
// tamper with message, this must fail
msg[16+5] ^= 0x85
if _, err = k.Decrypt(buf, msg); err != ErrUnauthenticated {
t.Fatal("tampered message not detected")
}
// test decryption
p := make([]byte, len(tv.ciphertext))
n, err = k.Decrypt(p, tv.ciphertext)
if err != nil {
t.Fatal(err)
}
p = p[:n]
if !bytes.Equal(p, tv.plaintext) {
t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p)
}
}
}

@@ -0,0 +1,171 @@
package crypto_test
import (
"bytes"
"crypto/rand"
"io"
"testing"
"restic/crypto"
. "restic/test"
"github.com/restic/chunker"
)
const testLargeCrypto = false
func TestEncryptDecrypt(t *testing.T) {
k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if testLargeCrypto {
tests = append(tests, 7<<20+123)
}
for _, size := range tests {
data := Random(42, size)
buf := make([]byte, size+crypto.Extension)
ciphertext, err := k.Encrypt(buf, data)
OK(t, err)
Assert(t, len(ciphertext) == len(data)+crypto.Extension,
"ciphertext length does not match: want %d, got %d",
len(data)+crypto.Extension, len(ciphertext))
plaintext := make([]byte, len(ciphertext))
n, err := k.Decrypt(plaintext, ciphertext)
OK(t, err)
plaintext = plaintext[:n]
Assert(t, len(plaintext) == len(data),
"plaintext length does not match: want %d, got %d",
len(data), len(plaintext))
Equals(t, plaintext, data)
}
}
func TestSmallBuffer(t *testing.T) {
k := crypto.NewRandomKey()
size := 600
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
ciphertext := make([]byte, size/2)
ciphertext, err = k.Encrypt(ciphertext, data)
OK(t, err)
// this must extend the slice
Assert(t, cap(ciphertext) > size/2,
"expected extended slice, but capacity is only %d bytes",
cap(ciphertext))
// check for the correct plaintext
plaintext := make([]byte, len(ciphertext))
n, err := k.Decrypt(plaintext, ciphertext)
OK(t, err)
plaintext = plaintext[:n]
Assert(t, bytes.Equal(plaintext, data),
"wrong plaintext returned")
}
func TestSameBuffer(t *testing.T) {
k := crypto.NewRandomKey()
size := 600
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
ciphertext := make([]byte, 0, size+crypto.Extension)
ciphertext, err = k.Encrypt(ciphertext, data)
OK(t, err)
// use the same buffer for decryption
n, err := k.Decrypt(ciphertext, ciphertext)
OK(t, err)
ciphertext = ciphertext[:n]
Assert(t, bytes.Equal(ciphertext, data),
"wrong plaintext returned")
}
func TestCornerCases(t *testing.T) {
k := crypto.NewRandomKey()
// nil plaintext should encrypt to the empty string
// nil ciphertext should allocate a new slice for the ciphertext
c, err := k.Encrypt(nil, nil)
OK(t, err)
Assert(t, len(c) == crypto.Extension,
"wrong length returned for ciphertext, expected %d, got %d",
crypto.Extension, len(c))
// this should decrypt to nil
n, err := k.Decrypt(nil, c)
OK(t, err)
Equals(t, 0, n)
// test encryption for same slice, this should return an error
_, err = k.Encrypt(c, c)
Equals(t, crypto.ErrInvalidCiphertext, err)
}
func TestLargeEncrypt(t *testing.T) {
if !testLargeCrypto {
t.SkipNow()
}
k := crypto.NewRandomKey()
for _, size := range []int{chunker.MaxSize, chunker.MaxSize + 1, chunker.MaxSize + 1<<20} {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
ciphertext, err := k.Encrypt(make([]byte, size+crypto.Extension), data)
OK(t, err)
plaintext := make([]byte, len(ciphertext))
n, err := k.Decrypt(plaintext, ciphertext)
OK(t, err)
Equals(t, plaintext[:n], data)
}
}
func BenchmarkEncrypt(b *testing.B) {
size := 8 << 20 // 8MiB
data := make([]byte, size)
k := crypto.NewRandomKey()
buf := make([]byte, len(data)+crypto.Extension)
b.ResetTimer()
b.SetBytes(int64(size))
for i := 0; i < b.N; i++ {
_, err := k.Encrypt(buf, data)
OK(b, err)
}
}
func BenchmarkDecrypt(b *testing.B) {
size := 8 << 20 // 8MiB
data := make([]byte, size)
k := crypto.NewRandomKey()
plaintext := make([]byte, size)
ciphertext := make([]byte, size+crypto.Extension)
ciphertext, err := k.Encrypt(ciphertext, data)
OK(b, err)
b.ResetTimer()
b.SetBytes(int64(size))
for i := 0; i < b.N; i++ {
_, err = k.Decrypt(plaintext, ciphertext)
OK(b, err)
}
}

internal/crypto/doc.go (new file)

@@ -0,0 +1,2 @@
// Package crypto provides all cryptographic operations needed in restic.
package crypto

internal/crypto/kdf.go (new file)

@@ -0,0 +1,102 @@
package crypto
import (
"crypto/rand"
"time"
"restic/errors"
sscrypt "github.com/elithrar/simple-scrypt"
"golang.org/x/crypto/scrypt"
)
const saltLength = 64
// KDFParams are the parameters used for the key derivation function KDF().
type KDFParams struct {
N int
R int
P int
}
// DefaultKDFParams are the default parameters used for Calibrate and KDF().
var DefaultKDFParams = KDFParams{
N: sscrypt.DefaultParams.N,
R: sscrypt.DefaultParams.R,
P: sscrypt.DefaultParams.P,
}
// Calibrate determines new KDF parameters for the current hardware.
func Calibrate(timeout time.Duration, memory int) (KDFParams, error) {
defaultParams := sscrypt.Params{
N: DefaultKDFParams.N,
R: DefaultKDFParams.R,
P: DefaultKDFParams.P,
DKLen: sscrypt.DefaultParams.DKLen,
SaltLen: sscrypt.DefaultParams.SaltLen,
}
params, err := sscrypt.Calibrate(timeout, memory, defaultParams)
if err != nil {
return DefaultKDFParams, errors.Wrap(err, "scrypt.Calibrate")
}
return KDFParams{
N: params.N,
R: params.R,
P: params.P,
}, nil
}
// KDF derives encryption and message authentication keys from the password
// using the supplied parameters N, R and P and the Salt.
func KDF(p KDFParams, salt []byte, password string) (*Key, error) {
if len(salt) != saltLength {
return nil, errors.Errorf("scrypt() called with invalid salt bytes (len %d)", len(salt))
}
// make sure we have valid parameters
params := sscrypt.Params{
N: p.N,
R: p.R,
P: p.P,
DKLen: sscrypt.DefaultParams.DKLen,
SaltLen: len(salt),
}
if err := params.Check(); err != nil {
return nil, errors.Wrap(err, "Check")
}
derKeys := &Key{}
keybytes := macKeySize + aesKeySize
scryptKeys, err := scrypt.Key([]byte(password), salt, p.N, p.R, p.P, keybytes)
if err != nil {
return nil, errors.Wrap(err, "scrypt.Key")
}
if len(scryptKeys) != keybytes {
return nil, errors.Errorf("invalid numbers of bytes expanded from scrypt(): %d", len(scryptKeys))
}
// the first 32 bytes of the scrypt output form the encryption key
copy(derKeys.EncryptionKey[:], scryptKeys[:aesKeySize])
// the next 32 bytes form the MAC key, in the form k||r
macKeyFromSlice(&derKeys.MACKey, scryptKeys[aesKeySize:])
return derKeys, nil
}
// NewSalt returns new random salt bytes to use with KDF(). NewSalt panics
// when not enough random bytes can be read, since continuing without a
// proper salt would be unsafe.
func NewSalt() ([]byte, error) {
buf := make([]byte, saltLength)
n, err := rand.Read(buf)
if n != saltLength || err != nil {
panic("unable to read enough random bytes for new salt")
}
return buf, nil
}
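
A hedged sketch (not part of this commit) of deriving a key from a password with the functions above; the timeout, memory and password values are illustrative only:

// tune scrypt to roughly 500ms on this machine, fall back to the defaults
params, err := crypto.Calibrate(500*time.Millisecond, 60)
if err != nil {
	params = crypto.DefaultKDFParams
}

salt, err := crypto.NewSalt()
if err != nil {
	panic(err)
}

// key.EncryptionKey and key.MACKey are now derived from the password
key, err := crypto.KDF(params, salt, "secret password")
if err != nil {
	panic(err)
}
_ = key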

@@ -0,0 +1,14 @@
package crypto
import (
"testing"
"time"
)
func TestCalibrate(t *testing.T) {
params, err := Calibrate(100*time.Millisecond, 50)
if err != nil {
t.Fatal(err)
}
t.Logf("testing calibrate, params after: %v", params)
}

internal/debug/debug.go (new file)

@@ -0,0 +1,202 @@
// +build debug
package debug
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"restic/fs"
"runtime"
"strings"
"restic/errors"
)
var opts struct {
logger *log.Logger
funcs map[string]bool
files map[string]bool
}
// make sure that all the initialization happens before the init() functions
// are called, cf https://golang.org/ref/spec#Package_initialization
var _ = initDebug()
func initDebug() bool {
initDebugLogger()
initDebugTags()
fmt.Fprintf(os.Stderr, "debug enabled\n")
return true
}
func initDebugLogger() {
debugfile := os.Getenv("DEBUG_LOG")
if debugfile == "" {
return
}
fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile)
f, err := fs.OpenFile(debugfile, os.O_WRONLY|os.O_APPEND, 0600)
if err == nil {
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to seek to the end of %v: %v\n", debugfile, err)
os.Exit(3)
}
}
if err != nil && os.IsNotExist(errors.Cause(err)) {
f, err = fs.OpenFile(debugfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
}
if err != nil {
fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err)
os.Exit(2)
}
opts.logger = log.New(f, "", log.LstdFlags)
}
func parseFilter(envname string, pad func(string) string) map[string]bool {
filter := make(map[string]bool)
env := os.Getenv(envname)
if env == "" {
return filter
}
for _, fn := range strings.Split(env, ",") {
t := pad(strings.TrimSpace(fn))
if t == "" {
continue
}
val := true
if t[0] == '-' {
val = false
t = t[1:]
} else if t[0] == '+' {
val = true
t = t[1:]
}
// test pattern
_, err := path.Match(t, "")
if err != nil {
fmt.Fprintf(os.Stderr, "error: invalid pattern %q: %v\n", t, err)
os.Exit(5)
}
filter[t] = val
}
return filter
}
// padFunc returns the pattern unchanged; function names from DEBUG_FUNCS are
// matched as given.
func padFunc(s string) string {
return s
}
// padFile normalizes patterns from DEBUG_FILES: a bare file name is prefixed
// with "*/" and a missing line spec gets ":*" appended.
func padFile(s string) string {
if s == "all" {
return s
}
if !strings.Contains(s, "/") {
s = "*/" + s
}
if !strings.Contains(s, ":") {
s = s + ":*"
}
return s
}
func initDebugTags() {
opts.funcs = parseFilter("DEBUG_FUNCS", padFunc)
opts.files = parseFilter("DEBUG_FILES", padFile)
}
// taken from https://github.com/VividCortex/trace
func goroutineNum() int {
b := make([]byte, 20)
runtime.Stack(b, false)
var num int
fmt.Sscanf(string(b), "goroutine %d ", &num)
return num
}
// taken from https://github.com/VividCortex/trace
func getPosition() (fn, dir, file string, line int) {
pc, file, line, ok := runtime.Caller(2)
if !ok {
return "", "", "", 0
}
dirname, filename := filepath.Base(filepath.Dir(file)), filepath.Base(file)
Func := runtime.FuncForPC(pc)
return path.Base(Func.Name()), dirname, filename, line
}
func checkFilter(filter map[string]bool, key string) bool {
// check if key is enabled directly
if v, ok := filter[key]; ok {
return v
}
// check for globbing
for k, v := range filter {
if m, _ := path.Match(k, key); m {
return v
}
}
// check if tag "all" is enabled
if v, ok := filter["all"]; ok && v {
return true
}
return false
}
// Log prints a message to the debug log (if debug is enabled).
func Log(f string, args ...interface{}) {
fn, dir, file, line := getPosition()
goroutine := goroutineNum()
if len(f) == 0 || f[len(f)-1] != '\n' {
f += "\n"
}
pos := fmt.Sprintf("%s/%s:%d", dir, file, line)
formatString := fmt.Sprintf("%s\t%s\t%d\t%s", pos, fn, goroutine, f)
dbgprint := func() {
fmt.Fprintf(os.Stderr, formatString, args...)
}
if opts.logger != nil {
opts.logger.Printf(formatString, args...)
}
if checkFilter(opts.files, fmt.Sprintf("%s/%s:%d", dir, file, line)) {
dbgprint()
return
}
if checkFilter(opts.funcs, fn) {
dbgprint()
}
}
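
To illustrate the DEBUG_FILES normalization, a small sketch (not part of this commit; it would have to run inside package debug, and the patterns are examples only):

// "archiver.go"     -> "*/archiver.go:*"   (any directory, any line)
// "repository/*.go" -> "repository/*.go:*" (any line)
// "debug.go:120"    -> "*/debug.go:120"    (any directory, line 120 only)
for _, s := range []string{"archiver.go", "repository/*.go", "debug.go:120"} {
	fmt.Println(s, "->", padFile(s))
}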

@@ -0,0 +1,6 @@
// +build !debug
package debug
// Log prints a message to the debug log (if debug is enabled).
func Log(fmt string, args ...interface{}) {}

internal/debug/doc.go (new file)

@@ -0,0 +1,2 @@
// Package debug provides an infrastructure for logging debug information and breakpoints.
package debug

internal/debug/hooks.go (new file)

@@ -0,0 +1,28 @@
// +build !release
package debug
var (
hooks map[string]func(interface{})
)
func init() {
hooks = make(map[string]func(interface{}))
}
// Hook registers the function f to be called when the hook name is run.
func Hook(name string, f func(interface{})) {
hooks[name] = f
}
// RunHook calls the function registered for name (if any) with context.
func RunHook(name string, context interface{}) {
f, ok := hooks[name]
if !ok {
return
}
f(context)
}
// RemoveHook unregisters the hook name.
func RemoveHook(name string) {
delete(hooks, name)
}
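
A hedged usage sketch (not part of this commit; the hook name "pack.finalize" is made up): a test registers a hook that the code under test triggers via RunHook, without the production code knowing about the test:

func TestWithHook(t *testing.T) {
	// any debug.RunHook("pack.finalize", id) in the code under test now
	// invokes this callback
	debug.Hook("pack.finalize", func(ctx interface{}) {
		t.Logf("pack finalized: %v", ctx)
	})
	defer debug.RemoveHook("pack.finalize")

	// ... exercise code that calls debug.RunHook("pack.finalize", ...) ...
}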

@@ -0,0 +1,9 @@
// +build release
package debug
// Hook, RunHook and RemoveHook are no-ops in release builds.
func Hook(name string, f func(interface{})) {}
func RunHook(name string, context interface{}) {}
func RemoveHook(name string) {}

@@ -0,0 +1,94 @@
// +build debug
package debug
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"os"
"restic/errors"
)
type eofDetectRoundTripper struct {
http.RoundTripper
}
type eofDetectReader struct {
eofSeen bool
rd io.ReadCloser
}
func (rd *eofDetectReader) Read(p []byte) (n int, err error) {
n, err = rd.rd.Read(p)
if err == io.EOF {
rd.eofSeen = true
}
return n, err
}
func (rd *eofDetectReader) Close() error {
if !rd.eofSeen {
buf, err := ioutil.ReadAll(rd)
msg := fmt.Sprintf("body not drained, %d bytes not read", len(buf))
if err != nil {
msg += fmt.Sprintf(", error: %v", err)
}
if len(buf) > 0 {
if len(buf) > 20 {
buf = append(buf[:20], []byte("...")...)
}
msg += fmt.Sprintf(", body: %q", buf)
}
fmt.Fprintln(os.Stderr, msg)
Log("%s: %+v", msg, errors.New("Close()"))
}
return rd.rd.Close()
}
func (tr eofDetectRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
res, err = tr.RoundTripper.RoundTrip(req)
if res != nil && res.Body != nil {
res.Body = &eofDetectReader{rd: res.Body}
}
return res, err
}
type loggingRoundTripper struct {
http.RoundTripper
}
// RoundTripper returns a new http.RoundTripper which logs all requests (if
// debug is enabled). When debug is not enabled, upstream is returned.
func RoundTripper(upstream http.RoundTripper) http.RoundTripper {
return loggingRoundTripper{eofDetectRoundTripper{upstream}}
}
func (tr loggingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
trace, err := httputil.DumpRequestOut(req, false)
if err != nil {
Log("DumpRequestOut() error: %v\n", err)
} else {
Log("------------ HTTP REQUEST -----------\n%s", trace)
}
res, err = tr.RoundTripper.RoundTrip(req)
if err != nil {
Log("RoundTrip() returned error: %v", err)
}
if res != nil {
trace, err := httputil.DumpResponse(res, false)
if err != nil {
Log("DumpResponse() error: %v\n", err)
} else {
Log("------------ HTTP RESPONSE ----------\n%s", trace)
}
}
return res, err
}
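
Wiring this into an HTTP client is a one-liner; a sketch (not part of this commit) using the standard net/http client:

client := &http.Client{
	// in debug builds, requests and responses are dumped to the debug log
	// and bodies are checked for being fully drained; otherwise the
	// upstream transport is returned unchanged
	Transport: debug.RoundTripper(http.DefaultTransport),
}
_ = client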

@@ -0,0 +1,11 @@
// +build !debug
package debug
import "net/http"
// RoundTripper returns a new http.RoundTripper which logs all requests (if
// debug is enabled). When debug is not enabled, upstream is returned.
func RoundTripper(upstream http.RoundTripper) http.RoundTripper {
return upstream
}

internal/doc.go (new file)

@@ -0,0 +1,5 @@
// Package restic is the top level package for the restic backup program,
// please see https://github.com/restic/restic for more information.
//
// This package exposes the main objects that are handled in restic.
package restic

internal/errors/doc.go (new file)

@@ -0,0 +1,2 @@
// Package errors provides custom error types used within restic.
package errors

internal/errors/fatal.go (new file)

@@ -0,0 +1,38 @@
package errors
import "fmt"
// fatalError is an error that should be printed to the user, then the program
// should exit with an error code.
type fatalError string
func (e fatalError) Error() string {
return string(e)
}
func (e fatalError) Fatal() bool {
return true
}
// Fataler is an error which should be printed to the user directly.
// Afterwards, the program should exit with an error.
type Fataler interface {
Fatal() bool
}
// IsFatal returns true if err is a fatal message that should be printed to the
// user. Then, the program should exit.
func IsFatal(err error) bool {
// unwrap first, since Fatal() returns a wrapped fatalError
e, ok := Cause(err).(Fataler)
return ok && e.Fatal()
}
// Fatal returns a wrapped error which implements the Fataler interface.
func Fatal(s string) error {
return Wrap(fatalError(s), "Fatal")
}
// Fatalf returns an error which implements the Fataler interface.
func Fatalf(s string, data ...interface{}) error {
return fatalError(fmt.Sprintf(s, data...))
}
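
A short sketch (not part of this commit) of the intended flow: low-level code wraps a user-facing message with Fatalf, and the top-level handler prints it and exits (the path and exit code are illustrative):

err := errors.Fatalf("unable to open repository at %v", "/srv/repo")

if errors.IsFatal(err) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}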

internal/errors/wrap.go (new file)

@@ -0,0 +1,24 @@
package errors
import "github.com/pkg/errors"
// Cause returns the cause of an error.
func Cause(err error) error {
return errors.Cause(err)
}
// New creates a new error based on message. Wrapped so that this package does
// not appear in the stack trace.
var New = errors.New
// Errorf creates an error based on a format string and values. Wrapped so that
// this package does not appear in the stack trace.
var Errorf = errors.Errorf
// Wrap wraps an error retrieved from outside of restic. Wrapped so that this
// package does not appear in the stack trace.
var Wrap = errors.Wrap
// Wrapf returns an error annotating err with the format specifier. If err is
// nil, Wrapf returns nil.
var Wrapf = errors.Wrapf

internal/file.go (new file)

@@ -0,0 +1,62 @@
package restic
import (
"fmt"
"restic/errors"
)
// FileType is the type of a file in the backend.
type FileType string
// These are the different data types a backend can store.
const (
DataFile FileType = "data"
KeyFile = "key"
LockFile = "lock"
SnapshotFile = "snapshot"
IndexFile = "index"
ConfigFile = "config"
)
// Handle is used to store and access data in a backend.
type Handle struct {
Type FileType
Name string
}
func (h Handle) String() string {
name := h.Name
if len(name) > 10 {
name = name[:10]
}
return fmt.Sprintf("<%s/%s>", h.Type, name)
}
// Valid returns an error if h is not valid.
func (h Handle) Valid() error {
if h.Type == "" {
return errors.New("type is empty")
}
switch h.Type {
case DataFile:
case KeyFile:
case LockFile:
case SnapshotFile:
case IndexFile:
case ConfigFile:
default:
return errors.Errorf("invalid Type %q", h.Type)
}
if h.Type == ConfigFile {
return nil
}
if h.Name == "" {
return errors.New("invalid Name")
}
return nil
}

internal/file_test.go (new file)

@@ -0,0 +1,28 @@
package restic
import "testing"
var handleTests = []struct {
h Handle
valid bool
}{
{Handle{Name: "foo"}, false},
{Handle{Type: "foobar"}, false},
{Handle{Type: ConfigFile, Name: ""}, true},
{Handle{Type: DataFile, Name: ""}, false},
{Handle{Type: "", Name: "x"}, false},
{Handle{Type: LockFile, Name: "010203040506"}, true},
}
func TestHandleValid(t *testing.T) {
for i, test := range handleTests {
err := test.h.Valid()
if err != nil && test.valid {
t.Errorf("test %v failed: error returned for valid handle: %v", i, err)
}
if !test.valid && err == nil {
t.Errorf("test %v failed: expected error for invalid handle not found", i)
}
}
}

internal/filter/doc.go (new file)

@@ -0,0 +1,5 @@
// Package filter implements filters for files similar to filepath.Glob, but
// in contrast to filepath.Glob a pattern may specify directories.
//
// For a list of valid patterns please see the documentation on filepath.Glob.
package filter

internal/filter/filter.go (new file)

@@ -0,0 +1,122 @@
package filter
import (
"path/filepath"
"strings"
"restic/errors"
)
// ErrBadString is returned when Match is called with the empty string as the
// second argument.
var ErrBadString = errors.New("filter.Match: string is empty")
// Match returns true if str matches the pattern. When the pattern is
// malformed, filepath.ErrBadPattern is returned. The empty pattern matches
// everything; when str is the empty string, ErrBadString is returned.
//
// Pattern can be a combination of patterns suitable for filepath.Match, joined
// by filepath.Separator.
func Match(pattern, str string) (matched bool, err error) {
if pattern == "" {
return true, nil
}
pattern = filepath.Clean(pattern)
if str == "" {
return false, ErrBadString
}
// convert file path separator to '/'
if filepath.Separator != '/' {
pattern = strings.Replace(pattern, string(filepath.Separator), "/", -1)
str = strings.Replace(str, string(filepath.Separator), "/", -1)
}
patterns := strings.Split(pattern, "/")
strs := strings.Split(str, "/")
return match(patterns, strs)
}
func hasDoubleWildcard(list []string) (ok bool, pos int) {
for i, item := range list {
if item == "**" {
return true, i
}
}
return false, 0
}
func match(patterns, strs []string) (matched bool, err error) {
if ok, pos := hasDoubleWildcard(patterns); ok {
// gradually expand '**' into separate wildcards
for i := 0; i <= len(strs)-len(patterns)+1; i++ {
newPat := make([]string, pos)
copy(newPat, patterns[:pos])
for k := 0; k < i; k++ {
newPat = append(newPat, "*")
}
newPat = append(newPat, patterns[pos+1:]...)
matched, err := match(newPat, strs)
if err != nil {
return false, err
}
if matched {
return true, nil
}
}
return false, nil
}
if len(patterns) == 0 && len(strs) == 0 {
return true, nil
}
if len(patterns) <= len(strs) {
outer:
for offset := len(strs) - len(patterns); offset >= 0; offset-- {
for i := len(patterns) - 1; i >= 0; i-- {
ok, err := filepath.Match(patterns[i], strs[offset+i])
if err != nil {
return false, errors.Wrap(err, "Match")
}
if !ok {
continue outer
}
}
return true, nil
}
}
return false, nil
}
// List returns true if str matches one of the patterns. Empty patterns are
// ignored.
func List(patterns []string, str string) (matched bool, err error) {
for _, pat := range patterns {
if pat == "" {
continue
}
matched, err = Match(pat, str)
if err != nil {
return false, err
}
if matched {
return true, nil
}
}
return false, nil
}
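
The double-wildcard expansion in match() is easiest to see from the outside; a sketch (not part of this commit) using only the exported API:

// "**" may stand for any number of path components, so both paths match:
m1, _ := filter.Match("foo/**/bar/*.go", "/home/user/foo/x/y/bar/main.go")
m2, _ := filter.Match("foo/**/bar/*.go", "foo/bar/main.go")
fmt.Println(m1, m2) // prints: true true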

@@ -0,0 +1,288 @@
package filter_test
import (
"bufio"
"compress/bzip2"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"restic/filter"
)
var matchTests = []struct {
pattern string
path string
match bool
}{
{"", "", true},
{"", "foo", true},
{"", "/x/y/z/foo", true},
{"*.go", "/foo/bar/test.go", true},
{"*.c", "/foo/bar/test.go", false},
{"*", "/foo/bar/test.go", true},
{"foo*", "/foo/bar/test.go", true},
{"bar*", "/foo/bar/test.go", true},
{"/bar*", "/foo/bar/test.go", false},
{"bar/*", "/foo/bar/test.go", true},
{"baz/*", "/foo/bar/test.go", false},
{"bar/test.go", "/foo/bar/test.go", true},
{"bar/*.go", "/foo/bar/test.go", true},
{"ba*/*.go", "/foo/bar/test.go", true},
{"bb*/*.go", "/foo/bar/test.go", false},
{"test.*", "/foo/bar/test.go", true},
{"tesT.*", "/foo/bar/test.go", false},
{"bar/*", "/foo/bar/baz", true},
{"bar", "/foo/bar", true},
{"/foo/bar", "/foo/bar", true},
{"/foo/bar/", "/foo/bar", true},
{"/foo/bar", "/foo/baz", false},
{"/foo/bar", "/foo/baz/", false},
{"/foo///bar", "/foo/bar", true},
{"/foo/../bar", "/foo/bar", false},
{"/foo/../bar", "/bar", true},
{"/foo", "/foo/baz", true},
{"/foo/", "/foo/baz", true},
{"/foo/*", "/foo", false},
{"/foo/*", "/foo/baz", true},
{"bar", "/foo/bar/baz", true},
{"bar", "/foo/bar/test.go", true},
{"/foo/*test.*", "/foo/bar/test.go", false},
{"/foo/*/test.*", "/foo/bar/test.go", true},
{"/foo/*/bar/test.*", "/foo/bar/test.go", false},
{"/*/*/bar/test.*", "/foo/bar/test.go", false},
{"/*/*/bar/test.*", "/foo/bar/baz/test.go", false},
{"/*/*/baz/test.*", "/foo/bar/baz/test.go", true},
{"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false},
{"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false},
{"/foo/bar/test.*", "bar/baz/test.go", false},
{"/x/y/bar/baz/test.*", "bar/baz/test.go", false},
{"/x/y/bar/baz/test.c", "bar/baz/test.go", false},
{"baz/test.*", "bar/baz/test.go", true},
{"baz/tesT.*", "bar/baz/test.go", false},
{"test.go", "bar/baz/test.go", true},
{"*.go", "bar/baz/test.go", true},
{"*.c", "bar/baz/test.go", false},
{"sdk", "/foo/bar/sdk", true},
{"sdk", "/foo/bar/sdk/test/sdk_foo.go", true},
{
"sdk/*/cpp/*/*vars*.html",
"/usr/share/doc/libreoffice/sdk/docs/cpp/ref/a00517.html",
false,
},
{"foo/**/bar/*.go", "/home/user/foo/work/special/project/bar/test.go", true},
{"foo/**/bar/*.go", "/home/user/foo/bar/test.go", true},
{"foo/**/bar/*.go", "x/foo/bar/test.go", true},
{"foo/**/bar/*.go", "foo/bar/test.go", true},
{"foo/**/bar/*.go", "/home/user/foo/test.c", false},
{"foo/**/bar/*.go", "bar/foo/main.go", false},
{"foo/**/bar/*.go", "/foo/bar/main.go", true},
{"foo/**/bar/*.go", "bar/main.go", false},
{"foo/**/bar", "/home/user/foo/x/y/bar", true},
{"foo/**/bar", "/home/user/foo/x/y/bar/main.go", true},
{"user/**/important*", "/home/user/work/x/y/hidden/x", false},
{"user/**/hidden*/**/c", "/home/user/work/x/y/hidden/z/a/b/c", true},
{"c:/foo/*test.*", "c:/foo/bar/test.go", false},
{"c:/foo", "c:/foo/bar", true},
{"c:/foo/", "c:/foo/bar", true},
{"c:/foo/*/test.*", "c:/foo/bar/test.go", true},
{"c:/foo/*/bar/test.*", "c:/foo/bar/test.go", false},
}
func testpattern(t *testing.T, pattern, path string, shouldMatch bool) {
match, err := filter.Match(pattern, path)
if err != nil {
t.Errorf("test pattern %q failed: expected no error for path %q, but error returned: %v",
pattern, path, err)
}
if match != shouldMatch {
t.Errorf("test: filter.Match(%q, %q): expected %v, got %v",
pattern, path, shouldMatch, match)
}
}
func TestMatch(t *testing.T) {
for _, test := range matchTests {
testpattern(t, test.pattern, test.path, test.match)
// Test with native path separator
if filepath.Separator != '/' {
// Test with pattern as native
pattern := strings.Replace(test.pattern, "/", string(filepath.Separator), -1)
testpattern(t, pattern, test.path, test.match)
// Test with path as native
path := strings.Replace(test.path, "/", string(filepath.Separator), -1)
testpattern(t, test.pattern, path, test.match)
// Test with both pattern and path as native
testpattern(t, pattern, path, test.match)
}
}
}
func ExampleMatch() {
match, _ := filter.Match("*.go", "/home/user/file.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
func ExampleMatch_wildcards() {
match, _ := filter.Match("/home/[uU]ser/?.go", "/home/user/F.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
var filterListTests = []struct {
patterns []string
path string
match bool
}{
{[]string{"*.go"}, "/foo/bar/test.go", true},
{[]string{"*.c"}, "/foo/bar/test.go", false},
{[]string{"*.go", "*.c"}, "/foo/bar/test.go", true},
{[]string{"*"}, "/foo/bar/test.go", true},
{[]string{"x"}, "/foo/bar/test.go", false},
{[]string{"?"}, "/foo/bar/test.go", false},
{[]string{"?", "x"}, "/foo/bar/x", true},
{[]string{"/*/*/bar/test.*"}, "/foo/bar/test.go", false},
{[]string{"/*/*/bar/test.*", "*.go"}, "/foo/bar/test.go", true},
{[]string{"", "*.c"}, "/foo/bar/test.go", false},
}
func TestList(t *testing.T) {
for i, test := range filterListTests {
match, err := filter.List(test.patterns, test.path)
if err != nil {
t.Errorf("test %d failed: expected no error for patterns %q, but error returned: %v",
i, test.patterns, err)
continue
}
if match != test.match {
t.Errorf("test %d: filter.MatchList(%q, %q): expected %v, got %v",
i, test.patterns, test.path, test.match, match)
}
}
}
func ExampleList() {
match, _ := filter.List([]string{"*.c", "*.go"}, "/home/user/file.go")
fmt.Printf("match: %v\n", match)
// Output:
// match: true
}
func extractTestLines(t testing.TB) (lines []string) {
f, err := os.Open("testdata/libreoffice.txt.bz2")
if err != nil {
t.Fatal(err)
}
defer func() {
if err := f.Close(); err != nil {
t.Fatal(err)
}
}()
sc := bufio.NewScanner(bzip2.NewReader(f))
for sc.Scan() {
lines = append(lines, sc.Text())
}
return lines
}
func TestFilterPatternsFile(t *testing.T) {
lines := extractTestLines(t)
var testPatterns = []struct {
pattern string
hits uint
}{
{"*.html", 18249},
{"sdk", 22186},
{"sdk/*/cpp/*/*vars.html", 3},
}
for _, test := range testPatterns {
var c uint
for _, line := range lines {
match, err := filter.Match(test.pattern, line)
if err != nil {
t.Error(err)
continue
}
if match {
c++
// fmt.Printf("pattern %q, line %q\n", test.pattern, line)
}
}
if c != test.hits {
t.Errorf("wrong number of hits for pattern %q: want %d, got %d",
test.pattern, test.hits, c)
}
}
}
func BenchmarkFilterLines(b *testing.B) {
pattern := "sdk/*/cpp/*/*vars.html"
lines := extractTestLines(b)
var c uint
b.ResetTimer()
for i := 0; i < b.N; i++ {
c = 0
for _, line := range lines {
match, err := filter.Match(pattern, line)
if err != nil {
b.Fatal(err)
}
if match {
c++
}
}
if c != 3 {
b.Fatalf("wrong number of matches: expected 3, got %d", c)
}
}
}
func BenchmarkFilterPatterns(b *testing.B) {
patterns := []string{
"sdk/*",
"*.html",
}
lines := extractTestLines(b)
var c uint
b.ResetTimer()
for i := 0; i < b.N; i++ {
c = 0
for _, line := range lines {
match, err := filter.List(patterns, line)
if err != nil {
b.Fatal(err)
}
if match {
c++
}
}
if c != 22185 {
b.Fatalf("wrong number of matches: expected 22185, got %d", c)
}
}
}

Some files were not shown because too many files have changed in this diff.