Moves files
internal/repository/doc.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// Package repository implements a restic repository on top of a backend. The
// following list describes the abstractions used by this package. More
// information can be found in the restic design document.
//
// File
//
// A file is a named handle for some data saved in the backend. For the local
// backend, this corresponds to actual files saved to disk. Usually, the SHA256
// hash of the content is used for a file's name (hexadecimal, in lower-case
// ASCII characters). An exception is the file `config`. Most files are
// encrypted before being saved in a backend, which means that the name is the
// hash of the ciphertext.
//
// Blob
//
// A blob is a number of bytes that has a type (data or tree). Blobs are
// identified by an ID, which is the SHA256 hash of the blob's contents. One or
// more blobs are bundled together in a Pack and then saved to the backend.
// Blobs are always encrypted before being bundled in a Pack.
//
// Pack
//
// A Pack is a File in the backend that contains one or more (encrypted) blobs,
// followed by a header at the end of the Pack. The header is encrypted and
// contains the ID, type, length and offset for each blob contained in the
// Pack.
//
package repository
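
The file-naming rule described above is easy to check by hand. A minimal sketch (not part of this commit) that computes the backend name for a given ciphertext, using only the Go standard library:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// backendFileName returns the name a saved file would get: the SHA256 hash
// of its (usually encrypted) content, as lower-case hexadecimal ASCII.
func backendFileName(ciphertext []byte) string {
	sum := sha256.Sum256(ciphertext)
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(backendFileName([]byte("example ciphertext")))
}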
internal/repository/index.go (new file, 540 lines)
@@ -0,0 +1,540 @@
package repository

import (
	"context"
	"encoding/json"
	"io"
	"restic"
	"sync"
	"time"

	"restic/errors"

	"restic/debug"
)

// Index holds a lookup table for id -> pack.
type Index struct {
	m    sync.Mutex
	pack map[restic.BlobHandle][]indexEntry

	final      bool      // set to true for all indexes read from the backend ("finalized")
	id         restic.ID // set to the ID of the index when it's finalized
	supersedes restic.IDs
	created    time.Time
}

type indexEntry struct {
	packID restic.ID
	offset uint
	length uint
}

// NewIndex returns a new index.
func NewIndex() *Index {
	return &Index{
		pack:    make(map[restic.BlobHandle][]indexEntry),
		created: time.Now(),
	}
}

func (idx *Index) store(blob restic.PackedBlob) {
	newEntry := indexEntry{
		packID: blob.PackID,
		offset: blob.Offset,
		length: blob.Length,
	}
	h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
	idx.pack[h] = append(idx.pack[h], newEntry)
}

// Final returns true iff the index has already been written to the repository
// (it is finalized).
func (idx *Index) Final() bool {
	idx.m.Lock()
	defer idx.m.Unlock()

	return idx.final
}

const (
	indexMinBlobs = 20
	indexMaxBlobs = 2000
	indexMinAge   = 2 * time.Minute
	indexMaxAge   = 15 * time.Minute
)

// IndexFull returns true iff the index is "full enough" to be saved as a preliminary index.
var IndexFull = func(idx *Index) bool {
	idx.m.Lock()
	defer idx.m.Unlock()

	debug.Log("checking whether index %p is full", idx)

	packs := len(idx.pack)
	age := time.Since(idx.created)

	if age > indexMaxAge {
		debug.Log("index %p is old enough (%v)", idx, age)
		return true
	}

	if packs < indexMinBlobs || age < indexMinAge {
		debug.Log("index %p only has %d packs or is too young (%v)", idx, packs, age)
		return false
	}

	if packs > indexMaxBlobs {
		debug.Log("index %p has %d packs", idx, packs)
		return true
	}

	debug.Log("index %p is not full", idx)
	return false
}

// Store remembers the id and pack in the index. An entry for an existing blob
// is appended, not overwritten.
func (idx *Index) Store(blob restic.PackedBlob) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if idx.final {
		panic("store new item in finalized index")
	}

	debug.Log("%v", blob)

	idx.store(blob)
}

// Lookup queries the index for the blob ID and returns all matching
// restic.PackedBlob entries.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	h := restic.BlobHandle{ID: id, Type: tpe}

	if packs, ok := idx.pack[h]; ok {
		blobs = make([]restic.PackedBlob, 0, len(packs))

		for _, p := range packs {
			debug.Log("id %v found in pack %v at %d, length %d",
				id.Str(), p.packID.Str(), p.offset, p.length)

			blob := restic.PackedBlob{
				Blob: restic.Blob{
					Type:   tpe,
					Length: p.length,
					ID:     id,
					Offset: p.offset,
				},
				PackID: p.packID,
			}

			blobs = append(blobs, blob)
		}

		return blobs, nil
	}

	debug.Log("id %v not found", id.Str())
	return nil, errors.Errorf("id %v not found in index", id)
}

// ListPack returns a list of blobs contained in a pack.
func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) {
	idx.m.Lock()
	defer idx.m.Unlock()

	for h, packList := range idx.pack {
		for _, entry := range packList {
			if entry.packID == id {
				list = append(list, restic.PackedBlob{
					Blob: restic.Blob{
						ID:     h.ID,
						Type:   h.Type,
						Length: entry.length,
						Offset: entry.offset,
					},
					PackID: entry.packID,
				})
			}
		}
	}

	return list
}

// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool {
	_, err := idx.Lookup(id, tpe)
	return err == nil
}

// LookupSize returns the length of the plaintext content of the blob with the
// given id.
func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (plaintextLength uint, err error) {
	blobs, err := idx.Lookup(id, tpe)
	if err != nil {
		return 0, err
	}

	return uint(restic.PlaintextLength(int(blobs[0].Length))), nil
}

// Supersedes returns the list of indexes this index supersedes, if any.
func (idx *Index) Supersedes() restic.IDs {
	return idx.supersedes
}

// AddToSupersedes adds the ids to the list of indexes superseded by this
// index. If the index has already been finalized, an error is returned.
func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

	if idx.final {
		return errors.New("index already finalized")
	}

	idx.supersedes = append(idx.supersedes, ids...)
	return nil
}

// Each returns a channel that yields all blobs known to the index. When the
// context is cancelled, the background goroutine terminates. This blocks any
// modification of the index.
func (idx *Index) Each(ctx context.Context) <-chan restic.PackedBlob {
	idx.m.Lock()

	ch := make(chan restic.PackedBlob)

	go func() {
		defer idx.m.Unlock()
		defer close(ch)

		for h, packs := range idx.pack {
			for _, blob := range packs {
				select {
				case <-ctx.Done():
					return
				case ch <- restic.PackedBlob{
					Blob: restic.Blob{
						ID:     h.ID,
						Type:   h.Type,
						Offset: blob.offset,
						Length: blob.length,
					},
					PackID: blob.packID,
				}:
				}
			}
		}
	}()

	return ch
}

// Packs returns all packs in this index.
func (idx *Index) Packs() restic.IDSet {
	idx.m.Lock()
	defer idx.m.Unlock()

	packs := restic.NewIDSet()
	for _, list := range idx.pack {
		for _, entry := range list {
			packs.Insert(entry.packID)
		}
	}

	return packs
}

// Count returns the number of blobs of type t in the index.
func (idx *Index) Count(t restic.BlobType) (n uint) {
	debug.Log("counting blobs of type %v", t)
	idx.m.Lock()
	defer idx.m.Unlock()

	for h, list := range idx.pack {
		if h.Type != t {
			continue
		}

		n += uint(len(list))
	}

	return
}

type packJSON struct {
	ID    restic.ID  `json:"id"`
	Blobs []blobJSON `json:"blobs"`
}

type blobJSON struct {
	ID     restic.ID       `json:"id"`
	Type   restic.BlobType `json:"type"`
	Offset uint            `json:"offset"`
	Length uint            `json:"length"`
}

// generatePackList returns a list of packs.
func (idx *Index) generatePackList() ([]*packJSON, error) {
	list := []*packJSON{}
	packs := make(map[restic.ID]*packJSON)

	for h, packedBlobs := range idx.pack {
		for _, blob := range packedBlobs {
			debug.Log("handle blob %v", h)

			if blob.packID.IsNull() {
				debug.Log("blob %v has no packID! (offset %v, length %v)",
					h, blob.offset, blob.length)
				return nil, errors.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", h)
			}

			// see if pack is already in map
			p, ok := packs[blob.packID]
			if !ok {
				// else create new pack
				p = &packJSON{ID: blob.packID}

				// and append it to the list and map
				list = append(list, p)
				packs[p.ID] = p
			}

			// add blob
			p.Blobs = append(p.Blobs, blobJSON{
				ID:     h.ID,
				Type:   h.Type,
				Offset: blob.offset,
				Length: blob.length,
			})
		}
	}

	debug.Log("done")

	return list, nil
}

type jsonIndex struct {
	Supersedes restic.IDs  `json:"supersedes,omitempty"`
	Packs      []*packJSON `json:"packs"`
}

// Encode writes the JSON serialization of the index to the writer w.
func (idx *Index) Encode(w io.Writer) error {
	debug.Log("encoding index")
	idx.m.Lock()
	defer idx.m.Unlock()

	return idx.encode(w)
}

// encode writes the JSON serialization of the index to the writer w. The
// caller must hold idx.m.
func (idx *Index) encode(w io.Writer) error {
	debug.Log("encoding index")

	list, err := idx.generatePackList()
	if err != nil {
		return err
	}

	enc := json.NewEncoder(w)
	idxJSON := jsonIndex{
		Supersedes: idx.supersedes,
		Packs:      list,
	}
	return enc.Encode(idxJSON)
}

// Finalize sets the index to final and writes the JSON serialization to w.
func (idx *Index) Finalize(w io.Writer) error {
	debug.Log("encoding index")
	idx.m.Lock()
	defer idx.m.Unlock()

	idx.final = true

	return idx.encode(w)
}

// ID returns the ID of the index, if available. If the index is not yet
// finalized, an error is returned.
func (idx *Index) ID() (restic.ID, error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
		return restic.ID{}, errors.New("index not finalized")
	}

	return idx.id, nil
}

// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
func (idx *Index) SetID(id restic.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
		return errors.New("index is not final")
	}

	if !idx.id.IsNull() {
		return errors.New("ID already set")
	}

	debug.Log("ID set to %v", id.Str())
	idx.id = id

	return nil
}

// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
	debug.Log("dumping index")
	idx.m.Lock()
	defer idx.m.Unlock()

	list, err := idx.generatePackList()
	if err != nil {
		return err
	}

	outer := jsonIndex{
		Supersedes: idx.supersedes,
		Packs:      list,
	}

	buf, err := json.MarshalIndent(outer, "", " ")
	if err != nil {
		return err
	}

	_, err = w.Write(append(buf, '\n'))
	if err != nil {
		return errors.Wrap(err, "Write")
	}

	debug.Log("done")

	return nil
}

// isErrOldIndex returns true if the error may be caused by an old index
// format.
func isErrOldIndex(err error) bool {
	if e, ok := err.(*json.UnmarshalTypeError); ok && e.Value == "array" {
		return true
	}

	return false
}

// ErrOldIndexFormat means an index with the old format was detected.
var ErrOldIndexFormat = errors.New("index has old format")

// DecodeIndex unserializes an index from buf.
func DecodeIndex(buf []byte) (idx *Index, err error) {
	debug.Log("Start decoding index")
	idxJSON := &jsonIndex{}

	err = json.Unmarshal(buf, idxJSON)
	if err != nil {
		debug.Log("Error %v", err)

		if isErrOldIndex(err) {
			debug.Log("index is probably old format, trying that")
			err = ErrOldIndexFormat
		}

		return nil, errors.Wrap(err, "Decode")
	}

	idx = NewIndex()
	for _, pack := range idxJSON.Packs {
		for _, blob := range pack.Blobs {
			idx.store(restic.PackedBlob{
				Blob: restic.Blob{
					Type:   blob.Type,
					ID:     blob.ID,
					Offset: blob.Offset,
					Length: blob.Length,
				},
				PackID: pack.ID,
			})
		}
	}
	idx.supersedes = idxJSON.Supersedes
	idx.final = true

	debug.Log("done")
	return idx, nil
}

// DecodeOldIndex unserializes an index in the old format from buf.
func DecodeOldIndex(buf []byte) (idx *Index, err error) {
	debug.Log("Start decoding old index")
	list := []*packJSON{}

	err = json.Unmarshal(buf, &list)
	if err != nil {
		debug.Log("Error %#v", err)
		return nil, errors.Wrap(err, "Decode")
	}

	idx = NewIndex()
	for _, pack := range list {
		for _, blob := range pack.Blobs {
			idx.store(restic.PackedBlob{
				Blob: restic.Blob{
					Type:   blob.Type,
					ID:     blob.ID,
					Offset: blob.Offset,
					Length: blob.Length,
				},
				PackID: pack.ID,
			})
		}
	}
	idx.final = true

	debug.Log("done")
	return idx, nil
}

// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(ctx context.Context, repo restic.Repository, id restic.ID, fn func([]byte) (*Index, error)) (idx *Index, err error) {
	debug.Log("Loading index %v", id.Str())

	buf, err := repo.LoadAndDecrypt(ctx, restic.IndexFile, id)
	if err != nil {
		return nil, err
	}

	idx, err = fn(buf)
	if err != nil {
		debug.Log("error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = id

	return idx, nil
}
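
A rough round-trip sketch for the API above; illustrative only, assuming the restic and restic/repository import paths used in this tree (plus bytes and fmt):

idx := repository.NewIndex()

// Record where one (encrypted) blob lives inside a pack file.
idx.Store(restic.PackedBlob{
	Blob: restic.Blob{
		Type:   restic.DataBlob,
		ID:     restic.NewRandomID(),
		Offset: 0,
		Length: 42,
	},
	PackID: restic.NewRandomID(),
})

// Finalize marks the index read-only and writes the JSON serialization.
buf := bytes.NewBuffer(nil)
if err := idx.Finalize(buf); err != nil {
	panic(err)
}

// DecodeIndex reconstructs an equivalent, already-finalized index.
idx2, err := repository.DecodeIndex(buf.Bytes())
if err != nil {
	panic(err)
}
fmt.Println(idx2.Final()) // true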
internal/repository/index_test.go (new file, 381 lines)
@@ -0,0 +1,381 @@
package repository_test

import (
	"bytes"
	"restic"
	"testing"

	"restic/repository"
	. "restic/test"
)

func TestIndexSerialize(t *testing.T) {
	type testEntry struct {
		id             restic.ID
		pack           restic.ID
		tpe            restic.BlobType
		offset, length uint
	}
	tests := []testEntry{}

	idx := repository.NewIndex()

	// create 50 packs with 20 blobs each
	for i := 0; i < 50; i++ {
		packID := restic.NewRandomID()

		pos := uint(0)
		for j := 0; j < 20; j++ {
			id := restic.NewRandomID()
			length := uint(i*100 + j)
			idx.Store(restic.PackedBlob{
				Blob: restic.Blob{
					Type:   restic.DataBlob,
					ID:     id,
					Offset: pos,
					Length: length,
				},
				PackID: packID,
			})

			tests = append(tests, testEntry{
				id:     id,
				pack:   packID,
				tpe:    restic.DataBlob,
				offset: pos,
				length: length,
			})

			pos += length
		}
	}

	wr := bytes.NewBuffer(nil)
	err := idx.Encode(wr)
	OK(t, err)

	idx2, err := repository.DecodeIndex(wr.Bytes())
	OK(t, err)
	Assert(t, idx2 != nil,
		"nil returned for decoded index")

	wr2 := bytes.NewBuffer(nil)
	err = idx2.Encode(wr2)
	OK(t, err)

	for _, testBlob := range tests {
		list, err := idx.Lookup(testBlob.id, testBlob.tpe)
		OK(t, err)

		if len(list) != 1 {
			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
		}
		result := list[0]

		Equals(t, testBlob.pack, result.PackID)
		Equals(t, testBlob.tpe, result.Type)
		Equals(t, testBlob.offset, result.Offset)
		Equals(t, testBlob.length, result.Length)

		list2, err := idx2.Lookup(testBlob.id, testBlob.tpe)
		OK(t, err)

		if len(list2) != 1 {
			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list2), list2)
		}
		result2 := list2[0]

		Equals(t, testBlob.pack, result2.PackID)
		Equals(t, testBlob.tpe, result2.Type)
		Equals(t, testBlob.offset, result2.Offset)
		Equals(t, testBlob.length, result2.Length)
	}

	// add more blobs to idx
	newtests := []testEntry{}
	for i := 0; i < 10; i++ {
		packID := restic.NewRandomID()

		pos := uint(0)
		for j := 0; j < 10; j++ {
			id := restic.NewRandomID()
			length := uint(i*100 + j)
			idx.Store(restic.PackedBlob{
				Blob: restic.Blob{
					Type:   restic.DataBlob,
					ID:     id,
					Offset: pos,
					Length: length,
				},
				PackID: packID,
			})

			newtests = append(newtests, testEntry{
				id:     id,
				pack:   packID,
				tpe:    restic.DataBlob,
				offset: pos,
				length: length,
			})

			pos += length
		}
	}

	// serialize idx, unserialize to idx3
	wr3 := bytes.NewBuffer(nil)
	err = idx.Finalize(wr3)
	OK(t, err)

	Assert(t, idx.Final(),
		"index not final after encoding")

	id := restic.NewRandomID()
	OK(t, idx.SetID(id))
	id2, err := idx.ID()
	OK(t, err)
	Assert(t, id2.Equal(id),
		"wrong ID returned: want %v, got %v", id, id2)

	idx3, err := repository.DecodeIndex(wr3.Bytes())
	OK(t, err)
	Assert(t, idx3 != nil,
		"nil returned for decoded index")
	Assert(t, idx3.Final(),
		"decoded index is not final")

	// all new blobs must be in the index
	for _, testBlob := range newtests {
		list, err := idx3.Lookup(testBlob.id, testBlob.tpe)
		OK(t, err)

		if len(list) != 1 {
			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
		}

		blob := list[0]

		Equals(t, testBlob.pack, blob.PackID)
		Equals(t, testBlob.tpe, blob.Type)
		Equals(t, testBlob.offset, blob.Offset)
		Equals(t, testBlob.length, blob.Length)
	}
}

func TestIndexSize(t *testing.T) {
	idx := repository.NewIndex()

	packs := 200
	blobs := 100
	for i := 0; i < packs; i++ {
		packID := restic.NewRandomID()

		pos := uint(0)
		for j := 0; j < blobs; j++ {
			id := restic.NewRandomID()
			length := uint(i*100 + j)
			idx.Store(restic.PackedBlob{
				Blob: restic.Blob{
					Type:   restic.DataBlob,
					ID:     id,
					Offset: pos,
					Length: length,
				},
				PackID: packID,
			})

			pos += length
		}
	}

	wr := bytes.NewBuffer(nil)

	err := idx.Encode(wr)
	OK(t, err)

	t.Logf("Index file size for %d blobs in %d packs is %d", blobs*packs, packs, wr.Len())
}

// example index serialization from doc/Design.rst
var docExample = []byte(`
{
  "supersedes": [
    "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
  ],
  "packs": [
    {
      "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
      "blobs": [
        {
          "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
          "type": "data",
          "offset": 0,
          "length": 25
        },{
          "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
          "type": "tree",
          "offset": 38,
          "length": 100
        },
        {
          "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
          "type": "data",
          "offset": 150,
          "length": 123
        }
      ]
    }
  ]
}
`)

var docOldExample = []byte(`
[ {
  "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
  "blobs": [
    {
      "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
      "type": "data",
      "offset": 0,
      "length": 25
    },{
      "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
      "type": "tree",
      "offset": 38,
      "length": 100
    },
    {
      "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
      "type": "data",
      "offset": 150,
      "length": 123
    }
  ]
} ]
`)

var exampleTests = []struct {
	id, packID     restic.ID
	tpe            restic.BlobType
	offset, length uint
}{
	{
		restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"),
		restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
		restic.DataBlob, 0, 25,
	}, {
		restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"),
		restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
		restic.TreeBlob, 38, 100,
	}, {
		restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"),
		restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
		restic.DataBlob, 150, 123,
	},
}

var exampleLookupTest = struct {
	packID restic.ID
	blobs  map[restic.ID]restic.BlobType
}{
	restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
	map[restic.ID]restic.BlobType{
		restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob,
		restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob,
		restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob,
	},
}

func TestIndexUnserialize(t *testing.T) {
	oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}

	idx, err := repository.DecodeIndex(docExample)
	OK(t, err)

	for _, test := range exampleTests {
		list, err := idx.Lookup(test.id, test.tpe)
		OK(t, err)

		if len(list) != 1 {
			t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
		}
		blob := list[0]

		t.Logf("looking for blob %v/%v, got %v", test.tpe, test.id.Str(), blob)

		Equals(t, test.packID, blob.PackID)
		Equals(t, test.tpe, blob.Type)
		Equals(t, test.offset, blob.Offset)
		Equals(t, test.length, blob.Length)
	}

	Equals(t, oldIdx, idx.Supersedes())

	blobs := idx.ListPack(exampleLookupTest.packID)
	if len(blobs) != len(exampleLookupTest.blobs) {
		t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
	}

	for _, blob := range blobs {
		b, ok := exampleLookupTest.blobs[blob.ID]
		if !ok {
			t.Errorf("unexpected blob %v found", blob.ID.Str())
		}
		if blob.Type != b {
			t.Errorf("unexpected type for blob %v: want %v, got %v", blob.ID.Str(), b, blob.Type)
		}
	}
}

func BenchmarkDecodeIndex(b *testing.B) {
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := repository.DecodeIndex(docExample)
		OK(b, err)
	}
}

func TestIndexUnserializeOld(t *testing.T) {
	idx, err := repository.DecodeOldIndex(docOldExample)
	OK(t, err)

	for _, test := range exampleTests {
		list, err := idx.Lookup(test.id, test.tpe)
		OK(t, err)

		if len(list) != 1 {
			t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
		}
		blob := list[0]

		Equals(t, test.packID, blob.PackID)
		Equals(t, test.tpe, blob.Type)
		Equals(t, test.offset, blob.Offset)
		Equals(t, test.length, blob.Length)
	}

	Equals(t, 0, len(idx.Supersedes()))
}

func TestIndexPacks(t *testing.T) {
	idx := repository.NewIndex()
	packs := restic.NewIDSet()

	for i := 0; i < 20; i++ {
		packID := restic.NewRandomID()
		idx.Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   restic.DataBlob,
				ID:     restic.NewRandomID(),
				Offset: 0,
				Length: 23,
			},
			PackID: packID,
		})

		packs.Insert(packID)
	}

	idxPacks := idx.Packs()
	Assert(t, packs.Equals(idxPacks), "packs in index do not match packs added to index")
}
internal/repository/key.go (new file, 260 lines)
@@ -0,0 +1,260 @@
package repository

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/user"
	"restic"
	"time"

	"restic/errors"

	"restic/backend"
	"restic/crypto"
	"restic/debug"
)

var (
	// ErrNoKeyFound is returned when no key for the repository could be decrypted.
	ErrNoKeyFound = errors.New("wrong password or no key found")

	// ErrMaxKeysReached is returned when the maximum number of keys was checked and no key could be found.
	ErrMaxKeysReached = errors.New("maximum number of keys reached")
)

// Key represents an encrypted master key for a repository.
type Key struct {
	Created  time.Time `json:"created"`
	Username string    `json:"username"`
	Hostname string    `json:"hostname"`

	KDF  string `json:"kdf"`
	N    int    `json:"N"`
	R    int    `json:"r"`
	P    int    `json:"p"`
	Salt []byte `json:"salt"`
	Data []byte `json:"data"`

	user   *crypto.Key
	master *crypto.Key

	name string
}

// KDFParams tracks the parameters used for the KDF. If not set, it will be
// calibrated on the first run of AddKey().
var KDFParams *crypto.KDFParams

var (
	// KDFTimeout specifies the maximum runtime for the KDF.
	KDFTimeout = 500 * time.Millisecond

	// KDFMemory limits the memory the KDF is allowed to use.
	KDFMemory = 60
)

// createMasterKey creates a new master key in the given backend and encrypts
// it with the password.
func createMasterKey(s *Repository, password string) (*Key, error) {
	return AddKey(context.TODO(), s, password, nil)
}

// OpenKey tries to decrypt the key specified by name with the given password.
func OpenKey(ctx context.Context, s *Repository, name string, password string) (*Key, error) {
	k, err := LoadKey(ctx, s, name)
	if err != nil {
		debug.Log("LoadKey(%v) returned error %v", name[:12], err)
		return nil, err
	}

	// check KDF
	if k.KDF != "scrypt" {
		return nil, errors.New("only supported KDF is scrypt()")
	}

	// derive user key
	params := crypto.KDFParams{
		N: k.N,
		R: k.R,
		P: k.P,
	}
	k.user, err = crypto.KDF(params, k.Salt, password)
	if err != nil {
		return nil, errors.Wrap(err, "crypto.KDF")
	}

	// decrypt master keys
	buf := make([]byte, len(k.Data))
	n, err := k.user.Decrypt(buf, k.Data)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	// restore json
	k.master = &crypto.Key{}
	err = json.Unmarshal(buf, k.master)
	if err != nil {
		debug.Log("Unmarshal() returned error %v", err)
		return nil, errors.Wrap(err, "Unmarshal")
	}
	k.name = name

	if !k.Valid() {
		return nil, errors.New("Invalid key for repository")
	}

	return k, nil
}

// SearchKey tries to decrypt at most maxKeys keys in the backend with the
// given password. If none could be found, ErrNoKeyFound is returned. When
// maxKeys is reached, ErrMaxKeysReached is returned. When setting maxKeys to
// zero, all keys in the repo are checked.
func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int) (*Key, error) {
	checked := 0

	// try at most maxKeys keys in repo
	for name := range s.Backend().List(ctx, restic.KeyFile) {
		if maxKeys > 0 && checked > maxKeys {
			return nil, ErrMaxKeysReached
		}
		checked++

		debug.Log("trying key %v", name[:12])
		key, err := OpenKey(ctx, s, name, password)
		if err != nil {
			debug.Log("key %v returned error %v", name[:12], err)

			// ErrUnauthenticated means the password is wrong, try the next key
			if errors.Cause(err) == crypto.ErrUnauthenticated {
				continue
			}

			return nil, err
		}

		debug.Log("successfully opened key %v", name[:12])
		return key, nil
	}

	return nil, ErrNoKeyFound
}

// LoadKey loads a key from the backend.
func LoadKey(ctx context.Context, s *Repository, name string) (k *Key, err error) {
	h := restic.Handle{Type: restic.KeyFile, Name: name}
	data, err := backend.LoadAll(ctx, s.be, h)
	if err != nil {
		return nil, err
	}

	k = &Key{}
	err = json.Unmarshal(data, k)
	if err != nil {
		return nil, errors.Wrap(err, "Unmarshal")
	}

	return k, nil
}

// AddKey adds a new key to an already existing repository.
func AddKey(ctx context.Context, s *Repository, password string, template *crypto.Key) (*Key, error) {
	// make sure we have valid KDF parameters
	if KDFParams == nil {
		p, err := crypto.Calibrate(KDFTimeout, KDFMemory)
		if err != nil {
			return nil, errors.Wrap(err, "Calibrate")
		}

		KDFParams = &p
		debug.Log("calibrated KDF parameters are %v", p)
	}

	// fill meta data about key
	newkey := &Key{
		Created: time.Now(),
		KDF:     "scrypt",
		N:       KDFParams.N,
		R:       KDFParams.R,
		P:       KDFParams.P,
	}

	hn, err := os.Hostname()
	if err == nil {
		newkey.Hostname = hn
	}

	usr, err := user.Current()
	if err == nil {
		newkey.Username = usr.Username
	}

	// generate random salt
	newkey.Salt, err = crypto.NewSalt()
	if err != nil {
		panic("unable to read enough random bytes for salt: " + err.Error())
	}

	// call KDF to derive user key
	newkey.user, err = crypto.KDF(*KDFParams, newkey.Salt, password)
	if err != nil {
		return nil, err
	}

	if template == nil {
		// generate new random master keys
		newkey.master = crypto.NewRandomKey()
	} else {
		// copy master keys from old key
		newkey.master = template
	}

	// encrypt master keys (as json) with user key
	buf, err := json.Marshal(newkey.master)
	if err != nil {
		return nil, errors.Wrap(err, "Marshal")
	}

	newkey.Data, err = newkey.user.Encrypt(nil, buf)
	if err != nil {
		return nil, err
	}

	// dump as json
	buf, err = json.Marshal(newkey)
	if err != nil {
		return nil, errors.Wrap(err, "Marshal")
	}

	// store in repository and return
	h := restic.Handle{
		Type: restic.KeyFile,
		Name: restic.Hash(buf).String(),
	}

	err = s.be.Save(ctx, h, bytes.NewReader(buf))
	if err != nil {
		return nil, err
	}

	newkey.name = h.Name

	return newkey, nil
}

func (k *Key) String() string {
	if k == nil {
		return "<Key nil>"
	}
	return fmt.Sprintf("<Key of %s@%s, created on %s>", k.Username, k.Hostname, k.Created)
}

// Name returns an identifier for the key.
func (k Key) Name() string {
	return k.name
}

// Valid tests whether the mac and encryption keys are valid (i.e. not zero).
func (k *Key) Valid() bool {
	return k.user.Valid() && k.master.Valid()
}
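
Opening a repository then reduces to trying the password against the stored key files. A hedged sketch of the caller side, assuming a *Repository value repo and a context.Context value ctx are already in scope:

// Try the password against at most 20 key files; each attempt re-runs
// scrypt with the parameters stored in the candidate key file.
key, err := repository.SearchKey(ctx, repo, "s3cr3t", 20)
switch err {
case nil:
	fmt.Printf("opened %v\n", key) // uses Key.String() from above
case repository.ErrNoKeyFound:
	// every tried key failed to decrypt: wrong password (or no keys)
case repository.ErrMaxKeysReached:
	// gave up after 20 candidate key files
}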
internal/repository/master_index.go (new file, 266 lines)
@@ -0,0 +1,266 @@
package repository

import (
	"context"
	"restic"
	"sync"

	"restic/errors"

	"restic/debug"
)

// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved.
type MasterIndex struct {
	idx      []*Index
	idxMutex sync.RWMutex
}

// NewMasterIndex creates a new master index.
func NewMasterIndex() *MasterIndex {
	return &MasterIndex{}
}

// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	debug.Log("looking up id %v, tpe %v", id.Str(), tpe)

	for _, idx := range mi.idx {
		blobs, err = idx.Lookup(id, tpe)
		if err == nil {
			debug.Log("found id %v: %v", id.Str(), blobs)
			return
		}
	}

	debug.Log("id %v not found in any index", id.Str())
	return nil, errors.Errorf("id %v not found in any index", id)
}

// LookupSize queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		if idx.Has(id, tpe) {
			return idx.LookupSize(id, tpe)
		}
	}

	return 0, errors.Errorf("id %v not found in any index", id)
}

// ListPack returns the list of blobs in a pack. The list from the first index
// that knows the pack is returned, or nil if no index contains information
// about the pack id.
func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		list := idx.ListPack(id)
		if len(list) > 0 {
			return list
		}
	}

	return nil
}

// Has queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Has(id restic.ID, tpe restic.BlobType) bool {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		if idx.Has(id, tpe) {
			return true
		}
	}

	return false
}

// Count returns the number of blobs of type t in the index.
func (mi *MasterIndex) Count(t restic.BlobType) (n uint) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	var sum uint
	for _, idx := range mi.idx {
		sum += idx.Count(t)
	}

	return sum
}

// Insert adds a new index to the MasterIndex.
func (mi *MasterIndex) Insert(idx *Index) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	mi.idx = append(mi.idx, idx)
}

// Remove deletes an index from the MasterIndex.
func (mi *MasterIndex) Remove(index *Index) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	for i, idx := range mi.idx {
		if idx == index {
			mi.idx = append(mi.idx[:i], mi.idx[i+1:]...)
			return
		}
	}
}

// Store remembers the id and pack in the index.
func (mi *MasterIndex) Store(pb restic.PackedBlob) {
	mi.idxMutex.RLock()
	for _, idx := range mi.idx {
		if !idx.Final() {
			mi.idxMutex.RUnlock()
			idx.Store(pb)
			return
		}
	}

	mi.idxMutex.RUnlock()
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	newIdx := NewIndex()
	newIdx.Store(pb)
	mi.idx = append(mi.idx, newIdx)
}

// NotFinalIndexes returns all indexes that have not yet been saved.
func (mi *MasterIndex) NotFinalIndexes() []*Index {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	var list []*Index

	for _, idx := range mi.idx {
		if !idx.Final() {
			list = append(list, idx)
		}
	}

	debug.Log("return %d indexes", len(list))
	return list
}

// FullIndexes returns all indexes that are full but not yet finalized.
func (mi *MasterIndex) FullIndexes() []*Index {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	var list []*Index

	debug.Log("checking %d indexes", len(mi.idx))
	for _, idx := range mi.idx {
		if idx.Final() {
			debug.Log("index %p is final", idx)
			continue
		}

		if IndexFull(idx) {
			debug.Log("index %p is full", idx)
			list = append(list, idx)
		} else {
			debug.Log("index %p not full", idx)
		}
	}

	debug.Log("return %d indexes", len(list))
	return list
}

// All returns all indexes.
func (mi *MasterIndex) All() []*Index {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	return mi.idx
}

// Each returns a channel that yields all blobs known to the index. When the
// context is cancelled, the background goroutine terminates. This blocks any
// modification of the index.
func (mi *MasterIndex) Each(ctx context.Context) <-chan restic.PackedBlob {
	mi.idxMutex.RLock()

	ch := make(chan restic.PackedBlob)

	go func() {
		defer mi.idxMutex.RUnlock()
		defer close(ch)

		for _, idx := range mi.idx {
			idxCh := idx.Each(ctx)
			for pb := range idxCh {
				select {
				case <-ctx.Done():
					return
				case ch <- pb:
				}
			}
		}
	}()

	return ch
}

// RebuildIndex combines all known indexes to a new index, leaving out any
// packs whose ID is contained in packBlacklist. The new index contains the IDs
// of all known indexes in the "supersedes" field.
func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)

	newIndex := NewIndex()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	for i, idx := range mi.idx {
		debug.Log("adding index %d", i)

		for pb := range idx.Each(ctx) {
			if packBlacklist.Has(pb.PackID) {
				continue
			}

			newIndex.Store(pb)
		}

		if !idx.Final() {
			debug.Log("index %d isn't final, don't add to supersedes field", i)
			continue
		}

		id, err := idx.ID()
		if err != nil {
			debug.Log("index %d does not have an ID: %v", i, err)
			return nil, err
		}

		debug.Log("adding index id %v to supersedes field", id.Str())

		err = newIndex.AddToSupersedes(id)
		if err != nil {
			return nil, err
		}
	}

	return newIndex, nil
}
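
How the pieces fit together, as a sketch; illustrative only, and it assumes an index idx (e.g. decoded as shown for index.go), a blob ID blobID, and a restic.PackedBlob pb are already in scope:

mi := repository.NewMasterIndex()
mi.Insert(idx) // e.g. an index decoded from the backend

// Lookup scans the collected indexes in order and returns the first hit.
if blobs, err := mi.Lookup(blobID, restic.DataBlob); err == nil {
	fmt.Printf("blob stored in pack %v\n", blobs[0].PackID)
}

// Store appends to the first non-finalized index, or opens a new one.
mi.Store(pb)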
internal/repository/packer_manager.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package repository

import (
	"context"
	"crypto/sha256"
	"io"
	"os"
	"restic"
	"sync"

	"restic/errors"
	"restic/hashing"

	"restic/crypto"
	"restic/debug"
	"restic/fs"
	"restic/pack"
)

// Saver implements saving data in a backend.
type Saver interface {
	Save(context.Context, restic.Handle, io.Reader) error
}

// Packer holds a pack.Packer together with a hash writer.
type Packer struct {
	*pack.Packer
	hw      *hashing.Writer
	tmpfile *os.File
}

// packerManager keeps a list of open packs and creates new ones on demand.
type packerManager struct {
	be      Saver
	key     *crypto.Key
	pm      sync.Mutex
	packers []*Packer

	pool sync.Pool
}

const minPackSize = 4 * 1024 * 1024
const maxPackSize = 16 * 1024 * 1024
const maxPackers = 200

// newPackerManager returns a new packer manager which writes temporary files
// to a temporary directory.
func newPackerManager(be Saver, key *crypto.Key) *packerManager {
	return &packerManager{
		be:  be,
		key: key,
		pool: sync.Pool{
			New: func() interface{} {
				return make([]byte, (minPackSize+maxPackSize)/2)
			},
		},
	}
}

// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker(size uint) (packer *Packer, err error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packers) > 0 {
		debug.Log("searching packer for %d bytes\n", size)
		for i, p := range r.packers {
			if p.Packer.Size()+size < maxPackSize {
				debug.Log("found packer %v", p)
				// remove from list
				r.packers = append(r.packers[:i], r.packers[i+1:]...)
				return p, nil
			}
		}
	}

	// no suitable packer found, return new
	debug.Log("create new pack for %d bytes", size)
	tmpfile, err := fs.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.Wrap(err, "fs.TempFile")
	}

	hw := hashing.NewWriter(tmpfile, sha256.New())
	p := pack.NewPacker(r.key, hw)
	packer = &Packer{
		Packer:  p,
		hw:      hw,
		tmpfile: tmpfile,
	}

	return packer, nil
}

// insertPacker appends p to r.packers.
func (r *packerManager) insertPacker(p *Packer) {
	r.pm.Lock()
	defer r.pm.Unlock()

	r.packers = append(r.packers, p)
	debug.Log("%d packers\n", len(r.packers))
}

// savePacker stores p in the backend.
func (r *Repository) savePacker(p *Packer) error {
	debug.Log("save packer with %d blobs (%d bytes)\n", p.Packer.Count(), p.Packer.Size())
	_, err := p.Packer.Finalize()
	if err != nil {
		return err
	}

	_, err = p.tmpfile.Seek(0, 0)
	if err != nil {
		return errors.Wrap(err, "Seek")
	}

	id := restic.IDFromHash(p.hw.Sum(nil))
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	err = r.be.Save(context.TODO(), h, p.tmpfile)
	if err != nil {
		debug.Log("Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("saved as %v", h)

	err = p.tmpfile.Close()
	if err != nil {
		return errors.Wrap(err, "close tempfile")
	}

	err = fs.RemoveIfExists(p.tmpfile.Name())
	if err != nil {
		return errors.Wrap(err, "Remove")
	}

	// update blobs in the index
	for _, b := range p.Packer.Blobs() {
		debug.Log("  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   b.Type,
				ID:     b.ID,
				Offset: b.Offset,
				Length: uint(b.Length),
			},
			PackID: id,
		})
	}

	return nil
}

// countPacker returns the number of open (unfinished) packers.
func (r *packerManager) countPacker() int {
	r.pm.Lock()
	defer r.pm.Unlock()

	return len(r.packers)
}
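
The intended lifecycle of a packer, sketched from inside the package; this assumes a *Repository r that embeds the packerManager (as elsewhere in this package), an encrypted blob ciphertext, and its ID id:

packer, err := r.findPacker(uint(len(ciphertext)))
if err != nil {
	return err
}

// Append the blob to the pack's temporary file.
if _, err := packer.Add(restic.DataBlob, id, ciphertext); err != nil {
	return err
}

if packer.Size() < minPackSize && r.countPacker() < maxPackers {
	r.insertPacker(packer) // still has room: keep it open for more blobs
	return nil
}

return r.savePacker(packer) // full enough: write header, upload, index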
internal/repository/packer_manager_test.go (new file, 162 lines)
@@ -0,0 +1,162 @@
package repository

import (
	"context"
	"io"
	"math/rand"
	"os"
	"restic"
	"restic/backend/mem"
	"restic/crypto"
	"restic/fs"
	"restic/mock"
	"testing"
)

type randReader struct {
	src  rand.Source
	rand *rand.Rand
}

func newRandReader(src rand.Source) *randReader {
	return &randReader{
		src:  src,
		rand: rand.New(src),
	}
}

// Read generates len(p) random bytes and writes them into p. It
// always returns len(p) and a nil error.
func (r *randReader) Read(p []byte) (n int, err error) {
	for i := 0; i < len(p); i += 7 {
		val := r.src.Int63()
		for j := 0; i+j < len(p) && j < 7; j++ {
			p[i+j] = byte(val)
			val >>= 8
		}
	}
	return len(p), nil
}

func randomID(rd io.Reader) restic.ID {
	id := restic.ID{}
	_, err := io.ReadFull(rd, id[:])
	if err != nil {
		panic(err)
	}
	return id
}

const maxBlobSize = 1 << 20

func saveFile(t testing.TB, be Saver, f *os.File, id restic.ID) {
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}
	t.Logf("save file %v", h)

	if err := be.Save(context.TODO(), h, f); err != nil {
		t.Fatal(err)
	}

	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	if err := fs.RemoveIfExists(f.Name()); err != nil {
		t.Fatal(err)
	}
}

func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf []byte) (bytes int) {
	for i := 0; i < 100; i++ {
		l := rnd.rand.Intn(1 << 20)
		seed := rnd.rand.Int63()

		packer, err := pm.findPacker(uint(l))
		if err != nil {
			t.Fatal(err)
		}

		rd := newRandReader(rand.NewSource(seed))
		id := randomID(rd)
		buf = buf[:l]
		_, err = io.ReadFull(rd, buf)
		if err != nil {
			t.Fatal(err)
		}

		n, err := packer.Add(restic.DataBlob, id, buf)
		if err != nil {
			t.Fatal(err)
		}
		if n != l {
			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
		}
		bytes += l

		if packer.Size() < minPackSize && pm.countPacker() < maxPackers {
			pm.insertPacker(packer)
			continue
		}

		_, err = packer.Finalize()
		if err != nil {
			t.Fatal(err)
		}

		if _, err = packer.tmpfile.Seek(0, 0); err != nil {
			t.Fatal(err)
		}

		packID := restic.IDFromHash(packer.hw.Sum(nil))
		saveFile(t, be, packer.tmpfile, packID)
	}

	return bytes
}

func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager) (bytes int) {
	if pm.countPacker() > 0 {
		for _, packer := range pm.packers {
			n, err := packer.Finalize()
			if err != nil {
				t.Fatal(err)
			}
			bytes += int(n)

			packID := restic.IDFromHash(packer.hw.Sum(nil))
			saveFile(t, be, packer.tmpfile, packID)
		}
	}

	return bytes
}

func TestPackerManager(t *testing.T) {
	rnd := newRandReader(rand.NewSource(23))

	be := mem.New()
	pm := newPackerManager(be, crypto.NewRandomKey())

	blobBuf := make([]byte, maxBlobSize)

	bytes := fillPacks(t, rnd, be, pm, blobBuf)
	bytes += flushRemainingPacks(t, rnd, be, pm)

	t.Logf("saved %d bytes", bytes)
}

func BenchmarkPackerManager(t *testing.B) {
	rnd := newRandReader(rand.NewSource(23))

	be := &mock.Backend{
		SaveFn: func(context.Context, restic.Handle, io.Reader) error { return nil },
	}
	blobBuf := make([]byte, maxBlobSize)

	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		bytes := 0
		pm := newPackerManager(be, crypto.NewRandomKey())
		bytes += fillPacks(t, rnd, be, pm, blobBuf)
		bytes += flushRemainingPacks(t, rnd, be, pm)
		t.Logf("saved %d bytes", bytes)
	}
}
internal/repository/parallel.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package repository

import (
	"context"
	"restic"
	"sync"

	"restic/debug"
)

// ParallelWorkFunc gets one file ID to work on. If an error is returned,
// processing stops. When the context is cancelled the function should return.
type ParallelWorkFunc func(ctx context.Context, id string) error

// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned,
// processing stops. When the context is cancelled the function should return.
type ParallelIDWorkFunc func(ctx context.Context, id restic.ID) error

// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yields. If f returns an error, the process is aborted and the
// first error is returned.
func FilesInParallel(ctx context.Context, repo restic.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
	wg := &sync.WaitGroup{}
	ch := repo.List(ctx, t)
	errors := make(chan error, n)

	for i := 0; uint(i) < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for {
				select {
				case id, ok := <-ch:
					if !ok {
						return
					}

					err := f(ctx, id)
					if err != nil {
						errors <- err
						return
					}
				case <-ctx.Done():
					return
				}
			}
		}()
	}

	wg.Wait()

	select {
	case err := <-errors:
		return err
	default:
	}

	return nil
}

// ParallelWorkFuncParseID converts a function that takes a restic.ID to a
// function that takes a string. An error is returned for filenames that do
// not parse as a restic.ID.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
	return func(ctx context.Context, s string) error {
		id, err := restic.ParseID(s)
		if err != nil {
			debug.Log("invalid ID %q: %v", s, err)
			return err
		}

		return f(ctx, id)
	}
}
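
A usage sketch for FilesInParallel, assuming a restic.Lister value repo; note that f runs concurrently in n goroutines, so any shared state needs its own synchronization:

var mu sync.Mutex
count := 0

err := repository.FilesInParallel(context.TODO(), repo, restic.DataFile, 8,
	func(ctx context.Context, id string) error {
		mu.Lock()
		count++ // count the listed files; stands in for real per-file work
		mu.Unlock()
		return nil
	})
if err != nil {
	// first error reported by any worker
}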
internal/repository/parallel_test.go (new file, 127 lines)
@@ -0,0 +1,127 @@
package repository_test

import (
	"context"
	"math/rand"
	"restic"
	"testing"
	"time"

	"restic/errors"

	"restic/repository"
	. "restic/test"
)

type testIDs []string

var lister = testIDs{
	"40bb581cd36de952985c97a3ff6b21df41ee897d4db2040354caa36a17ff5268",
	"2e15811a4d14ffac66d36a9ff456019d8de4c10c949d45b643f8477d17e92ff3",
	"70c11b3ed521ad6b76d905c002ca98b361fca06aca060a063432c7311155a4da",
	"8056a33e75dccdda701b6c989c7ed0cb71bbb6da13c6427fe5986f0896cc91c0",
	"79d8776200596aa0237b10d470f7b850b86f8a1a80988ef5c8bee2874ce992e2",
	"f9f1f29791c6b79b90b35efd083f17a3b163bbbafb1a2fdf43d46d56cffda289",
	"3834178d05d0f6dd07f872ee0262ff1ace0f0f375768227d3c902b0b66591369",
	"66d5cc68c9186414806f366ae5493ce7f229212993750a4992be4030f6af28c5",
	"ebca5af4f397944f68cd215e3dfa2b197a7ba0f7c17d65d9f7390d0a15cde296",
	"d4511ce6ff732d106275a57e40745c599e987c0da44c42cddbef592aac102437",
	"f366202f0bfeefaedd7b49e2f21a90d3cbddb97d257a74d788dd34e19a684dae",
	"a5c17728ab2433cd50636dd5c6c7068c7a44f2999d09c46e8f528466da8a059d",
	"bae0f9492b9b208233029b87692a1a55cbd7fbe1cf3f6d7bc693ac266a6d6f0e",
	"9d500187913c7510d71d1902703d312c7aaa56f1e98351385b9535fdabae595e",
	"ffbddd8a4c1e54d258bb3e16d3929b546b61af63cb560b3e3061a8bef5b24552",
	"201bb3abf655e7ef71e79ed4fb1079b0502b5acb4d9fad5e72a0de690c50a386",
	"08eb57bbd559758ea96e99f9b7688c30e7b3bcf0c4562ff4535e2d8edeffaeed",
	"e50b7223b04985ff38d9e11d1cba333896ef4264f82bd5d0653a028bce70e542",
	"65a9421cd59cc7b7a71dcd9076136621af607fb4701d2e5c2af23b6396cf2f37",
	"995a655b3521c19b4d0c266222266d89c8fc62889597d61f45f336091e646d57",
	"51ec6f0bce77ed97df2dd7ae849338c3a8155a057da927eedd66e3d61be769ad",
	"7b3923a0c0666431efecdbf6cb171295ec1710b6595eebcba3b576b49d13e214",
	"2cedcc3d14698bea7e4b0546f7d5d48951dd90add59e6f2d44b693fd8913717d",
	"fd6770cbd54858fdbd3d7b4239b985e5599180064d93ca873f27e86e8407d011",
	"9edc51d8e6e04d05c9757848c1bfbfdc8e86b6330982294632488922e59fdb1b",
	"1a6c4fbb24ad724c968b2020417c3d057e6c89e49bdfb11d91006def65eab6a0",
	"cb3b29808cd0adfa2dca1f3a04f98114fbccf4eb487cdd4022f49bd70eeb049b",
	"f55edcb40c619e29a20e432f8aaddc83a649be2c2d1941ccdc474cd2af03d490",
	"e8ccc1763a92de23566b95c3ad1414a098016ece69a885fc8a72782a7517d17c",
	"0fe2e3db8c5a12ad7101a63a0fffee901be54319cfe146bead7aec851722f82d",
	"36be45a6ae7c95ad97cee1b33023be324bce7a7b4b7036e24125679dd9ff5b44",
	"1685ed1a57c37859fbef1f7efb7509f20b84ec17a765605de43104d2fa37884b",
	"9d83629a6a004c505b100a0b5d0b246833b63aa067aa9b59e3abd6b74bc4d3a8",
	"be49a66b60175c5e2ee273b42165f86ef11bb6518c1c79950bcd3f4c196c98bd",
	"0fd89885d821761b4a890782908e75793028747d15ace3c6cbf0ad56582b4fa5",
	"94a767519a4e352a88796604943841fea21429f3358b4d5d55596dbda7d15dce",
	"8dd07994afe6e572ddc9698fb0d13a0d4c26a38b7992818a71a99d1e0ac2b034",
	"f7380a6f795ed31fbeb2945c72c5fd1d45044e5ab152311e75e007fa530f5847",
	"5ca1ce01458e484393d7e9c8af42b0ff37a73a2fee0f18e14cff0fb180e33014",
	"8f44178be3fe0a2bd41f922576fb7a9b19d589754504be746f56c759df328fda",
	"12d33847c2be711c989f37360dd7aa8537fd14972262a4530634a08fdf32a767",
	"31e077f5080f78846a00093caff2b6b839519cc47516142eeba9c41d4072a605",
	"14f01db8a0054e70222b76d2555d70114b4bf8a0f02084324af2df226f14a795",
	"7f5dbbaf31b4551828e8e76cef408375db9fbcdcdb6b5949f2d1b0c4b8632132",
	"42a5d9b9bb7e4a16f23ba916bcf87f38c1aa1f2de2ab79736f725850a8ff6a1b",
	"e06f8f901ea708beba8712a11b6e2d0be7c4b018d0254204ef269bcdf5e8c6cc",
	"d9ba75785bf45b0c4fd3b2365c968099242483f2f0d0c7c20306dac11fae96e9",
	"428debbb280873907cef2ec099efe1566e42a59775d6ec74ded0c4048d5a6515",
	"3b51049d4dae701098e55a69536fa31ad2be1adc17b631a695a40e8a294fe9c0",
	"168f88aa4b105e9811f5f79439cc1a689be4eec77f3361d42f22fe8f7ddc74a9",
	"0baa0ab2249b33d64449a899cb7bd8eae5231f0d4ff70f09830dc1faa2e4abee",
	"0c3896d346b580306a49de29f3a78913a41e14b8461b124628c33a64636241f2",
	"b18313f1651c15e100e7179aa3eb8ffa62c3581159eaf7f83156468d19781e42",
	"996361f7d988e48267ccc7e930fed4637be35fe7562b8601dceb7a32313a14c8",
	"dfb4e6268437d53048d22b811048cd045df15693fc6789affd002a0fc80a6e60",
	"34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58",
}

func (tests testIDs) List(ctx context.Context, t restic.FileType) <-chan string {
	ch := make(chan string)

	go func() {
		defer close(ch)

		for i := 0; i < 500; i++ {
			for _, id := range tests {
				select {
				case ch <- id:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return ch
}

func TestFilesInParallel(t *testing.T) {
	f := func(ctx context.Context, id string) error {
		time.Sleep(1 * time.Millisecond)
		return nil
	}

	for n := uint(1); n < 5; n++ {
		err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f)
		OK(t, err)
	}
}

var errTest = errors.New("test error")

func TestFilesInParallelWithError(t *testing.T) {
	f := func(ctx context.Context, id string) error {
		time.Sleep(1 * time.Millisecond)

		if rand.Float32() < 0.01 {
			return errTest
		}

		return nil
	}

	for n := uint(1); n < 5; n++ {
		err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f)
		Equals(t, errTest, err)
	}
}
21
internal/repository/pool.go
Normal file
@@ -0,0 +1,21 @@
package repository

import (
	"sync"

	"github.com/restic/chunker"
)

// bufPool hands out reusable byte slices sized for at least one minimal
// chunk, so hot paths do not allocate a fresh buffer per blob.
var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, chunker.MinSize)
	},
}

// getBuf returns a buffer from the pool, allocating one if the pool is empty.
func getBuf() []byte {
	return bufPool.Get().([]byte)
}

// freeBuf returns data to the pool for reuse by a later getBuf call.
func freeBuf(data []byte) {
	bufPool.Put(data)
}
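SaveAndEncrypt in repository.go below is the main consumer of this pool; the intended pairing is get, use as scratch space, free. A minimal sketch of that pattern (the comment about the buffer's use is illustrative, not part of this file):

buf := getBuf()    // at least chunker.MinSize bytes of scratch space
defer freeBuf(buf) // return the slice to the pool when done

// use buf as temporary storage, e.g. as the destination buffer for an
// encryption call, as SaveAndEncrypt does below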
130
internal/repository/repack.go
Normal file
@@ -0,0 +1,130 @@
package repository

import (
	"context"
	"crypto/sha256"
	"io"
	"restic"
	"restic/debug"
	"restic/fs"
	"restic/hashing"
	"restic/pack"

	"restic/errors"
)

// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs are saved
// into a new pack. Returned is the list of obsolete packs which can then
// be removed.
func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *restic.Progress) (obsoletePacks restic.IDSet, err error) {
	debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

	for packID := range packs {
		// load the complete pack into a temp file
		h := restic.Handle{Type: restic.DataFile, Name: packID.String()}

		tempfile, err := fs.TempFile("", "restic-temp-repack-")
		if err != nil {
			return nil, errors.Wrap(err, "TempFile")
		}

		beRd, err := repo.Backend().Load(ctx, h, 0, 0)
		if err != nil {
			return nil, err
		}

		hrd := hashing.NewReader(beRd, sha256.New())
		packLength, err := io.Copy(tempfile, hrd)
		if err != nil {
			return nil, errors.Wrap(err, "Copy")
		}

		if err = beRd.Close(); err != nil {
			return nil, errors.Wrap(err, "Close")
		}

		hash := restic.IDFromHash(hrd.Sum(nil))
		debug.Log("pack %v loaded (%d bytes), hash %v", packID.Str(), packLength, hash.Str())

		if !packID.Equal(hash) {
			return nil, errors.Errorf("hash does not match id: want %v, got %v", packID, hash)
		}

		_, err = tempfile.Seek(0, 0)
		if err != nil {
			return nil, errors.Wrap(err, "Seek")
		}

		blobs, err := pack.List(repo.Key(), tempfile, packLength)
		if err != nil {
			return nil, err
		}

		debug.Log("processing pack %v, blobs: %v", packID.Str(), len(blobs))
		var buf []byte
		for _, entry := range blobs {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			if !keepBlobs.Has(h) {
				continue
			}

			debug.Log("  process blob %v", h)

			// restore the full capacity of buf (an earlier iteration may have
			// truncated it after decryption), then grow it if still too small
			buf = buf[:cap(buf)]
			if uint(len(buf)) < entry.Length {
				buf = make([]byte, entry.Length)
			}
			buf = buf[:entry.Length]

			n, err := tempfile.ReadAt(buf, int64(entry.Offset))
			if err != nil {
				return nil, errors.Wrap(err, "ReadAt")
			}

			if n != len(buf) {
				return nil, errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
					h, tempfile.Name(), len(buf), n)
			}

			n, err = repo.Key().Decrypt(buf, buf)
			if err != nil {
				return nil, err
			}

			buf = buf[:n]

			id := restic.Hash(buf)
			if !id.Equal(entry.ID) {
				return nil, errors.Errorf("read blob %v from %v: wrong data returned, hash is %v",
					h, tempfile.Name(), id)
			}

			_, err = repo.SaveBlob(ctx, entry.Type, buf, entry.ID)
			if err != nil {
				return nil, err
			}

			debug.Log("  saved blob %v", entry.ID.Str())

			keepBlobs.Delete(h)
		}

		if err = tempfile.Close(); err != nil {
			return nil, errors.Wrap(err, "Close")
		}

		if err = fs.RemoveIfExists(tempfile.Name()); err != nil {
			return nil, errors.Wrap(err, "Remove")
		}
		if p != nil {
			p.Report(restic.Stat{Blobs: 1})
		}
	}

	if err := repo.Flush(); err != nil {
		return nil, err
	}

	return packs, nil
}
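A minimal sketch of the calling convention, modeled on the repack helper in the tests below: the caller picks the packs to rewrite and the blobs worth keeping, then deletes the obsolete pack files that Repack returns. The variable names are illustrative.

obsolete, err := repository.Repack(ctx, repo, removePacks, keepBlobs, nil)
if err != nil {
	return err
}
for id := range obsolete {
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}
	if err := repo.Backend().Remove(ctx, h); err != nil {
		return err
	}
}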
240
internal/repository/repack_test.go
Normal file
@@ -0,0 +1,240 @@
package repository_test

import (
	"context"
	"io"
	"math/rand"
	"restic"
	"restic/index"
	"restic/repository"
	"testing"
)

func randomSize(min, max int) int {
	return rand.Intn(max-min) + min
}

func random(t testing.TB, length int) []byte {
	rd := restic.NewRandReader(rand.New(rand.NewSource(int64(length))))
	buf := make([]byte, length)
	_, err := io.ReadFull(rd, buf)
	if err != nil {
		t.Fatalf("unable to read %d random bytes: %v", length, err)
	}

	return buf
}

func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) {
	for i := 0; i < blobs; i++ {
		var (
			tpe    restic.BlobType
			length int
		)

		if rand.Float32() < pData {
			tpe = restic.DataBlob
			length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
		} else {
			tpe = restic.TreeBlob
			length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
		}

		buf := random(t, length)
		id := restic.Hash(buf)

		if repo.Index().Has(id, restic.DataBlob) {
			t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob)
			continue
		}

		_, err := repo.SaveBlob(context.TODO(), tpe, buf, id)
		if err != nil {
			t.Fatalf("SaveFrom() error %v", err)
		}

		if rand.Float32() < 0.2 {
			if err = repo.Flush(); err != nil {
				t.Fatalf("repo.Flush() returned error %v", err)
			}
		}
	}

	if err := repo.Flush(); err != nil {
		t.Fatalf("repo.Flush() returned error %v", err)
	}
}

// selectBlobs splits the list of all blobs randomly into two lists. A blob
// will be contained in the first one with probability p.
func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
	list1 = restic.NewBlobSet()
	list2 = restic.NewBlobSet()

	blobs := restic.NewBlobSet()

	for id := range repo.List(context.TODO(), restic.DataFile) {
		entries, _, err := repo.ListPack(context.TODO(), id)
		if err != nil {
			t.Fatalf("error listing pack %v: %v", id, err)
		}

		for _, entry := range entries {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			if blobs.Has(h) {
				t.Errorf("ignoring duplicate blob %v", h)
				continue
			}
			blobs.Insert(h)

			if rand.Float32() <= p {
				list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
			} else {
				list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
			}
		}
	}

	return list1, list2
}

func listPacks(t *testing.T, repo restic.Repository) restic.IDSet {
	list := restic.NewIDSet()
	for id := range repo.List(context.TODO(), restic.DataFile) {
		list.Insert(id)
	}

	return list
}

func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet {
	packs := restic.NewIDSet()

	idx := repo.Index()
	for h := range blobs {
		list, err := idx.Lookup(h.ID, h.Type)
		if err != nil {
			t.Fatal(err)
		}

		for _, pb := range list {
			packs.Insert(pb.PackID)
		}
	}

	return packs
}

func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) {
	repackedBlobs, err := repository.Repack(context.TODO(), repo, packs, blobs, nil)
	if err != nil {
		t.Fatal(err)
	}

	for id := range repackedBlobs {
		err = repo.Backend().Remove(context.TODO(), restic.Handle{Type: restic.DataFile, Name: id.String()})
		if err != nil {
			t.Fatal(err)
		}
	}
}

func saveIndex(t *testing.T, repo restic.Repository) {
	if err := repo.SaveIndex(context.TODO()); err != nil {
		t.Fatalf("repo.SaveIndex() %v", err)
	}
}

func rebuildIndex(t *testing.T, repo restic.Repository) {
	idx, _, err := index.New(context.TODO(), repo, restic.NewIDSet(), nil)
	if err != nil {
		t.Fatal(err)
	}

	for id := range repo.List(context.TODO(), restic.IndexFile) {
		h := restic.Handle{
			Type: restic.IndexFile,
			Name: id.String(),
		}
		err = repo.Backend().Remove(context.TODO(), h)
		if err != nil {
			t.Fatal(err)
		}
	}

	_, err = idx.Save(context.TODO(), repo, nil)
	if err != nil {
		t.Fatal(err)
	}
}

func reloadIndex(t *testing.T, repo restic.Repository) {
	repo.SetIndex(repository.NewMasterIndex())
	if err := repo.LoadIndex(context.TODO()); err != nil {
		t.Fatalf("error loading new index: %v", err)
	}
}

func TestRepack(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	createRandomBlobs(t, repo, 100, 0.7)

	packsBefore := listPacks(t, repo)

	// Running repack on empty ID sets should not do anything at all.
	repack(t, repo, nil, nil)

	packsAfter := listPacks(t, repo)

	if !packsAfter.Equals(packsBefore) {
		t.Fatalf("packs are not equal, Repack modified something. Before:\n  %v\nAfter:\n  %v",
			packsBefore, packsAfter)
	}

	saveIndex(t, repo)

	removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2)

	removePacks := findPacksForBlobs(t, repo, removeBlobs)

	repack(t, repo, removePacks, keepBlobs)
	rebuildIndex(t, repo)
	reloadIndex(t, repo)

	packsAfter = listPacks(t, repo)
	for id := range removePacks {
		if packsAfter.Has(id) {
			t.Errorf("pack %v still present although it should have been repacked and removed", id.Str())
		}
	}

	idx := repo.Index()

	for h := range keepBlobs {
		list, err := idx.Lookup(h.ID, h.Type)
		if err != nil {
			t.Errorf("unable to find blob %v in repo", h.ID.Str())
			continue
		}

		if len(list) != 1 {
			t.Errorf("expected one pack in the list, got: %v", list)
			continue
		}

		pb := list[0]

		if removePacks.Has(pb.PackID) {
			t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID)
		}
	}

	for h := range removeBlobs {
		if _, err := idx.Lookup(h.ID, h.Type); err == nil {
			t.Errorf("blob %v still contained in the repo", h)
		}
	}
}
579
internal/repository/repository.go
Normal file
@@ -0,0 +1,579 @@
package repository

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os"
	"restic"

	"restic/errors"

	"restic/backend"
	"restic/crypto"
	"restic/debug"
	"restic/pack"
)

// Repository is used to access a repository in a backend.
type Repository struct {
	be      restic.Backend
	cfg     restic.Config
	key     *crypto.Key
	keyName string
	idx     *MasterIndex

	*packerManager
}

// New returns a new repository with backend be.
func New(be restic.Backend) *Repository {
	repo := &Repository{
		be:            be,
		idx:           NewMasterIndex(),
		packerManager: newPackerManager(be, nil),
	}

	return repo
}

// Config returns the repository configuration.
func (r *Repository) Config() restic.Config {
	return r.cfg
}

// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
	return restic.PrefixLength(r.be, t)
}

// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(ctx context.Context, t restic.FileType, id restic.ID) ([]byte, error) {
	debug.Log("load %v with id %v", t, id.Str())

	h := restic.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(ctx, r.be, h)
	if err != nil {
		debug.Log("error loading %v: %v", h, err)
		return nil, err
	}

	if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
		return nil, errors.Errorf("load %v: invalid data returned", h)
	}

	// decrypt
	n, err := r.decryptTo(buf, buf)
	if err != nil {
		return nil, err
	}

	return buf[:n], nil
}

// loadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) loadBlob(ctx context.Context, id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
	debug.Log("load %v with id %v (buf len %v, cap %d)", t, id.Str(), len(plaintextBuf), cap(plaintextBuf))

	// lookup packs
	blobs, err := r.idx.Lookup(id, t)
	if err != nil {
		debug.Log("id %v not found in index: %v", id.Str(), err)
		return 0, err
	}

	var lastError error
	for _, blob := range blobs {
		debug.Log("id %v found: %v", id.Str(), blob)

		if blob.Type != t {
			debug.Log("blob %v has wrong block type, want %v", blob, t)
		}

		// load blob from pack
		h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}

		if uint(cap(plaintextBuf)) < blob.Length {
			return 0, errors.Errorf("buffer is too small: %v < %v", cap(plaintextBuf), blob.Length)
		}

		plaintextBuf = plaintextBuf[:blob.Length]

		n, err := restic.ReadAt(ctx, r.be, h, int64(blob.Offset), plaintextBuf)
		if err != nil {
			debug.Log("error loading blob %v: %v", blob, err)
			lastError = err
			continue
		}

		if uint(n) != blob.Length {
			lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
				id.Str(), blob.Length, uint(n))
			debug.Log("lastError: %v", lastError)
			continue
		}

		// decrypt
		n, err = r.decryptTo(plaintextBuf, plaintextBuf)
		if err != nil {
			lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
			continue
		}
		plaintextBuf = plaintextBuf[:n]

		// check hash
		if !restic.Hash(plaintextBuf).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

		return len(plaintextBuf), nil
	}

	if lastError != nil {
		return 0, lastError
	}

	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}

// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, item interface{}) (err error) {
	buf, err := r.LoadAndDecrypt(ctx, t, id)
	if err != nil {
		return err
	}

	return json.Unmarshal(buf, item)
}

// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, error) {
	return r.idx.LookupSize(id, tpe)
}

// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := restic.Hash(data)
		id = &hashedID
	}

	debug.Log("save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return restic.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		return restic.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, ciphertext)
	if err != nil {
		return restic.ID{}, err
	}

	// if the pack is not full enough and there are less than maxPackers
	// packers, put back to the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}

// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(ctx context.Context, t restic.FileType, item interface{}) (restic.ID, error) {
	debug.Log("save new blob %v", t)
	plaintext, err := json.Marshal(item)
	if err != nil {
		return restic.ID{}, errors.Wrap(err, "json.Marshal")
	}

	return r.SaveUnpacked(ctx, t, plaintext)
}

// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) {
	ciphertext := restic.NewBlobBuffer(len(p))
	ciphertext, err = r.Encrypt(ciphertext, p)
	if err != nil {
		return restic.ID{}, err
	}

	id = restic.Hash(ciphertext)
	h := restic.Handle{Type: t, Name: id.String()}

	err = r.be.Save(ctx, h, bytes.NewReader(ciphertext))
	if err != nil {
		debug.Log("error saving blob %v: %v", h, err)
		return restic.ID{}, err
	}

	debug.Log("blob %v saved", h)
	return id, nil
}

// Flush saves all remaining packs.
func (r *Repository) Flush() error {
	r.pm.Lock()
	defer r.pm.Unlock()

	debug.Log("manually flushing %d packs", len(r.packerManager.packers))

	for _, p := range r.packerManager.packers {
		err := r.savePacker(p)
		if err != nil {
			return err
		}
	}
	r.packerManager.packers = r.packerManager.packers[:0]
	return nil
}

// Backend returns the backend for the repository.
func (r *Repository) Backend() restic.Backend {
	return r.be
}

// Index returns the currently used MasterIndex.
func (r *Repository) Index() restic.Index {
	return r.idx
}

// SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i restic.Index) {
	r.idx = i.(*MasterIndex)
}

// SaveIndex saves an index in the repository.
func SaveIndex(ctx context.Context, repo restic.Repository, index *Index) (restic.ID, error) {
	buf := bytes.NewBuffer(nil)

	err := index.Finalize(buf)
	if err != nil {
		return restic.ID{}, err
	}

	return repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes())
}

// saveIndex saves all indexes in the backend.
func (r *Repository) saveIndex(ctx context.Context, indexes ...*Index) error {
	for i, idx := range indexes {
		debug.Log("Saving index %d", i)

		sid, err := SaveIndex(ctx, r, idx)
		if err != nil {
			return err
		}

		debug.Log("Saved index %d as %v", i, sid.Str())
	}

	return nil
}

// SaveIndex saves all new indexes in the backend.
func (r *Repository) SaveIndex(ctx context.Context) error {
	return r.saveIndex(ctx, r.idx.NotFinalIndexes()...)
}

// SaveFullIndex saves all full indexes in the backend.
func (r *Repository) SaveFullIndex(ctx context.Context) error {
	return r.saveIndex(ctx, r.idx.FullIndexes()...)
}

const loadIndexParallelism = 20

// LoadIndex loads all index files from the backend in parallel and stores them
// in the master index. The first error that occurred is returned.
func (r *Repository) LoadIndex(ctx context.Context) error {
	debug.Log("Loading index")

	errCh := make(chan error, 1)
	indexes := make(chan *Index)

	worker := func(ctx context.Context, id restic.ID) error {
		idx, err := LoadIndex(ctx, r, id)
		if err != nil {
			return err
		}

		select {
		case indexes <- idx:
		case <-ctx.Done():
		}

		return nil
	}

	go func() {
		defer close(indexes)
		errCh <- FilesInParallel(ctx, r.be, restic.IndexFile, loadIndexParallelism,
			ParallelWorkFuncParseID(worker))
	}()

	for idx := range indexes {
		r.idx.Insert(idx)
	}

	if err := <-errCh; err != nil {
		return err
	}

	return nil
}

// LoadIndex loads the index id from backend and returns it.
func LoadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*Index, error) {
	idx, err := LoadIndexWithDecoder(ctx, repo, id, DecodeIndex)
	if err == nil {
		return idx, nil
	}

	if errors.Cause(err) == ErrOldIndexFormat {
		fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
		return LoadIndexWithDecoder(ctx, repo, id, DecodeOldIndex)
	}

	return nil, err
}

// SearchKey finds a key with the supplied password, afterwards the config is
// read and parsed. It tries at most maxKeys key files in the repo.
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int) error {
	key, err := SearchKey(ctx, r, password, maxKeys)
	if err != nil {
		return err
	}

	r.key = key.master
	r.packerManager.key = key.master
	r.keyName = key.Name()
	r.cfg, err = restic.LoadConfig(ctx, r)
	return err
}

// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(ctx context.Context, password string) error {
	has, err := r.be.Test(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil {
		return err
	}
	if has {
		return errors.New("repository master key and config already initialized")
	}

	cfg, err := restic.CreateConfig()
	if err != nil {
		return err
	}

	return r.init(ctx, password, cfg)
}

// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error {
	key, err := createMasterKey(r, password)
	if err != nil {
		return err
	}

	r.key = key.master
	r.packerManager.key = key.master
	r.keyName = key.Name()
	r.cfg = cfg
	_, err = r.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg)
	return err
}

// decrypt authenticates and decrypts ciphertext and stores the result in
// plaintext.
func (r *Repository) decryptTo(plaintext, ciphertext []byte) (int, error) {
	if r.key == nil {
		return 0, errors.New("key for repository not set")
	}

	return r.key.Decrypt(plaintext, ciphertext)
}

// Encrypt encrypts and authenticates the plaintext and saves the result in
// ciphertext.
func (r *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
	if r.key == nil {
		return nil, errors.New("key for repository not set")
	}

	return r.key.Encrypt(ciphertext, plaintext)
}

// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
	return r.key
}

// KeyName returns the name of the current key in the backend.
func (r *Repository) KeyName() string {
	return r.keyName
}

// List returns a channel that yields all IDs of type t in the backend.
func (r *Repository) List(ctx context.Context, t restic.FileType) <-chan restic.ID {
	out := make(chan restic.ID)
	go func() {
		defer close(out)
		for strID := range r.be.List(ctx, t) {
			if id, err := restic.ParseID(strID); err == nil {
				select {
				case out <- id:
				case <-ctx.Done():
					return
				}
			}
		}
	}()
	return out
}

// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(ctx context.Context, id restic.ID) ([]restic.Blob, int64, error) {
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	blobInfo, err := r.Backend().Stat(ctx, h)
	if err != nil {
		return nil, 0, err
	}

	blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size)
	if err != nil {
		return nil, 0, err
	}

	return blobs, blobInfo.Size, nil
}

// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete(ctx context.Context) error {
	if b, ok := r.be.(restic.Deleter); ok {
		return b.Delete(ctx)
	}

	return errors.New("Delete() called for backend that does not implement this method")
}

// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
	return r.be.Close()
}

// LoadBlob loads a blob of type t from the repository to the buffer. buf must
// be large enough to hold the encrypted blob, since it is used as scratch
// space.
func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) (int, error) {
	debug.Log("load blob %v into buf (len %v, cap %v)", id.Str(), len(buf), cap(buf))
	size, err := r.idx.LookupSize(id, t)
	if err != nil {
		return 0, err
	}

	if cap(buf) < restic.CiphertextLength(int(size)) {
		return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", cap(buf), restic.CiphertextLength(int(size)))
	}

	n, err := r.loadBlob(ctx, id, t, buf)
	if err != nil {
		return 0, err
	}
	buf = buf[:n]

	debug.Log("loaded %d bytes into buf %p", len(buf), buf)

	return len(buf), err
}

// SaveBlob saves a blob of type t into the repository. If id is the null id, it
// will be computed and returned.
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) {
	var i *restic.ID
	if !id.IsNull() {
		i = &id
	}
	return r.SaveAndEncrypt(ctx, t, buf, i)
}

// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) {
	debug.Log("load tree %v", id.Str())

	size, err := r.idx.LookupSize(id, restic.TreeBlob)
	if err != nil {
		return nil, err
	}

	debug.Log("size is %d, create buffer", size)
	buf := restic.NewBlobBuffer(int(size))

	n, err := r.loadBlob(ctx, id, restic.TreeBlob, buf)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	t := &restic.Tree{}
	err = json.Unmarshal(buf, t)
	if err != nil {
		return nil, err
	}

	return t, nil
}

// SaveTree stores a tree into the repository and returns the ID. The ID is
// checked against the index. The tree is only stored when the index does not
// contain the ID.
func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {
	buf, err := json.Marshal(t)
	if err != nil {
		return restic.ID{}, errors.Wrap(err, "MarshalJSON")
	}

	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	buf = append(buf, '\n')

	id := restic.Hash(buf)
	if r.idx.Has(id, restic.TreeBlob) {
		return id, nil
	}

	_, err = r.SaveBlob(ctx, restic.TreeBlob, buf, id)
	return id, err
}
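Taken together, a typical read path through this type looks as follows. This is a sketch only: be, ctx, password and the blob id are stand-ins supplied by the caller, and error handling around backend construction is elided.

repo := repository.New(be)
if err := repo.SearchKey(ctx, password, 10); err != nil {
	return err
}
if err := repo.LoadIndex(ctx); err != nil {
	return err
}

size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
	return err
}
buf := restic.NewBlobBuffer(int(size)) // large enough for the ciphertext
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
if err != nil {
	return err
}
plaintext := buf[:n]

The tests below exercise the same sequence against an in-memory repository.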
390
internal/repository/repository_test.go
Normal file
@@ -0,0 +1,390 @@
package repository_test

import (
	"bytes"
	"context"
	"crypto/sha256"
	"io"
	"math/rand"
	"path/filepath"
	"testing"
	"time"

	"restic"
	"restic/archiver"
	"restic/repository"
	. "restic/test"
)

var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}

var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))

func TestSave(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rnd, data)
		OK(t, err)

		id := restic.Hash(data)

		// save
		sid, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{})
		OK(t, err)

		Equals(t, id, sid)

		OK(t, repo.Flush())
		// OK(t, repo.SaveIndex())

		// read back
		buf := restic.NewBlobBuffer(size)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(t, err)
		Equals(t, len(buf), n)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}

func TestSaveFrom(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rnd, data)
		OK(t, err)

		id := restic.Hash(data)

		// save
		id2, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id)
		OK(t, err)
		Equals(t, id, id2)

		OK(t, repo.Flush())

		// read back
		buf := restic.NewBlobBuffer(size)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(t, err)
		Equals(t, len(buf), n)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}

func BenchmarkSaveAndEncrypt(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	size := 4 << 20 // 4MiB

	data := make([]byte, size)
	_, err := io.ReadFull(rnd, data)
	OK(t, err)

	id := restic.ID(sha256.Sum256(data))

	t.ResetTimer()
	t.SetBytes(int64(size))

	for i := 0; i < t.N; i++ {
		// save
		_, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id)
		OK(t, err)
	}
}

func TestLoadTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	_, err := repo.LoadTree(context.TODO(), *sn.Tree)
	OK(t, err)
}

func BenchmarkLoadTree(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		_, err := repo.LoadTree(context.TODO(), *sn.Tree)
		OK(t, err)
	}
}

func TestLoadBlob(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(t, err)

	id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
	OK(t, err)
	OK(t, repo.Flush())

	// first, test with buffers that are too small
	for _, testlength := range []int{length - 20, length, restic.CiphertextLength(length) - 1} {
		buf = make([]byte, 0, testlength)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		if err == nil {
			t.Errorf("LoadBlob() did not return an error for a buffer that is too small to hold the blob")
			continue
		}

		if n != 0 {
			t.Errorf("LoadBlob() returned an error and n > 0")
			continue
		}
	}

	// then use buffers that are large enough
	base := restic.CiphertextLength(length)
	for _, testlength := range []int{base, base + 7, base + 15, base + 1000} {
		buf = make([]byte, 0, testlength)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		if err != nil {
			t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err)
			continue
		}

		if n != length {
			t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, n)
			continue
		}
	}
}

func BenchmarkLoadBlob(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(b, err)

	id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
	OK(b, err)
	OK(b, repo.Flush())

	b.ResetTimer()
	b.SetBytes(int64(length))

	for i := 0; i < b.N; i++ {
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(b, err)
		if n != length {
			b.Errorf("wanted %d bytes, got %d", length, n)
		}

		id2 := restic.Hash(buf[:n])
		if !id.Equal(id2) {
			b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str())
		}
	}
}

func BenchmarkLoadAndDecrypt(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(b, err)

	dataID := restic.Hash(buf)

	storageID, err := repo.SaveUnpacked(context.TODO(), restic.DataFile, buf)
	OK(b, err)
	// OK(b, repo.Flush())

	b.ResetTimer()
	b.SetBytes(int64(length))

	for i := 0; i < b.N; i++ {
		data, err := repo.LoadAndDecrypt(context.TODO(), restic.DataFile, storageID)
		OK(b, err)
		if len(data) != length {
			b.Errorf("wanted %d bytes, got %d", length, len(data))
		}

		id2 := restic.Hash(data)
		if !dataID.Equal(id2) {
			b.Errorf("wrong data returned, wanted %v, got %v", storageID.Str(), id2.Str())
		}
	}
}

func TestLoadJSONUnpacked(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a snapshot
	sn := restic.Snapshot{}
	sn.Hostname = "foobar"
	sn.Username = "test!"

	id, err := repo.SaveJSONUnpacked(context.TODO(), restic.SnapshotFile, &sn)
	OK(t, err)

	var sn2 restic.Snapshot

	// restore
	err = repo.LoadJSONUnpacked(context.TODO(), restic.SnapshotFile, id, &sn2)
	OK(t, err)

	Equals(t, sn.Hostname, sn2.Hostname)
	Equals(t, sn.Username, sn2.Username)
}

var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")

func TestRepositoryLoadIndex(t *testing.T) {
	repodir, cleanup := Env(t, repoFixture)
	defer cleanup()

	repo := repository.TestOpenLocal(t, repodir)
	OK(t, repo.LoadIndex(context.TODO()))
}

func BenchmarkLoadIndex(b *testing.B) {
	repository.TestUseLowSecurityKDFParameters(b)

	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	idx := repository.NewIndex()

	for i := 0; i < 5000; i++ {
		idx.Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   restic.DataBlob,
				Length: 1234,
				ID:     restic.NewRandomID(),
				Offset: 1235,
			},
			PackID: restic.NewRandomID(),
		})
	}

	id, err := repository.SaveIndex(context.TODO(), repo, idx)
	OK(b, err)

	b.Logf("index saved as %v (%v entries)", id.Str(), idx.Count(restic.DataBlob))
	fi, err := repo.Backend().Stat(context.TODO(), restic.Handle{Type: restic.IndexFile, Name: id.String()})
	OK(b, err)
	b.Logf("filesize is %v", fi.Size)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := repository.LoadIndex(context.TODO(), repo, id)
		OK(b, err)
	}
}

// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
	for i := 0; i < num; i++ {
		size := rand.Int() % sizeMax

		buf := make([]byte, size)
		_, err := io.ReadFull(rnd, buf)
		OK(t, err)

		_, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
		OK(t, err)
	}
}

func TestRepositoryIncrementalIndex(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	repository.IndexFull = func(*repository.Index) bool { return true }

	// add 15 packs
	for j := 0; j < 5; j++ {
		// add 3 packs, write intermediate index
		for i := 0; i < 3; i++ {
			saveRandomDataBlobs(t, repo, 5, 1<<15)
			OK(t, repo.Flush())
		}

		OK(t, repo.SaveFullIndex(context.TODO()))
	}

	// add another 5 packs
	for i := 0; i < 5; i++ {
		saveRandomDataBlobs(t, repo, 5, 1<<15)
		OK(t, repo.Flush())
	}

	// save final index
	OK(t, repo.SaveIndex(context.TODO()))

	packEntries := make(map[restic.ID]map[restic.ID]struct{})

	for id := range repo.List(context.TODO(), restic.IndexFile) {
		idx, err := repository.LoadIndex(context.TODO(), repo, id)
		OK(t, err)

		for pb := range idx.Each(context.TODO()) {
			if _, ok := packEntries[pb.PackID]; !ok {
				packEntries[pb.PackID] = make(map[restic.ID]struct{})
			}

			packEntries[pb.PackID][id] = struct{}{}
		}
	}

	for packID, ids := range packEntries {
		if len(ids) > 1 {
			t.Errorf("pack %v listed in %d indexes\n", packID, len(ids))
		}
	}
}
BIN
internal/repository/testdata/test-repo.tar.gz
vendored
Normal file
Binary file not shown.
104
internal/repository/testing.go
Normal file
@@ -0,0 +1,104 @@
package repository

import (
	"context"
	"os"
	"restic"
	"restic/backend/local"
	"restic/backend/mem"
	"restic/crypto"
	"restic/test"
	"testing"

	"github.com/restic/chunker"
)

// testKDFParams are the parameters for the KDF to be used during testing.
var testKDFParams = crypto.KDFParams{
	N: 128,
	R: 1,
	P: 1,
}

type logger interface {
	Logf(format string, args ...interface{})
}

// TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing.
func TestUseLowSecurityKDFParameters(t logger) {
	t.Logf("using low-security KDF parameters for test")
	KDFParams = &testKDFParams
}

// TestBackend returns a fully configured in-memory backend.
func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) {
	return mem.New(), func() {}
}

const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)

// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) {
	TestUseLowSecurityKDFParameters(t)

	var beCleanup func()
	if be == nil {
		be, beCleanup = TestBackend(t)
	}

	repo := New(be)

	cfg := restic.TestCreateConfig(t, testChunkerPol)
	err := repo.init(context.TODO(), test.TestPassword, cfg)
	if err != nil {
		t.Fatalf("TestRepository(): initialize repo failed: %v", err)
	}

	return repo, func() {
		if beCleanup != nil {
			beCleanup()
		}
	}
}

// TestRepository returns a repository initialized with a test password on an
// in-memory backend. When the environment variable RESTIC_TEST_REPO is set to
// a non-existing directory, a local backend is created there and this is used
// instead. The directory is not removed, but left there for inspection.
func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) {
	dir := os.Getenv("RESTIC_TEST_REPO")
	if dir != "" {
		_, err := os.Stat(dir)
		if err != nil {
			be, err := local.Create(local.Config{Path: dir})
			if err != nil {
				t.Fatalf("error creating local backend at %v: %v", dir, err)
			}
			return TestRepositoryWithBackend(t, be)
		}

		if err == nil {
			t.Logf("directory at %v already exists, using mem backend", dir)
		}
	}

	return TestRepositoryWithBackend(t, nil)
}

// TestOpenLocal opens a local repository.
func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) {
	be, err := local.Open(local.Config{Path: dir})
	if err != nil {
		t.Fatal(err)
	}

	repo := New(be)
	err = repo.SearchKey(context.TODO(), test.TestPassword, 10)
	if err != nil {
		t.Fatal(err)
	}

	return repo
}
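A sketch of how a test typically consumes these helpers, following the pattern used throughout the _test files in this commit (assuming the usual imports: context, testing, restic, restic/repository):

func TestSomething(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// exercise the repository under test; SaveBlob computes the ID
	// itself when passed the null ID
	_, err := repo.SaveBlob(context.TODO(), restic.DataBlob, []byte("example"), restic.ID{})
	if err != nil {
		t.Fatal(err)
	}
	if err := repo.Flush(); err != nil {
		t.Fatal(err)
	}
}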