mirror of
https://github.com/restic/restic.git
synced 2025-08-12 15:07:53 +00:00
Moves files
This commit is contained in:
2
internal/pack/doc.go
Normal file
2
internal/pack/doc.go
Normal file
@@ -0,0 +1,2 @@
|
||||
// Package pack provides functions for combining and parsing pack files.
|
||||
package pack
|
314
internal/pack/pack.go
Normal file
314
internal/pack/pack.go
Normal file
@@ -0,0 +1,314 @@
|
||||
package pack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"restic"
|
||||
"sync"
|
||||
|
||||
"restic/debug"
|
||||
"restic/errors"
|
||||
|
||||
"restic/crypto"
|
||||
)
|
||||
|
||||
// Packer is used to create a new Pack.
type Packer struct {
	blobs []restic.Blob // metadata (type, ID, offset, length) for each blob written so far

	bytes uint        // number of bytes written to wr so far
	k     *crypto.Key // key used to encrypt the header in Finalize
	wr    io.Writer   // destination for blob data and the header

	m sync.Mutex // guards all fields above; Packer must not be copied
}
|
||||
|
||||
// NewPacker returns a new Packer that can be used to pack blobs
|
||||
// together. If wr is nil, a bytes.Buffer is used.
|
||||
func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
|
||||
if wr == nil {
|
||||
wr = bytes.NewBuffer(nil)
|
||||
}
|
||||
return &Packer{k: k, wr: wr}
|
||||
}
|
||||
|
||||
// Add saves the data read from rd as a new blob to the packer. Returned is the
|
||||
// number of bytes written to the pack.
|
||||
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
||||
c := restic.Blob{Type: t, ID: id}
|
||||
|
||||
n, err := p.wr.Write(data)
|
||||
c.Length = uint(n)
|
||||
c.Offset = p.bytes
|
||||
p.bytes += uint(n)
|
||||
p.blobs = append(p.blobs, c)
|
||||
|
||||
return n, errors.Wrap(err, "Write")
|
||||
}
|
||||
|
||||
var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
|
||||
|
||||
// headerEntry is used with encoding/binary to read and write header entries.
type headerEntry struct {
	Type   uint8     // on-disk blob type: 0 = data, 1 = tree (see writeHeader)
	Length uint32    // length of the blob's data within the pack
	ID     restic.ID // blob ID
}
|
||||
|
||||
// Finalize writes the header for all added blobs and finalizes the pack.
// Returned are the number of bytes written, including the header. If the
// underlying writer implements io.Closer, it is closed.
func (p *Packer) Finalize() (uint, error) {
	p.m.Lock()
	defer p.m.Unlock()

	bytesWritten := p.bytes

	// Serialize all header entries into a temporary buffer first, then
	// encrypt the whole header in one go.
	hdrBuf := bytes.NewBuffer(nil)
	bytesHeader, err := p.writeHeader(hdrBuf)
	if err != nil {
		return 0, err
	}

	encryptedHeader, err := p.k.Encrypt(nil, hdrBuf.Bytes())
	if err != nil {
		return 0, err
	}

	// append the header
	n, err := p.wr.Write(encryptedHeader)
	if err != nil {
		return 0, errors.Wrap(err, "Write")
	}

	// Sanity check: the writer must have accepted the full ciphertext.
	hdrBytes := restic.CiphertextLength(int(bytesHeader))
	if n != hdrBytes {
		return 0, errors.New("wrong number of bytes written")
	}

	bytesWritten += uint(hdrBytes)

	// write length
	// The trailing 4-byte field stores the encrypted header's length so
	// readers can locate the header from the end of the file. The value
	// len(p.blobs)*entrySize equals bytesHeader written above.
	err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(entrySize))))
	if err != nil {
		return 0, errors.Wrap(err, "binary.Write")
	}
	bytesWritten += uint(binary.Size(uint32(0)))

	p.bytes = uint(bytesWritten)

	// Close the writer when possible so callers don't have to.
	if w, ok := p.wr.(io.Closer); ok {
		return bytesWritten, w.Close()
	}

	return bytesWritten, nil
}
|
||||
|
||||
// writeHeader constructs and writes the header to wr.
//
// One fixed-size headerEntry is written per blob, in the order the blobs
// were added. Returned is the number of plaintext header bytes written.
func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
	for _, b := range p.blobs {
		entry := headerEntry{
			Length: uint32(b.Length),
			ID:     b.ID,
		}

		// Map the blob type to its on-disk encoding (inverse of List).
		switch b.Type {
		case restic.DataBlob:
			entry.Type = 0
		case restic.TreeBlob:
			entry.Type = 1
		default:
			return 0, errors.Errorf("invalid blob type %v", b.Type)
		}

		err := binary.Write(wr, binary.LittleEndian, entry)
		if err != nil {
			return bytesWritten, errors.Wrap(err, "binary.Write")
		}

		bytesWritten += entrySize
	}

	return
}
|
||||
|
||||
// Size returns the number of bytes written so far.
|
||||
func (p *Packer) Size() uint {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
||||
return p.bytes
|
||||
}
|
||||
|
||||
// Count returns the number of blobs in this packer.
|
||||
func (p *Packer) Count() int {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
||||
return len(p.blobs)
|
||||
}
|
||||
|
||||
// Blobs returns the slice of blobs that have been written.
|
||||
func (p *Packer) Blobs() []restic.Blob {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
||||
return p.blobs
|
||||
}
|
||||
|
||||
// Writer returns the underlying writer.
func (p *Packer) Writer() io.Writer {
	return p.wr
}
|
||||
|
||||
// String returns a short human-readable description of the packer.
// NOTE(review): reads p.blobs and p.bytes without holding p.m, unlike the
// other accessors — racy if called concurrently with Add/Finalize; confirm
// whether callers only use this after finalization.
func (p *Packer) String() string {
	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
}
|
||||
|
||||
// readHeaderLength returns the header length read from the end of the file
// encoded in little endian.
//
// NOTE(review): no lower bound on size is checked here, so a size below
// four bytes yields a negative offset; callers (readHeader) reject such
// files via minFileSize first — confirm no other callers exist.
func readHeaderLength(rd io.ReaderAt, size int64) (uint32, error) {
	// The 4-byte length field is the very last thing in the file.
	off := size - int64(binary.Size(uint32(0)))

	buf := make([]byte, binary.Size(uint32(0)))
	n, err := rd.ReadAt(buf, off)
	if err != nil {
		return 0, errors.Wrap(err, "ReadAt")
	}

	if n != len(buf) {
		return 0, errors.New("not enough bytes read")
	}

	return binary.LittleEndian.Uint32(buf), nil
}
|
||||
|
||||
const maxHeaderSize = 16 * 1024 * 1024
|
||||
|
||||
// we require at least one entry in the header, and one blob for a pack file
// NOTE(review): this bound does not account for the trailing 4-byte
// header-length field — confirm whether that is intentional slack.
var minFileSize = entrySize + crypto.Extension
|
||||
|
||||
// readHeader reads the header at the end of rd. size is the length of the
|
||||
// whole data accessible in rd.
|
||||
func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
|
||||
debug.Log("size: %v", size)
|
||||
if size == 0 {
|
||||
err := InvalidFileError{Message: "file is empty"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
if size < int64(minFileSize) {
|
||||
err := InvalidFileError{Message: "file is too small"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
hl, err := readHeaderLength(rd, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
debug.Log("header length: %v", size)
|
||||
|
||||
if hl == 0 {
|
||||
err := InvalidFileError{Message: "header length is zero"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
if hl < crypto.Extension {
|
||||
err := InvalidFileError{Message: "header length is too small"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
if (hl-crypto.Extension)%uint32(entrySize) != 0 {
|
||||
err := InvalidFileError{Message: "header length is invalid"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
if int64(hl) > size-int64(binary.Size(hl)) {
|
||||
err := InvalidFileError{Message: "header is larger than file"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
if int64(hl) > maxHeaderSize {
|
||||
err := InvalidFileError{Message: "header is larger than maxHeaderSize"}
|
||||
return nil, errors.Wrap(err, "readHeader")
|
||||
}
|
||||
|
||||
buf := make([]byte, int(hl))
|
||||
n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl)))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ReadAt")
|
||||
}
|
||||
|
||||
if n != len(buf) {
|
||||
return nil, errors.New("not enough bytes read")
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// InvalidFileError is returned when a file is found that is not a pack file.
type InvalidFileError struct {
	Message string // human-readable reason the file was rejected
}
|
||||
|
||||
// Error returns the message describing why the file is invalid.
func (e InvalidFileError) Error() string {
	return e.Message
}
|
||||
|
||||
// List returns the list of entries found in a pack file.
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) {
	buf, err := readHeader(rd, size)
	if err != nil {
		return nil, err
	}

	// Decrypt the header in place; n is the plaintext length.
	n, err := k.Decrypt(buf, buf)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	hdrRd := bytes.NewReader(buf)

	entries = make([]restic.Blob, 0, uint(n)/entrySize)

	// Blob offsets are not stored on disk; they are reconstructed by
	// summing the lengths, since header entries appear in pack order.
	pos := uint(0)
	for {
		e := headerEntry{}
		err = binary.Read(hdrRd, binary.LittleEndian, &e)
		if errors.Cause(err) == io.EOF {
			// all entries consumed
			break
		}

		if err != nil {
			return nil, errors.Wrap(err, "binary.Read")
		}

		entry := restic.Blob{
			Length: uint(e.Length),
			ID:     e.ID,
			Offset: pos,
		}

		// Decode the on-disk type byte (inverse of writeHeader).
		switch e.Type {
		case 0:
			entry.Type = restic.DataBlob
		case 1:
			entry.Type = restic.TreeBlob
		default:
			return nil, errors.Errorf("invalid type %d", e.Type)
		}

		entries = append(entries, entry)

		pos += uint(e.Length)
	}

	return entries, nil
}
|
145
internal/pack/pack_test.go
Normal file
145
internal/pack/pack_test.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package pack_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"restic"
|
||||
"testing"
|
||||
|
||||
"restic/backend/mem"
|
||||
"restic/crypto"
|
||||
"restic/pack"
|
||||
. "restic/test"
|
||||
)
|
||||
|
||||
var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231}
|
||||
|
||||
// Buf is a blob of random test data together with its SHA-256 hash as ID.
type Buf struct {
	data []byte
	id   restic.ID
}
|
||||
|
||||
// newPack creates random blobs with the given lengths, packs them all as
// tree blobs, and returns the blobs, the raw pack bytes, and the pack size.
func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
	bufs := []Buf{}

	for _, l := range lengths {
		b := make([]byte, l)
		_, err := io.ReadFull(rand.Reader, b)
		OK(t, err)
		h := sha256.Sum256(b)
		bufs = append(bufs, Buf{data: b, id: h})
	}

	// pack blobs
	p := pack.NewPacker(k, nil)
	for _, b := range bufs {
		// NOTE(review): Add's error is silently dropped; a write failure
		// would only surface indirectly via the size checks later.
		p.Add(restic.TreeBlob, b.id, b.data)
	}

	_, err := p.Finalize()
	OK(t, err)

	// NewPacker(k, nil) falls back to a bytes.Buffer, so this type
	// assertion is safe here.
	packData := p.Writer().(*bytes.Buffer).Bytes()
	return bufs, packData, p.Size()
}
|
||||
|
||||
// verifyBlobs recomputes the expected pack size from bufs, re-parses the
// pack via pack.List, and checks that every blob round-trips unchanged.
func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) {
	written := 0
	for _, buf := range bufs {
		written += len(buf.data)
	}
	// header length
	written += binary.Size(uint32(0))
	// header + header crypto
	headerSize := len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
	written += restic.CiphertextLength(headerSize)

	// check length
	Equals(t, uint(written), packSize)

	// read and parse it again
	entries, err := pack.List(k, rd, int64(packSize))
	OK(t, err)
	Equals(t, len(entries), len(bufs))

	// buf is reused across iterations and grown only when needed.
	var buf []byte
	for i, b := range bufs {
		e := entries[i]
		Equals(t, b.id, e.ID)

		if len(buf) < int(e.Length) {
			buf = make([]byte, int(e.Length))
		}
		buf = buf[:int(e.Length)]
		n, err := rd.ReadAt(buf, int64(e.Offset))
		OK(t, err)
		buf = buf[:n]

		Assert(t, bytes.Equal(b.data, buf),
			"data for blob %v doesn't match", i)
	}
}
|
||||
|
||||
// TestCreatePack packs random blobs into an in-memory pack and verifies
// both the reported size and the blob contents.
func TestCreatePack(t *testing.T) {
	// create random keys
	k := crypto.NewRandomKey()

	bufs, packData, packSize := newPack(t, k, testLens)
	Equals(t, uint(len(packData)), packSize)
	verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize)
}
|
||||
|
||||
// blobTypeJSON pairs each blob type with its expected JSON serialization.
var blobTypeJSON = []struct {
	t   restic.BlobType
	res string
}{
	{restic.DataBlob, `"data"`},
	{restic.TreeBlob, `"tree"`},
}
|
||||
|
||||
// TestBlobTypeJSON checks that restic.BlobType round-trips through JSON.
func TestBlobTypeJSON(t *testing.T) {
	for _, test := range blobTypeJSON {
		// test serialize
		buf, err := json.Marshal(test.t)
		OK(t, err)
		Equals(t, test.res, string(buf))

		// test unserialize
		var v restic.BlobType
		err = json.Unmarshal([]byte(test.res), &v)
		OK(t, err)
		Equals(t, test.t, v)
	}
}
|
||||
|
||||
// TestUnpackReadSeeker stores a pack in the in-memory backend and verifies
// the blobs when read back through restic.ReaderAt.
func TestUnpackReadSeeker(t *testing.T) {
	// create random keys
	k := crypto.NewRandomKey()

	bufs, packData, packSize := newPack(t, k, testLens)

	b := mem.New()
	id := restic.Hash(packData)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	OK(t, b.Save(context.TODO(), handle, bytes.NewReader(packData)))
	verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
}
|
||||
|
||||
// TestShortPack repeats the backend round-trip with a single small blob to
// exercise the minimum-size code paths.
func TestShortPack(t *testing.T) {
	k := crypto.NewRandomKey()

	bufs, packData, packSize := newPack(t, k, []int{23})

	b := mem.New()
	id := restic.Hash(packData)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	OK(t, b.Save(context.TODO(), handle, bytes.NewReader(packData)))
	verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
}
|
Reference in New Issue
Block a user