Mirror of https://github.com/restic/restic.git
fuse: move to top level
fuse/dir.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package fuse

import (
    "os"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"

    "github.com/restic/restic"
    "github.com/restic/restic/repository"
)

// Statically ensure that *dir implements the given interfaces
var _ = fs.HandleReadDirAller(&dir{})
var _ = fs.NodeStringLookuper(&dir{})

type dir struct {
    repo  *repository.Repository
    items map[string]*restic.Node
    inode uint64
}

func newDir(repo *repository.Repository, node *restic.Node) (*dir, error) {
    tree, err := restic.LoadTree(repo, *node.Subtree)
    if err != nil {
        return nil, err
    }
    items := make(map[string]*restic.Node)
    for _, node := range tree.Nodes {
        items[node.Name] = node
    }

    return &dir{
        repo:  repo,
        items: items,
        inode: node.Inode,
    }, nil
}

func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId) (*dir, error) {
    tree, err := restic.LoadTree(repo, *snapshot.Tree)
    if err != nil {
        return nil, err
    }
    items := make(map[string]*restic.Node)
    for _, node := range tree.Nodes {
        items[node.Name] = node
    }

    return &dir{
        repo:  repo,
        items: items,
        inode: inodeFromBackendId(snapshot.ID),
    }, nil
}

func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = d.inode
    a.Mode = os.ModeDir | 0555
    return nil
}

func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    ret := make([]fuse.Dirent, 0, len(d.items))

    for _, node := range d.items {
        var typ fuse.DirentType
        switch node.Type {
        case "dir":
            typ = fuse.DT_Dir
        case "file":
            typ = fuse.DT_File
        case "symlink":
            typ = fuse.DT_Link
        }

        ret = append(ret, fuse.Dirent{
            Inode: node.Inode,
            Type:  typ,
            Name:  node.Name,
        })
    }

    return ret, nil
}

func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    node, ok := d.items[name]
    if !ok {
        return nil, fuse.ENOENT
    }
    switch node.Type {
    case "dir":
        return newDir(d.repo, node)
    case "file":
        return newFile(d.repo, node)
    case "symlink":
        return newLink(d.repo, node)
    default:
        return nil, fuse.ENOENT
    }
}
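The two "var _ = ..." lines above are compile-time interface assertions: the conversion only compiles while *dir satisfies fs.HandleReadDirAller and fs.NodeStringLookuper, so a refactoring that breaks either interface fails the build instead of failing at mount time. A minimal, self-contained sketch of the same idiom, using io.Reader and an illustrative doc type that are not part of restic:

package main

import "io"

// doc is a throwaway type used only to demonstrate the assertion idiom
// from dir.go: the conversion below stops compiling if *doc no longer
// implements io.Reader after a refactoring.
type doc struct{}

func (d *doc) Read(p []byte) (int, error) { return 0, io.EOF }

var _ = io.Reader(&doc{})

func main() {}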
fuse/file.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package fuse

import (
    "sync"

    "github.com/restic/restic"
    "github.com/restic/restic/backend"
    "github.com/restic/restic/pack"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"
)

// Statically ensure that *file implements the given interfaces
var _ = fs.HandleReader(&file{})
var _ = fs.HandleReleaser(&file{})

// BlobLoader is an abstracted repository with a reduced set of methods used
// for fuse operations.
type BlobLoader interface {
    LookupBlobSize(backend.ID) (uint, error)
    LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error)
}

type file struct {
    repo BlobLoader
    node *restic.Node

    sizes []uint
    blobs [][]byte
}

const defaultBlobSize = 128 * 1024

var blobPool = sync.Pool{
    New: func() interface{} {
        return make([]byte, defaultBlobSize)
    },
}

func newFile(repo BlobLoader, node *restic.Node) (*file, error) {
    sizes := make([]uint, len(node.Content))
    for i, id := range node.Content {
        size, err := repo.LookupBlobSize(id)
        if err != nil {
            return nil, err
        }

        sizes[i] = size
    }

    return &file{
        repo:  repo,
        node:  node,
        sizes: sizes,
        blobs: make([][]byte, len(node.Content)),
    }, nil
}

func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = f.node.Inode
    a.Mode = f.node.Mode
    a.Size = f.node.Size
    return nil
}

func (f *file) getBlobAt(i int) (blob []byte, err error) {
    if f.blobs[i] != nil {
        return f.blobs[i], nil
    }

    buf := blobPool.Get().([]byte)
    buf = buf[:cap(buf)]

    if uint(len(buf)) < f.sizes[i] {
        if len(buf) > defaultBlobSize {
            blobPool.Put(buf)
        }
        buf = make([]byte, f.sizes[i])
    }

    blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf)
    if err != nil {
        return nil, err
    }
    f.blobs[i] = blob

    return blob, nil
}

func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
    offset := req.Offset

    // Skip blobs before the offset
    startContent := 0
    for offset > int64(f.sizes[startContent]) {
        offset -= int64(f.sizes[startContent])
        startContent++
    }

    dst := resp.Data[0:req.Size]
    readBytes := 0
    remainingBytes := req.Size
    for i := startContent; remainingBytes > 0 && i < len(f.sizes); i++ {
        blob, err := f.getBlobAt(i)
        if err != nil {
            return err
        }

        if offset > 0 {
            blob = blob[offset:len(blob)]
            offset = 0
        }

        copied := copy(dst, blob)
        remainingBytes -= copied
        readBytes += copied

        dst = dst[copied:]
    }
    resp.Data = resp.Data[:readBytes]

    return nil
}

func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
    for i := range f.blobs {
        if f.blobs[i] != nil {
            blobPool.Put(f.blobs[i])
            f.blobs[i] = nil
        }
    }
    return nil
}
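Read first translates the request offset into a starting blob index plus a remaining in-blob offset, then copies from consecutive blobs until req.Size bytes are filled or the content runs out. A standalone sketch of that arithmetic, with made-up blob sizes and an added bounds check that the original loop does not have:

package main

import "fmt"

// skipBlobs mirrors the offset loop in file.Read: it returns the index of
// the first blob containing the requested offset and the offset remaining
// inside that blob. The start < len(sizes) guard is added here for safety
// and is not part of the original loop.
func skipBlobs(sizes []int64, offset int64) (start int, inBlob int64) {
    for start < len(sizes) && offset > sizes[start] {
        offset -= sizes[start]
        start++
    }
    return start, offset
}

func main() {
    sizes := []int64{100, 200, 300} // hypothetical blob sizes
    start, inBlob := skipBlobs(sizes, 250)
    fmt.Println(start, inBlob) // 1 150: the read begins 150 bytes into blob 1
}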
fuse/file_test.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package fuse

import (
    "bytes"
    "errors"
    "fmt"
    "math/rand"
    "testing"
    "time"

    "bazil.org/fuse"

    "github.com/restic/restic"
    "github.com/restic/restic/backend"
    "github.com/restic/restic/pack"
    . "github.com/restic/restic/test"
)

type MockRepo struct {
    blobs map[backend.ID][]byte
}

func NewMockRepo(content map[backend.ID][]byte) *MockRepo {
    return &MockRepo{blobs: content}
}

func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) {
    buf, ok := m.blobs[id]
    if !ok {
        return 0, errors.New("blob not found")
    }

    return uint(len(buf)), nil
}

func (m *MockRepo) LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) {
    size, err := m.LookupBlobSize(id)
    if err != nil {
        return nil, err
    }

    if uint(cap(buf)) < size {
        return nil, errors.New("buffer too small")
    }

    buf = buf[:size]
    copy(buf, m.blobs[id])
    return buf, nil
}

type MockContext struct{}

func (m MockContext) Deadline() (time.Time, bool)       { return time.Now(), false }
func (m MockContext) Done() <-chan struct{}             { return nil }
func (m MockContext) Err() error                        { return nil }
func (m MockContext) Value(key interface{}) interface{} { return nil }

var testContent = genTestContent()
var testContentLengths = []uint{
    4646 * 1024,
    655 * 1024,
    378 * 1024,
    8108 * 1024,
    558 * 1024,
}
var testMaxFileSize uint

func genTestContent() map[backend.ID][]byte {
    m := make(map[backend.ID][]byte)

    for _, length := range testContentLengths {
        buf := Random(int(length), int(length))
        id := backend.Hash(buf)
        m[id] = buf
        testMaxFileSize += length
    }

    return m
}

const maxBufSize = 20 * 1024 * 1024

func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte {
    ctx := MockContext{}

    req := &fuse.ReadRequest{
        Offset: int64(offset),
        Size:   length,
    }
    resp := &fuse.ReadResponse{
        Data: make([]byte, length),
    }
    OK(t, f.Read(ctx, req, resp))

    return resp.Data
}

var offsetReadsTests = []struct {
    offset, length int
}{
    {0, 5 * 1024 * 1024},
    {4000 * 1024, 1000 * 1024},
}

func TestFuseFile(t *testing.T) {
    repo := NewMockRepo(testContent)
    ctx := MockContext{}

    memfile := make([]byte, 0, maxBufSize)

    var ids backend.IDs
    for id, buf := range repo.blobs {
        ids = append(ids, id)
        memfile = append(memfile, buf...)
    }

    node := &restic.Node{
        Name:    "foo",
        Inode:   23,
        Mode:    0742,
        Size:    42,
        Content: ids,
    }
    f, err := newFile(repo, node)
    OK(t, err)

    attr := fuse.Attr{}
    OK(t, f.Attr(ctx, &attr))

    Equals(t, node.Inode, attr.Inode)
    Equals(t, node.Mode, attr.Mode)
    Equals(t, node.Size, attr.Size)

    for i, test := range offsetReadsTests {
        b := memfile[test.offset : test.offset+test.length]
        res := testRead(t, f, test.offset, test.length, b)
        if !bytes.Equal(b, res) {
            t.Errorf("test %d failed, wrong data returned", i)
        }
    }

    for i := 0; i < 200; i++ {
        length := rand.Intn(int(testMaxFileSize) / 2)
        offset := rand.Intn(int(testMaxFileSize))
        if length+offset > int(testMaxFileSize) {
            diff := length + offset - int(testMaxFileSize)
            length -= diff
        }

        b := memfile[offset : offset+length]
        fmt.Printf("test offset %d, length %d\n", offset, length)
        res := testRead(t, f, offset, length, b)
        if !bytes.Equal(b, res) {
            t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length)
        }
    }
}
fuse/fuse.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package fuse

import (
    "encoding/binary"

    "github.com/restic/restic/backend"
)

// inodeFromBackendId returns a unique uint64 from a backend id.
// Endianness has no specific meaning; it is just the simplest way to
// transform a []byte into a uint64.
func inodeFromBackendId(id backend.ID) uint64 {
    return binary.BigEndian.Uint64(id[:8])
}
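inodeFromBackendId derives an inode number by decoding the first eight bytes of the backend ID as a big-endian integer; since IDs are hashes, collisions in the first eight bytes are unlikely. A quick illustration with a made-up ID prefix:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Hypothetical first 8 bytes of a backend ID.
    id := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

    // Same conversion as inodeFromBackendId: big-endian decode of id[:8].
    inode := binary.BigEndian.Uint64(id)
    fmt.Printf("0x%016x\n", inode) // 0x0102030405060708
}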
fuse/link.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package fuse

import (
    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "github.com/restic/restic"
    "github.com/restic/restic/repository"
    "golang.org/x/net/context"
)

// Statically ensure that *link implements the given interface
var _ = fs.NodeReadlinker(&link{})

type link struct {
    node *restic.Node
}

func newLink(repo *repository.Repository, node *restic.Node) (*link, error) {
    return &link{node: node}, nil
}

func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
    return l.node.LinkTarget, nil
}

func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = l.node.Inode
    a.Mode = l.node.Mode
    return nil
}
fuse/snapshot.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package fuse

import (
    "os"
    "sync"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"

    "github.com/restic/restic"
    "github.com/restic/restic/backend"
    "github.com/restic/restic/repository"

    "golang.org/x/net/context"
)

type SnapshotWithId struct {
    *restic.Snapshot
    backend.ID
}

// These lines statically ensure that a *SnapshotsDir implements the given
// interfaces; a misplaced refactoring of the implementation that breaks
// the interfaces will be caught by the compiler
var _ = fs.HandleReadDirAller(&SnapshotsDir{})
var _ = fs.NodeStringLookuper(&SnapshotsDir{})

type SnapshotsDir struct {
    repo *repository.Repository

    // knownSnapshots maps a snapshot timestamp to the snapshot
    sync.RWMutex
    knownSnapshots map[string]SnapshotWithId
}

func NewSnapshotsDir(repo *repository.Repository) *SnapshotsDir {
    return &SnapshotsDir{
        repo:           repo,
        knownSnapshots: make(map[string]SnapshotWithId),
    }
}

func (sn *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
    attr.Inode = 0
    attr.Mode = os.ModeDir | 0555
    return nil
}

func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
    sn.Lock()
    defer sn.Unlock()

    for id := range sn.repo.List(backend.Snapshot, ctx.Done()) {
        snapshot, err := restic.LoadSnapshot(sn.repo, id)
        if err != nil {
            return err
        }
        sn.knownSnapshots[snapshot.Time.Format(time.RFC3339)] = SnapshotWithId{snapshot, id}
    }
    return nil
}

func (sn *SnapshotsDir) get(name string) (snapshot SnapshotWithId, ok bool) {
    sn.RLock()
    snapshot, ok = sn.knownSnapshots[name]
    sn.RUnlock()
    return snapshot, ok
}

func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    err := sn.updateCache(ctx)
    if err != nil {
        return nil, err
    }

    sn.RLock()
    defer sn.RUnlock()

    ret := make([]fuse.Dirent, 0)
    for _, snapshot := range sn.knownSnapshots {
        ret = append(ret, fuse.Dirent{
            Inode: inodeFromBackendId(snapshot.ID),
            Type:  fuse.DT_Dir,
            Name:  snapshot.Time.Format(time.RFC3339),
        })
    }

    return ret, nil
}

func (sn *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    snapshot, ok := sn.get(name)

    if !ok {
        // We don't know about it, update the cache
        err := sn.updateCache(ctx)
        if err != nil {
            return nil, err
        }
        snapshot, ok = sn.get(name)
        if !ok {
            // We still don't know about it, this time it really doesn't exist
            return nil, fuse.ENOENT
        }
    }

    return newDirFromSnapshot(sn.repo, snapshot)
}
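SnapshotsDir is the natural root of the mounted filesystem: ReadDirAll lists one directory per snapshot timestamp, and Lookup descends into a snapshot's tree via newDirFromSnapshot. The commit itself contains no mount code; the following is only a sketch of how it might be wired up with bazil.org/fuse, assuming the package is importable as github.com/restic/restic/fuse after this move and that a repository has already been opened elsewhere:

package main

import (
    "log"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"

    resticfuse "github.com/restic/restic/fuse" // assumed import path after this move
    "github.com/restic/restic/repository"
)

// snapshotFS is a hypothetical fs.FS whose root is the snapshots directory.
type snapshotFS struct {
    repo *repository.Repository
}

func (f snapshotFS) Root() (fs.Node, error) {
    return resticfuse.NewSnapshotsDir(f.repo), nil
}

// mount is a sketch, not part of this commit: it mounts the snapshot tree
// read-only at mountpoint and serves FUSE requests until the kernel
// connection is closed.
func mount(repo *repository.Repository, mountpoint string) error {
    conn, err := fuse.Mount(mountpoint, fuse.ReadOnly(), fuse.FSName("restic"))
    if err != nil {
        return err
    }
    defer conn.Close()

    return fs.Serve(conn, snapshotFS{repo: repo})
}

func main() {
    log.Println("sketch only: open a repository and call mount(repo, \"/mnt/restic\")")
}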