Mirror of https://github.com/restic/restic.git (synced 2025-08-16 03:27:31 +00:00)

Compare commits: v0.17.1...patch-rele (58 commits)

Commits in this comparison (SHA1):

e2a98aa955
bc64921a8e
633883bdb6
8348024664
c3f5748e5b
06ba4af436
fb4d9b3232
7bfe3d99ae
d46525a51b
3800eac54b
75f317eaf1
b8527f4b38
b8b7896d4c
d0c5b5a9b7
8aebea7ba2
0e9716a6e6
de4f8b344e
75ec7d3269
d8e0384940
62222edc4a
962279479d
0aee70b496
4380627cb7
e38f6794cd
f77e67086c
7eec85b4eb
2fb07dcdb1
5dcee7f0a3
44968c7d43
dbb5fb9fbd
3a4a5a8215
d8d955e0aa
2ce485063f
f72febb34f
ee9a5cdf70
46dce1f4fa
841f8bfef0
1f5791222a
a7b13bd603
0c711f5605
4df2e33568
11c1fbce20
9553d873ff
048c3bb240
d6e76a22a8
e3a022f9b5
fe269c752a
fc1fc00aa4
3c82fe6ef5
986d981bf6
0df2fa8135
49ccb7734c
491cc65e3a
8c1d6a50c1
9386acc4a6
5b60d49654
8056181301
76a647febf

CHANGELOG.md: 156 lines changed
@@ -1,5 +1,7 @@
# Table of Contents

* [Changelog for 0.17.3](#changelog-for-restic-0173-2024-11-08)
* [Changelog for 0.17.2](#changelog-for-restic-0172-2024-10-27)
* [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05)
* [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26)
* [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01)
@@ -36,6 +38,160 @@
* [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29)


# Changelog for restic 0.17.3 (2024-11-08)
The following sections list the changes in restic 0.17.3 relevant to
restic users. The changes are ordered by importance.

## Summary

* Fix #4971: Fix unusable `mount` on macOS Sonoma
* Fix #5003: Fix metadata errors during backup of removable disks on Windows
* Fix #5101: Do not retry load/list operation if SFTP connection is broken
* Fix #5107: Fix metadata error on Windows for backups using VSS
* Enh #5096: Allow `prune --dry-run` without lock

## Details

* Bugfix #4971: Fix unusable `mount` on macOS Sonoma

On macOS Sonoma when using FUSE-T, it was not possible to access files in a
mounted repository. This issue is now resolved.

https://github.com/restic/restic/issues/4971
https://github.com/restic/restic/pull/5048

* Bugfix #5003: Fix metadata errors during backup of removable disks on Windows

Since restic 0.17.0, backing up removable disks on Windows could report errors
with retrieving metadata like shown below.

```
error: incomplete metadata for d:\filename: get named security info failed with: Access is denied.
```

This has now been fixed.

https://github.com/restic/restic/issues/5003
https://github.com/restic/restic/pull/5123
https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444

* Bugfix #5101: Do not retry load/list operation if SFTP connection is broken

When using restic with the SFTP backend, backend operations that load a file or
list files were retried even if the SFTP connection was broken. This has now
been fixed.

https://github.com/restic/restic/pull/5101
https://forum.restic.net/t/restic-hanging-on-backup/8559

* Bugfix #5107: Fix metadata error on Windows for backups using VSS

Since restic 0.17.2, when creating a backup on Windows using
`--use-fs-snapshot`, restic would report an error like the following:

```
error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process.
```

This has now been fixed by correctly handling paths that refer to volume shadow
copy snapshots.

https://github.com/restic/restic/issues/5107
https://github.com/restic/restic/pull/5110
https://github.com/restic/restic/pull/5112

* Enhancement #5096: Allow `prune --dry-run` without lock

The `prune --dry-run --no-lock` now allows performing a dry-run without locking
the repository. Note that if the repository is modified concurrently, `prune`
may return inaccurate statistics or errors.

https://github.com/restic/restic/pull/5096
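
A minimal usage sketch of the combination described above, assuming a repository at the placeholder path `/srv/restic-repo`:

```
# dry run without taking a repository lock; statistics may be inaccurate
# if the repository is modified concurrently
restic -r /srv/restic-repo prune --dry-run --no-lock
```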

# Changelog for restic 0.17.2 (2024-10-27)
The following sections list the changes in restic 0.17.2 relevant to
restic users. The changes are ordered by importance.

## Summary

* Fix #4004: Support container-level SAS/SAT tokens for Azure backend
* Fix #5047: Resolve potential error during concurrent cache cleanup
* Fix #5050: Return error if `tag` fails to lock repository
* Fix #5057: Exclude irregular files from backups
* Fix #5063: Correctly `backup` extended metadata when using VSS on Windows

## Details

* Bugfix #4004: Support container-level SAS/SAT tokens for Azure backend

Restic previously expected SAS/SAT tokens to be generated at the account level,
which prevented tokens created at the container level from being used to
initialize a repository. This caused an error when attempting to initialize a
repository with container-level tokens.

Restic now supports both account-level and container-level SAS/SAT tokens for
initializing a repository.

https://github.com/restic/restic/issues/4004
https://github.com/restic/restic/pull/5093
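
A minimal sketch of initializing a repository with a container-level token; the environment variable names below follow restic's Azure backend configuration and are assumptions here, and the account, container, and token values are placeholders:

```
# assumed configuration variables per restic's Azure docs; values are placeholders
export AZURE_ACCOUNT_NAME=myaccount
export AZURE_ACCOUNT_SAS='<container-level SAS token>'
restic -r azure:mycontainer:/ init
```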

* Bugfix #5047: Resolve potential error during concurrent cache cleanup

When multiple restic processes ran concurrently, they could compete to remove
obsolete snapshots from the local backend cache, sometimes leading to a "no such
file or directory" error. Restic now suppresses this error to prevent issues
during cache cleanup.

https://github.com/restic/restic/pull/5047

* Bugfix #5050: Return error if `tag` fails to lock repository

Since restic 0.17.0, the `tag` command did not return an error when it failed to
open or lock the repository. This issue has now been fixed.

https://github.com/restic/restic/issues/5050
https://github.com/restic/restic/pull/5056

* Bugfix #5057: Exclude irregular files from backups

Since restic 0.17.1, files with the type `irregular` could mistakenly be
included in snapshots, especially when backing up special file types on Windows
that restic cannot process. This issue has now been fixed.

Previously, this bug caused the `check` command to report errors like the
following one:

```
tree 12345678[...]: node "example.zip" with invalid type "irregular"
```

To repair affected snapshots, upgrade to restic 0.17.2 and run:

```
restic repair snapshots --forget
```

This will remove the `irregular` files from the snapshots (creating a new
snapshot ID for each of the affected snapshots).

https://github.com/restic/restic/pull/5057
https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2

* Bugfix #5063: Correctly `backup` extended metadata when using VSS on Windows

On Windows, when creating a backup with the `--use-fs-snapshot` option, restic
read extended metadata from the original filesystem path instead of from the
snapshot. This could result in errors if files were removed during the backup
process.

This issue has now been resolved.

https://github.com/restic/restic/issues/5063
https://github.com/restic/restic/pull/5097
https://github.com/restic/restic/pull/5099


# Changelog for restic 0.17.1 (2024-09-05)
The following sections list the changes in restic 0.17.1 relevant to
restic users. The changes are ordered by importance.

changelog/0.17.2_2024-10-27/issue-4004: new file, 12 lines
@@ -0,0 +1,12 @@
Bugfix: Support container-level SAS/SAT tokens for Azure backend

Restic previously expected SAS/SAT tokens to be generated at the account level,
which prevented tokens created at the container level from being used to
initialize a repository. This caused an error when attempting to initialize a
repository with container-level tokens.

Restic now supports both account-level and container-level SAS/SAT tokens for
initializing a repository.

https://github.com/restic/restic/issues/4004
https://github.com/restic/restic/pull/5093

changelog/0.17.2_2024-10-27/issue-5050: new file, 7 lines
@@ -0,0 +1,7 @@
Bugfix: Return error if `tag` fails to lock repository

Since restic 0.17.0, the `tag` command did not return an error when it failed
to open or lock the repository. This issue has now been fixed.

https://github.com/restic/restic/issues/5050
https://github.com/restic/restic/pull/5056

changelog/0.17.2_2024-10-27/issue-5063: new file, 12 lines
@@ -0,0 +1,12 @@
Bugfix: Correctly `backup` extended metadata when using VSS on Windows

On Windows, when creating a backup with the `--use-fs-snapshot` option, restic
read extended metadata from the original filesystem path instead of from the
snapshot. This could result in errors if files were removed during the backup
process.

This issue has now been resolved.

https://github.com/restic/restic/issues/5063
https://github.com/restic/restic/pull/5097
https://github.com/restic/restic/pull/5099

changelog/0.17.2_2024-10-27/pull-5047: new file, 8 lines
@@ -0,0 +1,8 @@
Bugfix: Resolve potential error during concurrent cache cleanup

When multiple restic processes ran concurrently, they could compete to remove
obsolete snapshots from the local backend cache, sometimes leading to a "no
such file or directory" error. Restic now suppresses this error to prevent
issues during cache cleanup.

https://github.com/restic/restic/pull/5047

changelog/0.17.2_2024-10-27/pull-5057: new file, 24 lines
@@ -0,0 +1,24 @@
Bugfix: Exclude irregular files from backups

Since restic 0.17.1, files with the type `irregular` could mistakenly be included
in snapshots, especially when backing up special file types on Windows that
restic cannot process. This issue has now been fixed.

Previously, this bug caused the `check` command to report errors like the
following one:

```
tree 12345678[...]: node "example.zip" with invalid type "irregular"
```

To repair affected snapshots, upgrade to restic 0.17.2 and run:

```
restic repair snapshots --forget
```

This will remove the `irregular` files from the snapshots (creating
a new snapshot ID for each of the affected snapshots).

https://github.com/restic/restic/pull/5057
https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2

changelog/0.17.3_2024-11-08/issue-4971: new file, 7 lines
@@ -0,0 +1,7 @@
Bugfix: Fix unusable `mount` on macOS Sonoma

On macOS Sonoma when using FUSE-T, it was not possible to access files in
a mounted repository. This issue is now resolved.

https://github.com/restic/restic/issues/4971
https://github.com/restic/restic/pull/5048

changelog/0.17.3_2024-11-08/issue-5003: new file, 14 lines
@@ -0,0 +1,14 @@
Bugfix: Fix metadata errors during backup of removable disks on Windows

Since restic 0.17.0, backing up removable disks on Windows could report
errors with retrieving metadata like shown below.

```
error: incomplete metadata for d:\filename: get named security info failed with: Access is denied.
```

This has now been fixed.

https://github.com/restic/restic/issues/5003
https://github.com/restic/restic/pull/5123
https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444

changelog/0.17.3_2024-11-08/issue-5107: new file, 15 lines
@@ -0,0 +1,15 @@
Bugfix: Fix metadata error on Windows for backups using VSS

Since restic 0.17.2, when creating a backup on Windows using `--use-fs-snapshot`,
restic would report an error like the following:

```
error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process.
```

This has now been fixed by correctly handling paths that refer to volume
shadow copy snapshots.

https://github.com/restic/restic/issues/5107
https://github.com/restic/restic/pull/5110
https://github.com/restic/restic/pull/5112

changelog/0.17.3_2024-11-08/pull-5096: new file, 8 lines
@@ -0,0 +1,8 @@
Enhancement: Allow `prune --dry-run` without lock

The `prune --dry-run --no-lock` now allows performing a dry-run
without locking the repository. Note that if the repository is
modified concurrently, `prune` may return inaccurate statistics
or errors.

https://github.com/restic/restic/pull/5096

changelog/0.17.3_2024-11-08/pull-5101: new file, 8 lines
@@ -0,0 +1,8 @@
Bugfix: Do not retry load/list operation if SFTP connection is broken

When using restic with the SFTP backend, backend operations that load a
file or list files were retried even if the SFTP connection was broken.
This has now been fixed.

https://github.com/restic/restic/pull/5101
https://forum.restic.net/t/restic-hanging-on-backup/8559
@@ -15,7 +15,7 @@ Details
{{ range $entry := .Entries }}{{ with $entry }}
* {{ .Type }} #{{ .PrimaryID }}: {{ .Title }}
{{ range $par := .Paragraphs }}
{{ $par }}
{{ indent 3 $par }}
{{ end }}
{{ range $id := .Issues -}}
{{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}})
@@ -95,6 +95,7 @@ type BackupOptions struct {
|
||||
}
|
||||
|
||||
var backupOptions BackupOptions
|
||||
var backupFSTestHook func(fs fs.FS) fs.FS
|
||||
|
||||
// ErrInvalidSourceData is used to report an incomplete backup
|
||||
var ErrInvalidSourceData = errors.New("at least one source file could not be read")
|
||||
@@ -598,6 +599,10 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
|
||||
targets = []string{filename}
|
||||
}
|
||||
|
||||
if backupFSTestHook != nil {
|
||||
targetFS = backupFSTestHook(targetFS)
|
||||
}
|
||||
|
||||
wg, wgCtx := errgroup.WithContext(ctx)
|
||||
cancelCtx, cancel := context.WithCancel(wgCtx)
|
||||
defer cancel()
|
||||
|
@@ -8,6 +8,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
@@ -51,14 +52,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||
opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
|
||||
|
||||
// first backup
|
||||
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||
testListSnapshots(t, env.gopts, 1)
|
||||
|
||||
testRunCheck(t, env.gopts)
|
||||
stat1 := dirStats(env.repo)
|
||||
|
||||
// second backup, implicit incremental
|
||||
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||
snapshotIDs := testListSnapshots(t, env.gopts, 2)
|
||||
|
||||
stat2 := dirStats(env.repo)
|
||||
@@ -70,7 +71,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||
testRunCheck(t, env.gopts)
|
||||
// third backup, explicit incremental
|
||||
opts.Parent = snapshotIDs[0].String()
|
||||
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||
snapshotIDs = testListSnapshots(t, env.gopts, 3)
|
||||
|
||||
stat3 := dirStats(env.repo)
|
||||
@@ -83,7 +84,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||
for i, snapshotID := range snapshotIDs {
|
||||
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID.String()+":"+toPathInSnapshot(filepath.Dir(env.testdata)))
|
||||
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||
rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
|
||||
}
|
||||
@@ -91,6 +92,20 @@ func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||
testRunCheck(t, env.gopts)
|
||||
}
|
||||
|
||||
func toPathInSnapshot(path string) string {
|
||||
// use path as is on most platforms, but convert it on windows
|
||||
if runtime.GOOS == "windows" {
|
||||
// the path generated by the test is always local so take the shortcut
|
||||
vol := filepath.VolumeName(path)
|
||||
if vol[len(vol)-1] != ':' {
|
||||
panic(fmt.Sprintf("unexpected path: %q", path))
|
||||
}
|
||||
path = vol[:len(vol)-1] + string(filepath.Separator) + path[len(vol)+1:]
|
||||
path = filepath.ToSlash(path)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func TestBackupWithRelativePath(t *testing.T) {
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
@@ -111,6 +126,63 @@ func TestBackupWithRelativePath(t *testing.T) {
|
||||
rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
|
||||
}
|
||||
|
||||
type vssDeleteOriginalFS struct {
|
||||
fs.FS
|
||||
testdata string
|
||||
hasRemoved bool
|
||||
}
|
||||
|
||||
func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) {
|
||||
if !f.hasRemoved {
|
||||
// call Lstat to trigger snapshot creation
|
||||
_, _ = f.FS.Lstat(name)
|
||||
// nuke testdata
|
||||
var err error
|
||||
for i := 0; i < 3; i++ {
|
||||
// The CI sometimes runs into "The process cannot access the file because it is being used by another process" errors
|
||||
// thus try a few times to remove the data
|
||||
err = os.RemoveAll(f.testdata)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.hasRemoved = true
|
||||
}
|
||||
return f.FS.Lstat(name)
|
||||
}
|
||||
|
||||
func TestBackupVSS(t *testing.T) {
|
||||
if runtime.GOOS != "windows" || fs.HasSufficientPrivilegesForVSS() != nil {
|
||||
t.Skip("vss fs test can only be run on windows with admin privileges")
|
||||
}
|
||||
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
testSetupBackupData(t, env)
|
||||
opts := BackupOptions{UseFsSnapshot: true}
|
||||
|
||||
var testFS *vssDeleteOriginalFS
|
||||
backupFSTestHook = func(fs fs.FS) fs.FS {
|
||||
testFS = &vssDeleteOriginalFS{
|
||||
FS: fs,
|
||||
testdata: env.testdata,
|
||||
}
|
||||
return testFS
|
||||
}
|
||||
defer func() {
|
||||
backupFSTestHook = nil
|
||||
}()
|
||||
|
||||
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||
testListSnapshots(t, env.gopts, 1)
|
||||
rtest.Equals(t, true, testFS.hasRemoved, "testdata was not removed")
|
||||
}
|
||||
|
||||
func TestBackupParentSelection(t *testing.T) {
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
@@ -499,7 +571,7 @@ func TestHardLink(t *testing.T) {
|
||||
for i, snapshotID := range snapshotIDs {
|
||||
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID.String())
|
||||
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
|
||||
|
||||
|
@@ -62,11 +62,11 @@ func TestCopy(t *testing.T) {
|
||||
for i, snapshotID := range snapshotIDs {
|
||||
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||
origRestores[restoredir] = struct{}{}
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||
testRunRestore(t, env.gopts, restoredir, snapshotID.String())
|
||||
}
|
||||
for i, snapshotID := range copiedSnapshotIDs {
|
||||
restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
|
||||
testRunRestore(t, env2.gopts, restoredir, snapshotID)
|
||||
testRunRestore(t, env2.gopts, restoredir, snapshotID.String())
|
||||
foundMatch := false
|
||||
for cmpdir := range origRestores {
|
||||
diff := directoriesContentsDiff(restoredir, cmpdir)
|
||||
|
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/repository/index"
|
||||
@@ -10,8 +11,11 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var listAllowedArgs = []string{"blobs", "packs", "index", "snapshots", "keys", "locks"}
|
||||
var listAllowedArgsUseString = strings.Join(listAllowedArgs, "|")
|
||||
|
||||
var cmdList = &cobra.Command{
|
||||
Use: "list [flags] [blobs|packs|index|snapshots|keys|locks]",
|
||||
Use: "list [flags] [" + listAllowedArgsUseString + "]",
|
||||
Short: "List objects in the repository",
|
||||
Long: `
|
||||
The "list" command allows listing objects in the repository based on type.
|
||||
@@ -30,6 +34,8 @@ Exit status is 12 if the password is incorrect.
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runList(cmd.Context(), globalOptions, args)
|
||||
},
|
||||
ValidArgs: listAllowedArgs,
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@@ -149,7 +149,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term
		return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive")
	}

	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
	if gopts.NoLock && !opts.DryRun {
		return errors.Fatal("--no-lock is only applicable in combination with --dry-run for prune command")
	}

	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock)
	if err != nil {
		return err
	}
@@ -92,6 +92,10 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
	// - files whose contents are not fully available  (-> file will be modified)
	rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
		RewriteNode: func(node *restic.Node, path string) *restic.Node {
			if node.Type == "irregular" || node.Type == "" {
				Verbosef("  file %q: removed node with invalid type %q\n", path, node.Type)
				return nil
			}
			if node.Type != "file" {
				return node
			}
@@ -18,17 +18,17 @@ import (
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
)
|
||||
|
||||
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
|
||||
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID string) {
|
||||
testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
|
||||
}
|
||||
|
||||
func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
|
||||
func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID string, excludes []string) {
|
||||
opts := RestoreOptions{
|
||||
Target: dir,
|
||||
}
|
||||
opts.Excludes = excludes
|
||||
|
||||
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
|
||||
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID, opts, gopts))
|
||||
}
|
||||
|
||||
func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
|
||||
@@ -198,7 +198,7 @@ func TestRestoreFilter(t *testing.T) {
|
||||
snapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||
|
||||
// no restore filter should restore all files
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID.String())
|
||||
for _, testFile := range testfiles {
|
||||
rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
|
||||
}
|
||||
@@ -220,7 +220,7 @@ func TestRestoreFilter(t *testing.T) {
|
||||
|
||||
// restore with excludes
|
||||
restoredir := filepath.Join(env.base, "restore-with-excludes")
|
||||
testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID, excludePatterns)
|
||||
testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID.String(), excludePatterns)
|
||||
testRestoredFileExclusions(t, restoredir)
|
||||
|
||||
// Create an exclude file with some patterns
|
||||
@@ -340,7 +340,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) {
|
||||
|
||||
_ = withRestoreGlobalOptions(func() error {
|
||||
globalOptions.stderr = io.Discard
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0].String())
|
||||
return nil
|
||||
})
|
||||
|
||||
|
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -140,7 +139,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
|
||||
if selectByName(path) {
|
||||
return node
|
||||
}
|
||||
Verbosef(fmt.Sprintf("excluding %s\n", path))
|
||||
Verbosef("excluding %s\n", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -110,7 +110,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
	Verbosef("create exclusive lock for repository\n")
	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
	if err != nil {
		return nil
		return err
	}
	defer unlock()
@@ -47,7 +47,7 @@ import (
// to a missing backend storage location or config file
var ErrNoRepository = errors.New("repository does not exist")

var version = "0.17.1"
var version = "0.17.3-dev (compiled manually)"

// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
@@ -168,6 +169,16 @@ type testEnvironment struct {
|
||||
gopts GlobalOptions
|
||||
}
|
||||
|
||||
type logOutputter struct {
|
||||
t testing.TB
|
||||
}
|
||||
|
||||
func (l *logOutputter) Write(p []byte) (n int, err error) {
|
||||
l.t.Helper()
|
||||
l.t.Log(strings.TrimSuffix(string(p), "\n"))
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// withTestEnvironment creates a test environment and returns a cleanup
|
||||
// function which removes it.
|
||||
func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
|
||||
@@ -200,8 +211,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
|
||||
Quiet: true,
|
||||
CacheDir: env.cache,
|
||||
password: rtest.TestPassword,
|
||||
stdout: os.Stdout,
|
||||
stderr: os.Stderr,
|
||||
// stdout and stderr are written to by Warnf etc. That is the written data
|
||||
// usually consists of one or multiple lines and therefore can be handled well
|
||||
// by t.Log.
|
||||
stdout: &logOutputter{t},
|
||||
stderr: &logOutputter{t},
|
||||
extended: make(options.Options),
|
||||
|
||||
// replace this hook with "nil" if listing a filetype more than once is necessary
|
||||
|
@@ -35,7 +35,7 @@ func TestCheckRestoreNoLock(t *testing.T) {
|
||||
testRunCheck(t, env.gopts)
|
||||
|
||||
snapshotIDs := testListSnapshots(t, env.gopts, 4)
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
|
||||
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0].String())
|
||||
}
|
||||
|
||||
// a listOnceBackend only allows listing once per filetype
|
||||
|
@@ -455,9 +455,11 @@ Backblaze B2
than using the Backblaze B2 backend directly.

Different from the B2 backend, restic's S3 backend will only hide no longer
necessary files. Thus, make sure to setup lifecycle rules to eventually
delete hidden files. The lifecycle setting "Keep only the last version of the file"
will keep only the most current version of a file. Read the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules).
necessary files. By default, Backblaze B2 retains all of the different versions of the
files and "hides" the older versions. Thus, to free space occupied by hidden files,
it is **recommended** to use the B2 lifecycle "Keep only the last version of the file".
The previous version of the file is "hidden" for one day and then deleted automatically
by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules).

Restic can backup data to any Backblaze B2 bucket. You need to first setup the
following environment variables with the credentials you can find in the
@@ -132,6 +132,10 @@ options will be deleted. For example, the command
``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete``
would only delete files within ``/tmp/restore-work/foo``.

When using ``--target / --delete`` then the ``restore`` command only works if either an ``--include``
or ``--exclude`` option is also specified. This ensures that one cannot accidentally delete
the whole system.
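
A minimal sketch of the guarded form described above, assuming the snapshot reference ``latest`` and a placeholder include path:

```
# --delete together with --target / is only accepted when an --include
# or --exclude filter is also given
restic -r /srv/restic-repo restore latest --target / --include /home/user --delete
```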

Dry run
-------

@@ -191,9 +191,9 @@ Summary is the last output line in a successful backup.
+---------------------------+---------------------------------------------------------+
| ``dirs_unmodified``       | Number of directories that did not change               |
+---------------------------+---------------------------------------------------------+
| ``data_blobs``            | Number of data blobs                                     |
| ``data_blobs``            | Number of data blobs added                               |
+---------------------------+---------------------------------------------------------+
| ``tree_blobs``            | Number of tree blobs                                     |
| ``tree_blobs``            | Number of tree blobs added                               |
+---------------------------+---------------------------------------------------------+
| ``data_added``            | Amount of (uncompressed) data added, in bytes            |
+---------------------------+---------------------------------------------------------+
@@ -651,9 +651,9 @@ was created.
+---------------------------+---------------------------------------------------------+
| ``dirs_unmodified``       | Number of directories that did not change               |
+---------------------------+---------------------------------------------------------+
| ``data_blobs``            | Number of data blobs                                     |
| ``data_blobs``            | Number of data blobs added                               |
+---------------------------+---------------------------------------------------------+
| ``tree_blobs``            | Number of tree blobs                                     |
| ``tree_blobs``            | Number of tree blobs added                               |
+---------------------------+---------------------------------------------------------+
| ``data_added``            | Amount of (uncompressed) data added, in bytes            |
+---------------------------+---------------------------------------------------------+
@@ -2177,6 +2177,12 @@ _restic_list()
|
||||
|
||||
must_have_one_flag=()
|
||||
must_have_one_noun=()
|
||||
must_have_one_noun+=("blobs")
|
||||
must_have_one_noun+=("index")
|
||||
must_have_one_noun+=("keys")
|
||||
must_have_one_noun+=("locks")
|
||||
must_have_one_noun+=("packs")
|
||||
must_have_one_noun+=("snapshots")
|
||||
noun_aliases=()
|
||||
}
|
||||
|
||||
|
@@ -31,7 +31,7 @@ var opts = struct {
|
||||
var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
|
||||
|
||||
func init() {
|
||||
pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'")
|
||||
pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches than 'master'")
|
||||
pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes")
|
||||
pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md")
|
||||
pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/")
|
||||
@@ -128,17 +128,22 @@ func uncommittedChanges(dirs ...string) string {
|
||||
return string(changes)
|
||||
}
|
||||
|
||||
func preCheckBranchMaster() {
|
||||
if opts.IgnoreBranchName {
|
||||
return
|
||||
}
|
||||
|
||||
func getBranchName() string {
|
||||
branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output()
|
||||
if err != nil {
|
||||
die("error running 'git': %v", err)
|
||||
}
|
||||
|
||||
if strings.TrimSpace(string(branch)) != "master" {
|
||||
return strings.TrimSpace(string(branch))
|
||||
}
|
||||
|
||||
func preCheckBranchMaster() {
|
||||
if opts.IgnoreBranchName {
|
||||
return
|
||||
}
|
||||
|
||||
branch := getBranchName()
|
||||
if branch != "master" {
|
||||
die("wrong branch: %s", branch)
|
||||
}
|
||||
}
|
||||
@@ -449,6 +454,7 @@ func main() {
|
||||
}
|
||||
|
||||
preCheckBranchMaster()
|
||||
branch := getBranchName()
|
||||
preCheckUncommittedChanges()
|
||||
preCheckVersionExists()
|
||||
preCheckDockerBuilderGoVersion()
|
||||
@@ -485,5 +491,5 @@ func main() {
|
||||
|
||||
msg("done, output dir is %v", opts.OutputDir)
|
||||
|
||||
msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir)
|
||||
msg("now run:\n\ngit push --tags origin %s\n%s\n\nrm -rf %q", branch, dockerCmds, sourceDir)
|
||||
}
|
||||
|
@@ -248,7 +248,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I
|
||||
|
||||
// nodeFromFileInfo returns the restic node from an os.FileInfo.
|
||||
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) {
|
||||
node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError)
|
||||
mappedFilename := arch.FS.MapFilename(filename)
|
||||
node, err := restic.NodeFromFileInfo(mappedFilename, fi, ignoreXattrListError)
|
||||
if !arch.WithAtime {
|
||||
node.AccessTime = node.ModTime
|
||||
}
|
||||
@@ -262,7 +263,8 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo,
|
||||
}
|
||||
// overwrite name to match that within the snapshot
|
||||
node.Name = path.Base(snPath)
|
||||
if err != nil {
|
||||
// do not filter error for nodes of irregular or invalid type
|
||||
if node.Type != "irregular" && node.Type != "" && err != nil {
|
||||
err = fmt.Errorf("incomplete metadata for %v: %w", filename, err)
|
||||
return node, arch.error(filename, err)
|
||||
}
|
||||
|
@@ -2423,4 +2423,47 @@ func TestMetadataBackupErrorFiltering(t *testing.T) {
|
||||
rtest.Assert(t, node != nil, "node is missing")
|
||||
rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err)
|
||||
rtest.Assert(t, filteredErr != nil, "missing inner error")
|
||||
|
||||
// check that errors from reading irregular file are not filtered
|
||||
filteredErr = nil
|
||||
node, err = arch.nodeFromFileInfo("file", filename, wrapIrregularFileInfo(fi), false)
|
||||
rtest.Assert(t, node != nil, "node is missing")
|
||||
rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered")
|
||||
rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err)
|
||||
}
|
||||
|
||||
func TestIrregularFile(t *testing.T) {
|
||||
files := TestDir{
|
||||
"testfile": TestFile{
|
||||
Content: "foo bar test file",
|
||||
},
|
||||
}
|
||||
tempdir, repo := prepareTempdirRepoSrc(t, files)
|
||||
|
||||
back := rtest.Chdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
tempfile := filepath.Join(tempdir, "testfile")
|
||||
fi := lstat(t, "testfile")
|
||||
|
||||
statfs := &StatFS{
|
||||
FS: fs.Local{},
|
||||
OverrideLstat: map[string]os.FileInfo{
|
||||
tempfile: wrapIrregularFileInfo(fi),
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
arch := New(repo, fs.Track{FS: statfs}, Options{})
|
||||
_, excluded, err := arch.save(ctx, "/", tempfile, nil)
|
||||
if err == nil {
|
||||
t.Fatalf("Save() should have failed")
|
||||
}
|
||||
rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err)
|
||||
|
||||
if excluded {
|
||||
t.Errorf("Save() excluded the node, that's unexpected")
|
||||
}
|
||||
}
|
||||
|
@@ -46,6 +46,16 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo {
|
||||
return res
|
||||
}
|
||||
|
||||
// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file
|
||||
func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo {
|
||||
// wrap the os.FileInfo so we can return a modified stat_t
|
||||
return wrappedFileInfo{
|
||||
FileInfo: fi,
|
||||
sys: fi.Sys().(*syscall.Stat_t),
|
||||
mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular,
|
||||
}
|
||||
}
|
||||
|
||||
func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) {
|
||||
fi := lstat(t, name)
|
||||
want, err := restic.NodeFromFileInfo(name, fi, false)
|
||||
|
@@ -26,3 +26,11 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo {
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file
|
||||
func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo {
|
||||
return wrappedFileInfo{
|
||||
FileInfo: fi,
|
||||
mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular,
|
||||
}
|
||||
}
|
||||
|
@@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat
|
||||
|
||||
debug.Log("%v", snPath)
|
||||
|
||||
node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false)
|
||||
node, err := s.NodeFromFileInfo(snPath, target, fi, false)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
completeError(err)
|
||||
|
@@ -160,6 +160,12 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er
		if err != nil {
			return nil, errors.Wrap(err, "container.Create")
		}
	} else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) {
		// We ignore this Auth. Failure, as the failure is related to the type
		// of SAS/SAT, not an actual real failure. If the token is invalid, we
		// fail later on anyway.
		// For details see Issue #4004.
		debug.Log("Ignoring AuthorizationFailure when calling GetProperties")
	} else if err != nil {
		return be, errors.Wrap(err, "container.GetProperties")
	}
@@ -80,6 +80,91 @@ func BenchmarkBackendAzure(t *testing.B) {
|
||||
newAzureTestSuite().RunBenchmarks(t)
|
||||
}
|
||||
|
||||
// TestBackendAzureAccountToken tests that a Storage Account SAS/SAT token can authorize.
|
||||
// This test ensures that restic can use a token that was generated using the storage
|
||||
// account keys can be used to authorize the azure connection.
|
||||
// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the
|
||||
// RESTIC_TEST_AZURE_ACCOUNT_SAS environment variables to be set, otherwise this test
|
||||
// will be skipped.
|
||||
func TestBackendAzureAccountToken(t *testing.T) {
|
||||
vars := []string{
|
||||
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
|
||||
"RESTIC_TEST_AZURE_REPOSITORY",
|
||||
"RESTIC_TEST_AZURE_ACCOUNT_SAS",
|
||||
}
|
||||
|
||||
for _, v := range vars {
|
||||
if os.Getenv(v) == "" {
|
||||
t.Skipf("set %v to test SAS/SAT Token Authentication", v)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
|
||||
cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_SAS"))
|
||||
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = azure.Create(ctx, *cfg, tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackendAzureContainerToken tests that a container SAS/SAT token can authorize.
|
||||
// This test ensures that restic can use a token that was generated using a user
|
||||
// delegation key against the container we are storing data in can be used to
|
||||
// authorize the azure connection.
|
||||
// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the
|
||||
// RESTIC_TEST_AZURE_CONTAINER_SAS environment variables to be set, otherwise this test
|
||||
// will be skipped.
|
||||
func TestBackendAzureContainerToken(t *testing.T) {
|
||||
vars := []string{
|
||||
"RESTIC_TEST_AZURE_ACCOUNT_NAME",
|
||||
"RESTIC_TEST_AZURE_REPOSITORY",
|
||||
"RESTIC_TEST_AZURE_CONTAINER_SAS",
|
||||
}
|
||||
|
||||
for _, v := range vars {
|
||||
if os.Getenv(v) == "" {
|
||||
t.Skipf("set %v to test SAS/SAT Token Authentication", v)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
|
||||
cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_CONTAINER_SAS"))
|
||||
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = azure.Create(ctx, *cfg, tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUploadLargeFile(t *testing.T) {
|
||||
if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" {
|
||||
t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads")
|
||||
|
internal/backend/cache/file.go: 4 lines changed
@@ -211,6 +211,10 @@ func (c *Cache) list(t restic.FileType) (restic.IDSet, error) {
	dir := filepath.Join(c.path, cacheLayoutPaths[t])
	err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
		if err != nil {
			// ignore ErrNotExist to gracefully handle multiple processes clearing the cache
			if errors.Is(err, os.ErrNotExist) {
				return nil
			}
			return errors.Wrap(err, "Walk")
		}
@@ -421,6 +421,10 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error {
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	if err := r.clientError(); err != nil {
		return err
	}

	return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error {
		if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) {
			return fn(rd)
@@ -490,6 +494,10 @@ func (r *SFTP) Remove(_ context.Context, h backend.Handle) error {
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	if err := r.clientError(); err != nil {
		return err
	}

	basedir, subdirs := r.Basedir(t)
	walker := r.c.Walk(basedir)
	for {
@@ -20,6 +20,15 @@ func fixpath(name string) string {
	if strings.HasPrefix(abspath, `\\?\UNC\`) {
		return abspath
	}
	// Check if \\?\GLOBALROOT exists which marks volume shadow copy snapshots
	if strings.HasPrefix(abspath, `\\?\GLOBALROOT\`) {
		if strings.Count(abspath, `\`) == 5 {
			// Append slash if this just a volume name, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX`
			// Without the trailing slash any access to the volume itself will fail.
			return abspath + string(filepath.Separator)
		}
		return abspath
	}
	// Check if \\?\ already exist
	if strings.HasPrefix(abspath, `\\?\`) {
		return abspath
@@ -18,6 +18,12 @@ func (fs Local) VolumeName(path string) string {
|
||||
return filepath.VolumeName(path)
|
||||
}
|
||||
|
||||
// MapFilename is a temporary hack to prepare a filename for usage with
|
||||
// NodeFromFileInfo. This is only relevant for LocalVss.
|
||||
func (fs Local) MapFilename(filename string) string {
|
||||
return filename
|
||||
}
|
||||
|
||||
// Open opens a file for reading.
|
||||
func (fs Local) Open(name string) (File, error) {
|
||||
f, err := os.Open(fixpath(name))
|
||||
|
@@ -145,6 +145,12 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) {
|
||||
return os.Lstat(fs.snapshotPath(name))
|
||||
}
|
||||
|
||||
// MapFilename is a temporary hack to prepare a filename for usage with
|
||||
// NodeFromFileInfo. This is only relevant for LocalVss.
|
||||
func (fs *LocalVss) MapFilename(filename string) string {
|
||||
return fs.snapshotPath(filename)
|
||||
}
|
||||
|
||||
// isMountPointIncluded is true if given mountpoint included by user.
|
||||
func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool {
|
||||
if fs.excludeVolumes == nil {
|
||||
|
@@ -39,6 +39,12 @@ func (fs *Reader) VolumeName(_ string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// MapFilename is a temporary hack to prepare a filename for usage with
|
||||
// NodeFromFileInfo. This is only relevant for LocalVss.
|
||||
func (fs *Reader) MapFilename(filename string) string {
|
||||
return filename
|
||||
}
|
||||
|
||||
// Open opens a file for reading.
|
||||
func (fs *Reader) Open(name string) (f File, err error) {
|
||||
switch name {
|
||||
@@ -223,7 +229,7 @@ func (r *readerFile) Close() error {
|
||||
var _ File = &readerFile{}
|
||||
|
||||
// fakeFile implements all File methods, but only returns errors for anything
|
||||
// except Stat() and Name().
|
||||
// except Stat()
|
||||
type fakeFile struct {
|
||||
name string
|
||||
os.FileInfo
|
||||
@@ -260,10 +266,6 @@ func (f fakeFile) Stat() (os.FileInfo, error) {
|
||||
return f.FileInfo, nil
|
||||
}
|
||||
|
||||
func (f fakeFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile.
|
||||
type fakeDir struct {
|
||||
entries []os.FileInfo
|
||||
|
@@ -11,6 +11,7 @@ type FS interface {
|
||||
OpenFile(name string, flag int, perm os.FileMode) (File, error)
|
||||
Stat(name string) (os.FileInfo, error)
|
||||
Lstat(name string) (os.FileInfo, error)
|
||||
MapFilename(filename string) string
|
||||
|
||||
Join(elem ...string) string
|
||||
Separator() string
|
||||
@@ -33,5 +34,4 @@ type File interface {
|
||||
Readdir(int) ([]os.FileInfo, error)
|
||||
Seek(int64, int) (int64, error)
|
||||
Stat() (os.FileInfo, error)
|
||||
Name() string
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -12,5 +13,17 @@ func PreallocateFile(wr *os.File, size int64) error {
|
||||
}
|
||||
// int fallocate(int fd, int mode, off_t offset, off_t len)
|
||||
// use mode = 0 to also change the file size
|
||||
return unix.Fallocate(int(wr.Fd()), 0, 0, size)
|
||||
return ignoringEINTR(func() error { return unix.Fallocate(int(wr.Fd()), 0, 0, size) })
|
||||
}
|
||||
|
||||
// ignoringEINTR makes a function call and repeats it if it returns
|
||||
// an EINTR error.
|
||||
// copied from /usr/lib/go/src/internal/poll/fd_posix.go of go 1.23.1
|
||||
func ignoringEINTR(fn func() error) error {
|
||||
for {
|
||||
err := fn()
|
||||
if err != syscall.EINTR {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -54,6 +54,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err
|
||||
sd, err = getNamedSecurityInfoLow(filePath)
|
||||
} else {
|
||||
sd, err = getNamedSecurityInfoHigh(filePath)
|
||||
// Fallback to the low privilege version when receiving an access denied error.
|
||||
// For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media
|
||||
// but instead an access denied error is returned. Workaround that by just retrying with
|
||||
// the low privilege version, but don't switch privileges as we cannot distinguish this
|
||||
// case from actual access denied errors.
|
||||
// see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details
|
||||
if err != nil && isAccessDeniedError(err) {
|
||||
sd, err = getNamedSecurityInfoLow(filePath)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) {
|
||||
@@ -114,6 +123,10 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error {
|
||||
err = setNamedSecurityInfoLow(filePath, dacl)
|
||||
} else {
|
||||
err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl)
|
||||
// See corresponding fallback in getSecurityDescriptor for an explanation
|
||||
if err != nil && isAccessDeniedError(err) {
|
||||
err = setNamedSecurityInfoLow(filePath, dacl)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -174,6 +187,15 @@ func isHandlePrivilegeNotHeldError(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED
|
||||
func isAccessDeniedError(err error) bool {
|
||||
if errno, ok := err.(syscall.Errno); ok {
|
||||
// Compare the error code to the expected value
|
||||
return errno == windows.ERROR_ACCESS_DENIED
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SecurityDescriptorBytesToStruct converts the security descriptor bytes representation
|
||||
// into a pointer to windows SECURITY_DESCRIPTOR.
|
||||
func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) {
|
||||
|
@@ -20,29 +20,36 @@ import (
|
||||
|
||||
 // Statically ensure that *dir implement those interface
 var _ = fs.HandleReadDirAller(&dir{})
+var _ = fs.NodeForgetter(&dir{})
 var _ = fs.NodeGetxattrer(&dir{})
 var _ = fs.NodeListxattrer(&dir{})
 var _ = fs.NodeStringLookuper(&dir{})

 type dir struct {
 	root        *Root
+	forget      forgetFn
 	items       map[string]*restic.Node
 	inode       uint64
 	parentInode uint64
 	node        *restic.Node
 	m           sync.Mutex
+	cache       treeCache
 }

 func cleanupNodeName(name string) string {
 	return filepath.Base(name)
 }

-func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) {
+func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *restic.Node) (*dir, error) {
 	debug.Log("new dir for %v (%v)", node.Name, node.Subtree)

 	return &dir{
 		root:        root,
+		forget:      forget,
 		node:        node,
 		inode:       inode,
 		parentInode: parentInode,
+		cache:       *newTreeCache(),
 	}, nil
 }

@@ -75,10 +82,11 @@ func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *rest
 	return tree.Nodes, nil
 }

-func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) {
+func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *restic.Snapshot) (*dir, error) {
 	debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree)
 	return &dir{
-		root: root,
+		root:   root,
+		forget: forget,
 		node: &restic.Node{
 			AccessTime: snapshot.Time,
 			ModTime:    snapshot.Time,
@@ -87,6 +95,7 @@ func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*d
 			Subtree: snapshot.Tree,
 		},
 		inode: inode,
+		cache: *newTreeCache(),
 	}, nil
 }

@@ -208,25 +217,27 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
 		return nil, err
 	}

-	node, ok := d.items[name]
-	if !ok {
-		debug.Log(" Lookup(%v) -> not found", name)
-		return nil, syscall.ENOENT
-	}
-	inode := inodeFromNode(d.inode, node)
-	switch node.Type {
-	case "dir":
-		return newDir(d.root, inode, d.inode, node)
-	case "file":
-		return newFile(d.root, inode, node)
-	case "symlink":
-		return newLink(d.root, inode, node)
-	case "dev", "chardev", "fifo", "socket":
-		return newOther(d.root, inode, node)
-	default:
-		debug.Log(" node %v has unknown type %v", name, node.Type)
-		return nil, syscall.ENOENT
-	}
+	return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) {
+		node, ok := d.items[name]
+		if !ok {
+			debug.Log(" Lookup(%v) -> not found", name)
+			return nil, syscall.ENOENT
+		}
+		inode := inodeFromNode(d.inode, node)
+		switch node.Type {
+		case "dir":
+			return newDir(d.root, forget, inode, d.inode, node)
+		case "file":
+			return newFile(d.root, forget, inode, node)
+		case "symlink":
+			return newLink(d.root, forget, inode, node)
+		case "dev", "chardev", "fifo", "socket":
+			return newOther(d.root, forget, inode, node)
+		default:
+			debug.Log(" node %v has unknown type %v", name, node.Type)
+			return nil, syscall.ENOENT
+		}
+	})
 }

 func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
@@ -237,3 +248,7 @@ func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fus
 func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
 	return nodeGetXattr(d.node, req, resp)
 }
+
+func (d *dir) Forget() {
+	d.forget()
+}
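Taken together, these hunks wire every fuse node type to a forget callback. For context, `fs.NodeForgetter` is the interface from `github.com/anacrolix/fuse/fs` that the new `Forget` methods satisfy: the FUSE library calls `Forget` once the kernel drops its reference to a node, and delegating to the callback lets the node evict itself from its parent's cache (see `tree_cache.go` further down). A minimal sketch of that contract, assuming the single-method shape of the upstream bazil.org-style API; `nodeForgetter` and `cachedNode` here are illustrative stand-ins, not part of the changeset:

```go
package fuse

// Assumed shape of the upstream interface (see github.com/anacrolix/fuse/fs).
type nodeForgetter interface {
	Forget()
}

// cachedNode is a hypothetical node; it mirrors the new struct fields above by
// keeping the forget callback handed out by treeCache.lookupOrCreate.
type cachedNode struct {
	forget forgetFn
}

// Forget is invoked by the FUSE library after the kernel forgets the node;
// delegating to the callback removes the node from its parent's cache.
func (n *cachedNode) Forget() { n.forget() }

var _ nodeForgetter = (*cachedNode)(nil)
```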
@@ -20,14 +20,16 @@ const blockSize = 512

 // Statically ensure that *file and *openFile implement the given interfaces
 var _ = fs.HandleReader(&openFile{})
-var _ = fs.NodeListxattrer(&file{})
+var _ = fs.NodeForgetter(&file{})
 var _ = fs.NodeGetxattrer(&file{})
+var _ = fs.NodeListxattrer(&file{})
 var _ = fs.NodeOpener(&file{})

 type file struct {
-	root  *Root
-	node  *restic.Node
-	inode uint64
+	root   *Root
+	forget forgetFn
+	node   *restic.Node
+	inode  uint64
 }

 type openFile struct {
@@ -36,12 +38,13 @@ type openFile struct {
 	cumsize []uint64
 }

-func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) {
+func newFile(root *Root, forget forgetFn, inode uint64, node *restic.Node) (fusefile *file, err error) {
 	debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
 	return &file{
-		inode: inode,
-		root:  root,
-		node:  node,
+		inode:  inode,
+		forget: forget,
+		root:   root,
+		node:   node,
 	}, nil
 }

@@ -172,3 +175,7 @@ func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu
 func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
 	return nodeGetXattr(f.node, req, resp)
 }
+
+func (f *file) Forget() {
+	f.forget()
+}
@@ -119,7 +119,7 @@ func TestFuseFile(t *testing.T) {
 	root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)}

 	inode := inodeFromNode(1, node)
-	f, err := newFile(root, inode, node)
+	f, err := newFile(root, func() {}, inode, node)
 	rtest.OK(t, err)
 	of, err := f.Open(context.TODO(), nil, nil)
 	rtest.OK(t, err)
@@ -162,7 +162,7 @@ func TestFuseDir(t *testing.T) {
 	}
 	parentInode := inodeFromName(0, "parent")
 	inode := inodeFromName(1, "foo")
-	d, err := newDir(root, inode, parentInode, node)
+	d, err := newDir(root, func() {}, inode, parentInode, node)
 	rtest.OK(t, err)

 	// don't open the directory as that would require setting up a proper tree blob
@@ -217,6 +217,34 @@ func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid ui
 	rtest.Equals(t, uint32(0), attr.Gid)
 }

+// The Lookup method must return the same Node object unless it was forgotten in the meantime
+func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node {
+	t.Helper()
+	result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
+	rtest.OK(t, err)
+	result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
+	rtest.OK(t, err)
+	rtest.Assert(t, result == result2, "%v are not the same object", path)
+
+	result2.(fs.NodeForgetter).Forget()
+	result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path)
+	rtest.OK(t, err)
+	rtest.Assert(t, result != result2, "object for %v should change after forget", path)
+	return result
+}
+
+func TestStableNodeObjects(t *testing.T) {
+	repo := repository.TestRepository(t)
+	restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2)
+	root := NewRoot(repo, Config{})
+
+	idsdir := testStableLookup(t, root, "ids")
+	snapID := loadFirstSnapshot(t, repo).ID().Str()
+	snapshotdir := testStableLookup(t, idsdir, snapID)
+	dir := testStableLookup(t, snapshotdir, "dir-0")
+	testStableLookup(t, dir, "file-2")
+}
+
 // Test reporting of fuse.Attr.Blocks in multiples of 512.
 func TestBlocks(t *testing.T) {
 	root := &Root{}
@@ -276,7 +304,7 @@ func TestLink(t *testing.T) {
 		{Name: "foo", Value: []byte("bar")},
 	}}

-	lnk, err := newLink(&Root{}, 42, node)
+	lnk, err := newLink(&Root{}, func() {}, 42, node)
 	rtest.OK(t, err)
 	target, err := lnk.Readlink(context.TODO(), nil)
 	rtest.OK(t, err)
@@ -12,16 +12,20 @@ import (
 )

 // Statically ensure that *link implements the given interface
+var _ = fs.NodeForgetter(&link{})
+var _ = fs.NodeGetxattrer(&link{})
+var _ = fs.NodeListxattrer(&link{})
 var _ = fs.NodeReadlinker(&link{})

 type link struct {
-	root  *Root
-	node  *restic.Node
-	inode uint64
+	root   *Root
+	forget forgetFn
+	node   *restic.Node
+	inode  uint64
 }

-func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) {
-	return &link{root: root, inode: inode, node: node}, nil
+func newLink(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*link, error) {
+	return &link{root: root, forget: forget, inode: inode, node: node}, nil
 }

 func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
@@ -55,3 +59,7 @@ func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu
 func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
 	return nodeGetXattr(l.node, req, resp)
 }
+
+func (l *link) Forget() {
+	l.forget()
+}
@@ -7,17 +7,23 @@ import (
 	"context"

 	"github.com/anacrolix/fuse"
+	"github.com/anacrolix/fuse/fs"
 	"github.com/restic/restic/internal/restic"
 )

+// Statically ensure that *other implements the given interface
+var _ = fs.NodeForgetter(&other{})
+var _ = fs.NodeReadlinker(&other{})
+
 type other struct {
-	root  *Root
-	node  *restic.Node
-	inode uint64
+	root   *Root
+	forget forgetFn
+	node   *restic.Node
+	inode  uint64
 }

-func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) {
-	return &other{root: root, inode: inode, node: node}, nil
+func newOther(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*other, error) {
+	return &other{root: root, forget: forget, inode: inode, node: node}, nil
 }

 func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
@@ -40,3 +46,7 @@ func (l *other) Attr(_ context.Context, a *fuse.Attr) error {

 	return nil
 }
+
+func (l *other) Forget() {
+	l.forget()
+}
@@ -66,7 +66,7 @@ func NewRoot(repo restic.Repository, cfg Config) *Root {
 		}
 	}

-	root.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "")
+	root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "")

 	return root
 }
@@ -19,25 +19,30 @@ import (
 // It uses the saved prefix to select the corresponding MetaDirData.
 type SnapshotsDir struct {
 	root        *Root
+	forget      forgetFn
 	inode       uint64
 	parentInode uint64
 	dirStruct   *SnapshotsDirStructure
 	prefix      string
+	cache       treeCache
 }

 // ensure that *SnapshotsDir implements these interfaces
 var _ = fs.HandleReadDirAller(&SnapshotsDir{})
+var _ = fs.NodeForgetter(&SnapshotsDir{})
 var _ = fs.NodeStringLookuper(&SnapshotsDir{})

 // NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links
-func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir {
+func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir {
 	debug.Log("create snapshots dir, inode %d", inode)
 	return &SnapshotsDir{
 		root:        root,
+		forget:      forget,
 		inode:       inode,
 		parentInode: parentInode,
 		dirStruct:   dirStruct,
 		prefix:      prefix,
+		cache:       *newTreeCache(),
 	}
 }

@@ -107,33 +112,41 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
 		return nil, syscall.ENOENT
 	}

-	entry := meta.names[name]
-	if entry != nil {
+	return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) {
+		entry := meta.names[name]
+		if entry == nil {
+			return nil, syscall.ENOENT
+		}
+
 		inode := inodeFromName(d.inode, name)
 		if entry.linkTarget != "" {
-			return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot)
+			return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot)
 		} else if entry.snapshot != nil {
-			return newDirFromSnapshot(d.root, inode, entry.snapshot)
+			return newDirFromSnapshot(d.root, forget, inode, entry.snapshot)
 		}
-		return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil
-	}
+		return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil
+	})
+}

-	return nil, syscall.ENOENT
+func (d *SnapshotsDir) Forget() {
+	d.forget()
 }

 // SnapshotLink
 type snapshotLink struct {
 	root     *Root
+	forget   forgetFn
 	inode    uint64
 	target   string
 	snapshot *restic.Snapshot
 }

+var _ = fs.NodeForgetter(&snapshotLink{})
 var _ = fs.NodeReadlinker(&snapshotLink{})

 // newSnapshotLink
-func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) {
-	return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil
+func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) {
+	return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil
 }

 // Readlink
@@ -157,3 +170,7 @@ func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error {

 	return nil
 }
+
+func (l *snapshotLink) Forget() {
+	l.forget()
+}
internal/fuse/tree_cache.go (new file, 45 lines)

@@ -0,0 +1,45 @@
+//go:build darwin || freebsd || linux
+// +build darwin freebsd linux
+
+package fuse
+
+import (
+	"sync"
+
+	"github.com/anacrolix/fuse/fs"
+)
+
+type treeCache struct {
+	nodes map[string]fs.Node
+	m     sync.Mutex
+}
+
+type forgetFn func()
+
+func newTreeCache() *treeCache {
+	return &treeCache{
+		nodes: map[string]fs.Node{},
+	}
+}
+
+func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) {
+	t.m.Lock()
+	defer t.m.Unlock()
+
+	if node, ok := t.nodes[name]; ok {
+		return node, nil
+	}
+
+	node, err := create(func() {
+		t.m.Lock()
+		defer t.m.Unlock()
+
+		delete(t.nodes, name)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	t.nodes[name] = node
+	return node, nil
+}
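The cache above is what keeps node objects stable: the first `lookupOrCreate` for a name builds and stores the node, later lookups hand back the identical object, and running the forget callback evicts the entry so the next lookup rebuilds it. A rough illustration of those semantics, assuming it sits in the same `fuse` package; `exampleNode` and `exampleTreeCacheSemantics` are made up for illustration and are not part of the changeset:

```go
package fuse

import (
	"context"

	"github.com/anacrolix/fuse"
	"github.com/anacrolix/fuse/fs"
)

// exampleNode stands in for dir, file, link or other; any fs.Node works.
type exampleNode struct{ forget forgetFn }

func (n *exampleNode) Attr(_ context.Context, _ *fuse.Attr) error { return nil }

func exampleTreeCacheSemantics() {
	c := newTreeCache()
	build := func(forget forgetFn) (fs.Node, error) {
		return &exampleNode{forget: forget}, nil
	}

	a, _ := c.lookupOrCreate("x", build) // build runs, node is cached
	b, _ := c.lookupOrCreate("x", build) // build does not run again: b == a

	a.(*exampleNode).forget()            // kernel forgot the node: entry is evicted
	d, _ := c.lookupOrCreate("x", build) // build runs again: d != a
	_, _, _ = a, b, d
}
```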
@@ -3,8 +3,16 @@

 package restic

-import "golang.org/x/sys/unix"
+import (
+	"os"

-func mknod(path string, mode uint32, dev uint64) (err error) {
-	return unix.Mknod(path, mode, int(dev))
+	"golang.org/x/sys/unix"
+)
+
+func mknod(path string, mode uint32, dev uint64) error {
+	err := unix.Mknod(path, mode, int(dev))
+	if err != nil {
+		err = &os.PathError{Op: "mknod", Path: path, Err: err}
+	}
+	return err
 }

@@ -3,14 +3,21 @@

 package restic

-import "syscall"
+import (
+	"os"
+	"syscall"
+)

 func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error {
 	return nil
 }

-func mknod(path string, mode uint32, dev uint64) (err error) {
-	return syscall.Mknod(path, mode, dev)
+func mknod(path string, mode uint32, dev uint64) error {
+	err := syscall.Mknod(path, mode, dev)
+	if err != nil {
+		err = &os.PathError{Op: "mknod", Path: path, Err: err}
+	}
+	return err
 }

 func (s statT) atim() syscall.Timespec { return s.Atimespec }
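Both `mknod` variants now wrap the raw errno in an `*os.PathError`, so the failing path shows up in the error text while `errors.Is` still matches the underlying errno (the standard library maps `EEXIST` to `os.ErrExist`, `EPERM` to `os.ErrPermission`, and so on). A hypothetical caller might look like this; `createFIFO`, the mode and the tolerance for existing nodes are illustrative, not part of the changeset:

```go
package restic

import (
	"errors"
	"os"

	"golang.org/x/sys/unix"
)

// createFIFO is an illustrative helper that tolerates an already existing node.
func createFIFO(path string) error {
	err := mknod(path, unix.S_IFIFO|0o600, 0)
	if err != nil && !errors.Is(err, os.ErrExist) {
		// the wrapped error reads like "mknod /some/path: permission denied"
		return err
	}
	return nil
}
```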
@@ -7,10 +7,12 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"strings"
 	"syscall"
 	"testing"
 	"time"

+	"github.com/restic/restic/internal/errors"
 	rtest "github.com/restic/restic/internal/test"
 )

@@ -145,3 +147,12 @@ func TestNodeFromFileInfo(t *testing.T) {
 		})
 	}
 }
+
+func TestMknodError(t *testing.T) {
+	d := t.TempDir()
+	// Call mkfifo, which calls mknod, as mknod may give
+	// "operation not permitted" on Mac.
+	err := mkfifo(d, 0)
+	rtest.Assert(t, errors.Is(err, os.ErrExist), "want ErrExist, got %q", err)
+	rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err)
+}
@@ -372,8 +372,11 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT
 		return false, nil
 	}

-	if strings.HasSuffix(filepath.Clean(path), `\`) {
-		// filepath.Clean(path) ends with '\' for Windows root volume paths only
+	isVolume, err := isVolumePath(path)
+	if err != nil {
+		return false, err
+	}
+	if isVolume {
 		// Do not process file attributes, created time and sd for windows root volume paths
 		// Security descriptors are not supported for root volume paths.
 		// Though file attributes and created time are supported for root volume paths,
@@ -464,6 +467,18 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) {
 	return isEASupportedVolume, err
 }

+// isVolumePath returns whether a path refers to a volume
+func isVolumePath(path string) (bool, error) {
+	volName, err := prepareVolumeName(path)
+	if err != nil {
+		return false, err
+	}
+
+	cleanPath := filepath.Clean(path)
+	cleanVolume := filepath.Clean(volName + `\`)
+	return cleanPath == cleanVolume, nil
+}
+
 // prepareVolumeName prepares the volume name for different cases in Windows
 func prepareVolumeName(path string) (volumeName string, err error) {
 	// Check if it's an extended length path
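In other words, a path is treated as a volume root exactly when cleaning it yields its volume name plus a trailing separator. A small standalone illustration of that comparison; the `C:` volume name is an example of what `prepareVolumeName` might return, and the printed results assume a Windows build, where `filepath.Clean` treats `\` as the path separator:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	volume := `C:` // example volume name for the paths below

	for _, path := range []string{`C:\`, `C:\Users\test`} {
		// same comparison as isVolumePath: cleaned path == cleaned volume + `\`
		isVolume := filepath.Clean(path) == filepath.Clean(volume+`\`)
		fmt.Printf("%-16q volume root: %v\n", path, isVolume) // C:\ -> true, C:\Users\test -> false
	}
}
```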
@@ -450,6 +450,13 @@ func TestPrepareVolumeName(t *testing.T) {
 			expectError:         false,
 			expectedEASupported: false,
 		},
+		{
+			name:                "Volume Shadow Copy root",
+			path:                `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`,
+			expectedVolume:      `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`,
+			expectError:         false,
+			expectedEASupported: false,
+		},
 		{
 			name: "Volume Shadow Copy path",
 			path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`,