Compare commits


65 Commits

Author SHA1 Message Date
Alexander Neumann
abb1dc4eb6 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
8d21bb92db Add tests for invalid configs 2018-06-10 12:27:52 +02:00
Alexander Neumann
0b3c402801 Move options package to ui/options 2018-06-10 12:27:52 +02:00
Alexander Neumann
b3b70002ab wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
4916ba7a8a wip name 2018-06-10 12:27:52 +02:00
Alexander Neumann
ea565df3e8 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
0758c92afc wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
8b0092908a wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
ffd7bc1021 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
6bad560324 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
7ad648c686 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
0c078cc205 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
1fbcf63830 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
740e2d6139 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
aaef54559a wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
722517c480 wip 2018-06-10 12:27:52 +02:00
Alexander Neumann
e4c0d77bdd Add VERSION for 0.9.1 2018-06-10 11:31:03 +02:00
Alexander Neumann
1dd655dad2 Generate CHANGELOG.md for 0.9.1 2018-06-10 11:30:53 +02:00
Alexander Neumann
581d0984fe Fix changelog entry 2018-06-10 11:29:57 +02:00
Alexander Neumann
e62add84bc Move changelog files for 0.9.1 2018-06-10 11:22:32 +02:00
Alexander Neumann
63779c1eb4 Merge pull request #1839 from restic/fix-find
Fix find, do not skip some snapshots
2018-06-10 10:08:47 +02:00
Alexander Neumann
c204382ea9 Revert "Fix integration tests on Windows"
This reverts commit 33dbd0ba5c.
2018-06-10 00:01:28 +02:00
Alexander Neumann
321efec60c Fix integration tests on Windows 2018-06-10 00:00:22 +02:00
Alexander Neumann
33dbd0ba5c Fix integration tests on Windows 2018-06-09 23:58:44 +02:00
Alexander Neumann
9a73869c27 Update docs for RHEL/CentOS 2018-06-09 23:41:40 +02:00
Alexander Neumann
8f26fe271c ls: Use walker for ls 2018-06-09 23:35:20 +02:00
Alexander Neumann
251335f124 Add entry to changelog 2018-06-09 23:35:20 +02:00
Alexander Neumann
081743d0a5 find: Use walker.Walk 2018-06-09 23:35:20 +02:00
Alexander Neumann
3a86f4852b Add walker for trees in the repo 2018-06-09 23:35:20 +02:00
Alexander Neumann
14aead94b3 filter: Allow double wildcard in ChildMatch 2018-06-09 23:18:13 +02:00
Alexander Neumann
ce01ca30d6 find: Correct tree pruning optimization
The `find` command will now take care to only mark trees as "not found"
when the pattern couldn't be found within any subtree.

Closes #1825, #1823
2018-06-09 18:59:13 +02:00
Alexander Neumann
e2d347a698 find: Use OS independent slash-based format 2018-06-09 18:58:13 +02:00
Alexander Neumann
42ebb0a0a6 backup: Parse timestamp earlier 2018-06-09 18:21:12 +02:00
Alexander Neumann
419acad3c3 Merge pull request #1837 from restic/fix-1833
cache: Ensure failed downloads are retried
2018-06-09 18:20:21 +02:00
Alexander Neumann
810b5ea076 Add entry to changelog 2018-06-09 17:55:51 +02:00
Alexander Neumann
fc5439a37a cache: Ensure failed downloads are retried
This fixes #1833, which consists of two different bugs:

 * The `defer` in `cacheFile()` may remove a channel from the
   `inProgress` map although it is not responsible for downloading the
   file

 * If the download fails, goroutines waiting for the file to be cached
   assumed that the file was there; there was no way to signal the
   error.
2018-06-09 17:50:56 +02:00
Alexander Neumann
48aab8bd65 Merge pull request #1836 from restic/update-blazer
Update github.com/kurin/blazer
2018-06-09 14:31:12 +02:00
Alexander Neumann
6fbcd1694b Add entry to changelog 2018-06-09 14:31:02 +02:00
Alexander Neumann
494fe2a8b5 Merge pull request #1835 from restic/fix-1834
backup: Fix deadlock
2018-06-09 14:28:16 +02:00
Alexander Neumann
f761068f4e Update github.com/kurin/blazer 2018-06-09 12:32:18 +02:00
Alexander Neumann
c44e808aa5 backup: Fix deadlock
When the archiver is faster than the scanner, restic deadlocks. This
commit adds a `finished` channel to the struct in `ui/backup.go` so that
scanner results are ignored when the archiver is already finished.

Closes #1834
2018-06-09 12:15:19 +02:00
Alexander Neumann
ab37c6095a Merge pull request #1821 from michaelkoetter/fix-1795
#1795 use unix.IoctlGetWinsize to get terminal size
2018-06-07 20:20:06 +02:00
Michael Kötter
d6fd94e49d Don't run Solaris build for go1.9 2018-06-04 15:04:50 +02:00
Michael Kötter
53040a2e34 add "solaris/amd64" to cross-compile archs 2018-06-04 12:51:34 +02:00
Alexander Neumann
cfc19b4582 Merge pull request #1828 from restic/handle-s3-list-errors
s3: Pass list errors up to the caller
2018-06-02 10:34:49 +02:00
Alexander Neumann
141fabdd09 s3: Pass list errors up to the caller 2018-06-01 22:15:23 +02:00
Alexander Neumann
d49ca42771 Merge pull request #1827 from restic/azure-large-files
azure: Support uploading large files
2018-06-01 18:37:26 +02:00
Alexander Neumann
f6fded729d Add entry to changelog 2018-06-01 14:52:16 +02:00
Alexander Neumann
465700595c azure: Support uploading large files
Closes #1822
2018-06-01 14:52:16 +02:00
Alexander Neumann
0fcd9d6926 Merge pull request #1824 from rfjakob/ssh_command_exited
sftp: persist "ssh command exited" error
2018-05-31 21:26:39 +02:00
Jakob Unterwurzacher
dd3b9910ee sftp: persist "ssh command exited" error
If our ssh process has died, not only the next, but all subsequent
calls to clientError() should indicate the error.

restic output when the ssh process is killed with "kill -9":

  Save(<data/afb68adbf9>) returned error, retrying after 253.661803ms: Write: failed to send packet header: write |1: file already closed
  Save(<data/afb68adbf9>) returned error, retrying after 580.752212ms: ssh command exited: signal: killed
  Save(<data/afb68adbf9>) returned error, retrying after 790.150468ms: ssh command exited: signal: killed
  Save(<data/afb68adbf9>) returned error, retrying after 1.769595051s: ssh command exited: signal: killed
  [...]
  error in cleanup handler: ssh command exited: signal: killed

Before this patch:

  Save(<data/de698d934f>) returned error, retrying after 252.84163ms: Write: failed to send packet header: write |1: file already closed
  Save(<data/de698d934f>) returned error, retrying after 660.236963ms: OpenFile: failed to send packet header: write |1: file already closed
  Save(<data/de698d934f>) returned error, retrying after 568.049909ms: OpenFile: failed to send packet header: write |1: file already closed
  Save(<data/de698d934f>) returned error, retrying after 2.428813824s: OpenFile: failed to send packet header: write |1: file already closed
  [...]
  error in cleanup handler: failed to send packet header: write |1: file already closed
2018-05-30 19:28:14 +02:00
Alexander Neumann
185b60c22b Document project governance 2018-05-28 22:29:06 +02:00
Michael Kötter
589c23dc23 #1795 use unix.IoctlGetWinsize to get terminal size 2018-05-27 23:44:48 +02:00
Alexander Neumann
0183fea926 Merge pull request #1820 from restic/fix-1803
termstatus: Fix panic for non-terminal runs
2018-05-27 13:08:25 +02:00
Alexander Neumann
7d9642523b termstatus: Fix panic for non-terminal runs
Closes #1803
2018-05-27 12:52:01 +02:00
Alexander Neumann
4bf07a74a0 Merge pull request #1806 from mholt/patch-1
doc: Clarify multiple forget policies get ORed
2018-05-26 11:03:22 +02:00
Alexander Neumann
2a976d795f b2: Remove extra error check 2018-05-26 10:12:30 +02:00
Alexander Neumann
1892b314f8 Merge pull request #1815 from restic/update-blazer
Update github.com/kurin/blazer
2018-05-25 20:46:35 +02:00
Alexander Neumann
b7bed406b9 Update github.com/kurin/blazer 2018-05-25 20:26:26 +02:00
Matt Holt
ee4202f7c3 doc: Clarify multiple forget policies get ORed 2018-05-23 17:28:02 -06:00
Alexander Neumann
4cd28713b6 Merge pull request #1802 from restic/rclone-add-limits
rclone: Add limiting bandwidth to the rclone backend
2018-05-22 21:19:52 +02:00
Alexander Neumann
e3fe87f269 Remove superseded feature from the CHANGELOG
For a discussion please see https://github.com/restic/restic/issues/1796
2018-05-22 20:57:07 +02:00
Alexander Neumann
a02698fcdd Add entry to changelog 2018-05-22 20:48:29 +02:00
Alexander Neumann
bfd923e81e rclone: Respect bandwidth limits 2018-05-22 20:48:17 +02:00
Alexander Neumann
20bfed5985 Update build.go 2018-05-21 20:31:19 +02:00
76 changed files with 2441 additions and 404 deletions
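
PR #1821 in the commit list above moves terminal-size detection to unix.IoctlGetWinsize. A minimal sketch of that approach, assuming golang.org/x/sys/unix (the helper name getTermSize is illustrative, not restic's):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// getTermSize queries the kernel for the terminal dimensions via the ioctl
// wrapper from golang.org/x/sys/unix instead of issuing a raw syscall.
func getTermSize(f *os.File) (cols, rows int, err error) {
	ws, err := unix.IoctlGetWinsize(int(f.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		return 0, 0, err
	}
	return int(ws.Col), int(ws.Row), nil
}

func main() {
	cols, rows, err := getTermSize(os.Stdout)
	fmt.Println(cols, rows, err)
}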


@@ -5,7 +5,7 @@ matrix:
include:
- os: linux
go: "1.9.x"
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 RESTIC_BUILD_SOLARIS=0
# only run fuse and cloud backends tests on Travis for the latest Go on Linux
- os: linux


@@ -1,3 +1,70 @@
Changelog for restic 0.9.1 (2018-06-10)
=======================================
The following sections list the changes in restic 0.9.1 relevant to
restic users. The changes are ordered by importance.
Summary
-------
* Fix #1801: Add limiting bandwidth to the rclone backend
* Fix #1822: Allow uploading large files to MS Azure
* Fix #1825: Correct `find` to not skip snapshots
* Fix #1833: Fix caching files on error
* Fix #1834: Resolve deadlock
Details
-------
* Bugfix #1801: Add limiting bandwidth to the rclone backend
The rclone backend did not respect `--limit-upload` or `--limit-download`. Usually it is
preferable to configure the limiting in rclone itself, which gives much better results, but
when a remote instance of rclone is used (e.g. called via ssh), it is still relevant to limit
the bandwidth from restic to rclone.
https://github.com/restic/restic/issues/1801
* Bugfix #1822: Allow uploading large files to MS Azure
Sometimes, restic creates files to be uploaded to the repository which are quite large, e.g.
when saving directories with many entries or very large files. The MS Azure API does not allow
uploading files larger than 256 MiB directly; instead, restic needs to upload them in blocks
of 100 MiB. This is now implemented.
https://github.com/restic/restic/issues/1822
* Bugfix #1825: Correct `find` to not skip snapshots
Under certain circumstances, the `find` command skipped snapshots that contained
directories with matching files when those directories had not been modified at all and had
already been printed as part of a different snapshot. This is now corrected.
In addition, we've switched to our own matching/pattern implementation, so now things like
`restic find "/home/user/foo/**/main.go"` are possible.
https://github.com/restic/restic/issues/1825
https://github.com/restic/restic/issues/1823
* Bugfix #1833: Fix caching files on error
During `check` it may happen that different threads access the same file in the backend, which
is then downloaded into the cache only once. When that fails, only the thread which is
responsible for downloading the file signals the correct error. The other threads just assume
that the file has been downloaded successfully and then get an error when they try to access the
cached file.
https://github.com/restic/restic/issues/1833
* Bugfix #1834: Resolve deadlock
If the "scanning" process restic runs to determine how much data there is did not finish
before the backup itself was done, restic stopped doing anything. This is resolved now.
https://github.com/restic/restic/issues/1834
https://github.com/restic/restic/pull/1835
Changelog for restic 0.9.0 (2018-05-21)
=======================================
@@ -22,7 +89,6 @@ Summary
* Enh #1477: Accept AWS_SESSION_TOKEN for the s3 backend
* Enh #1648: Ignore AWS permission denied error when creating a repository
* Enh #1649: Add illumos/Solaris support
* Enh #1676: Improve backup speed: Skip initial scan phase in quiet mode
* Enh #1709: Improve messages `restic check` prints
* Enh #827: Add --new-password-file flag for non-interactive password changes
* Enh #1735: Allow keeping a time range of snapshots
@@ -217,15 +283,6 @@ Details
https://github.com/restic/restic/pull/1649
* Enhancement #1676: Improve backup speed: Skip initial scan phase in quiet mode
We've improved the backup speed when the quiet flag (`-q` or `--quiet`) is set by skipping the
initial scan which gathers information for displaying the progress bar and the ETA
estimation.
https://github.com/restic/restic/issues/1160
https://github.com/restic/restic/pull/1676
* Enhancement #1709: Improve messages `restic check` prints
Some messages `restic check` prints are not really errors, so from now on restic does not treat

GOVERNANCE.md Normal file

@@ -0,0 +1,27 @@
# restic project governance
## Overview
The restic project uses a governance model commonly described as Benevolent
Dictator For Life (BDFL). This document outlines our understanding of what this
means. It is derived from the [i3 window manager project
governance](https://raw.githubusercontent.com/i3/i3/next/.github/GOVERNANCE.md).
## Roles
* user: anyone who interacts with the restic project
* core contributor: a handful of people who have contributed significantly to
the project by any means (issue triage, support, documentation, code, etc.).
Core contributors are recognizable via GitHub's "Member" badge.
* Benevolent Dictator For Life (BDFL): a single individual who makes decisions
when consensus cannot be reached. restic's current BDFL is [@fd0](https://github.com/fd0).
## Decision making process
In general, we try to reach consensus in discussions. In case consensus cannot
be reached, the BDFL makes a decision.
## Contribution process
The contribution process is described in a separate document called
[CONTRIBUTING](CONTRIBUTING.md).

Gopkg.lock generated

@@ -94,8 +94,8 @@
[[projects]]
name = "github.com/kurin/blazer"
packages = ["b2","base","internal/b2assets","internal/b2types","internal/blog","x/window"]
revision = "b7c9cf27cae3aec98c2caaeb5181608bfe05b17c"
version = "v0.3.1"
revision = "318e9768bf9a0fe52a64b9f8fe74f4f5caef6452"
version = "v0.4.4"
[[projects]]
name = "github.com/marstr/guid"


@@ -1 +1 @@
0.9.0
0.9.1


@@ -230,6 +230,7 @@ func showUsage(output io.Writer) {
fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
fmt.Fprintf(output, " --goarm value set GOARM for cross-compilation\n")
fmt.Fprintf(output, " --tempdir dir use a specific directory for compilation\n")
}
func verbosePrintf(message string, args ...interface{}) {


@@ -1,8 +0,0 @@
Enhancement: Improve backup speed: Skip initial scan phase in quiet mode
We've improved the backup speed when the quiet flag (`-q` or `--quiet`) is set
by skipping the initial scan which gathers information for displaying the
progress bar and the ETA estimation.
https://github.com/restic/restic/pull/1676
https://github.com/restic/restic/issues/1160


@@ -0,0 +1,9 @@
Bugfix: Add limiting bandwidth to the rclone backend
The rclone backend did not respect `--limit-upload` or `--limit-download`.
Usually it is preferable to configure the limiting in rclone itself, which
gives much better results, but when a remote instance of rclone is used (e.g.
called via ssh), it is still relevant to limit the bandwidth from restic to
rclone.
https://github.com/restic/restic/issues/1801
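
As a hedged sketch of how this is wired up (compare the wrappedConn hunk in the rclone backend further down; wrapStream is an illustrative helper, not restic's API): the stdio connection to the rclone child process is wrapped with a rate-limited reader for downloads and a rate-limited writer for uploads.

package rclone

import (
	"io"

	"github.com/restic/restic/internal/limiter"
)

// wrapStream applies --limit-upload/--limit-download to a raw connection:
// Downstream limits what restic reads, UpstreamWriter limits what it
// writes. A limit of zero leaves that direction untouched (the nil-bucket
// case in the limiter hunk near the end of this diff).
func wrapStream(rw io.ReadWriter, uploadKb, downloadKb int) (io.Reader, io.Writer) {
	lim := limiter.NewStaticLimiter(uploadKb, downloadKb)
	return lim.Downstream(rw), lim.UpstreamWriter(rw)
}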


@@ -0,0 +1,9 @@
Bugfix: Allow uploading large files to MS Azure
Sometimes, restic creates files to be uploaded to the repository which are
quite large, e.g. when saving directories with many entries or very large
files. The MS Azure API does not allow uploading files larger than 256 MiB
directly; instead, restic needs to upload them in blocks of 100 MiB. This is
now implemented.
https://github.com/restic/restic/issues/1822
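
The 100 MiB block loop can be seen in the azure backend hunk further down; the following is a self-contained sketch of just the chunking logic it relies on (chunks, and the tiny block size used for the demonstration, are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// chunks reads rd in fixed-size blocks, mirroring the saveLarge loop:
// io.ReadFull fills each block, ErrUnexpectedEOF marks a short final block,
// and a bare io.EOF means no bytes were left at all.
func chunks(rd io.Reader, blockSize int) (int, error) {
	buf := make([]byte, blockSize)
	n := 0
	for {
		read, err := io.ReadFull(rd, buf)
		if err == io.ErrUnexpectedEOF {
			err = nil // a short final block is expected
		}
		if err == io.EOF {
			return n, nil
		}
		if err != nil {
			return n, err
		}
		_ = buf[:read] // a real implementation uploads this block here
		n++
	}
}

func main() {
	n, _ := chunks(bytes.NewReader(make([]byte, 250)), 100)
	fmt.Println(n) // 3 blocks: 100 + 100 + 50
}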


@@ -0,0 +1,12 @@
Bugfix: Correct `find` to not skip snapshots
Under certain circumstances, the `find` command skipped snapshots that
contained directories with matching files when those directories had not been
modified at all and had already been printed as part of a different snapshot.
This is now corrected.
In addition, we've switched to our own matching/pattern implementation, so now
things like `restic find "/home/user/foo/**/main.go"` are possible.
https://github.com/restic/restic/issues/1825
https://github.com/restic/restic/issues/1823
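
The new matcher (including the double wildcard support added to ChildMatch) is exercised by the cmd_find.go hunk further down. A hedged usage sketch of the two entry points, with signatures as they appear in that hunk:

package main

import (
	"fmt"

	"github.com/restic/restic/internal/filter"
)

func main() {
	pattern := "/home/user/foo/**/main.go"

	// Match reports whether the complete path matches the pattern,
	// including the double-wildcard segments.
	m, err := filter.Match(pattern, "/home/user/foo/x/y/main.go")
	if err != nil {
		panic(err)
	}

	// ChildMatch reports whether a child of this path could still match,
	// which is what allows the walker to prune whole subtrees safely.
	c, err := filter.ChildMatch(pattern, "/home/user/foo/x")
	if err != nil {
		panic(err)
	}

	fmt.Println(m, c) // true true
}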


@@ -0,0 +1,9 @@
Bugfix: Fix caching files on error
During `check` it may happen that different threads access the same file in the
backend, which is then downloaded into the cache only once. When that fails,
only the thread which is responsible for downloading the file signals the
correct error. The other threads just assume that the file has been downloaded
successfully and then get an error when they try to access the cached file.
https://github.com/restic/restic/issues/1833
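
A minimal sketch of the corrected synchronization, using illustrative identifiers rather than restic's (the real change is in the cache backend hunk further down): only the goroutine that performs the download closes the channel and removes it from the map, and it does so on failure as well, so waiters can re-check the cache instead of assuming success.

package cache

import (
	"context"
	"sync"
)

type handle struct{ name string }

type cachingBackend struct {
	mu         sync.Mutex
	inProgress map[handle]chan struct{}
	download   func(context.Context, handle) error
}

func (b *cachingBackend) cacheFile(ctx context.Context, h handle) error {
	b.mu.Lock()
	if other, ok := b.inProgress[h]; ok {
		b.mu.Unlock()
		<-other // someone else is downloading; wait, then re-check the cache
		return nil
	}
	finish := make(chan struct{})
	b.inProgress[h] = finish
	b.mu.Unlock()

	err := b.download(ctx, h) // may fail; the error is propagated below

	// Signal waiters and clean up exactly once, in the downloading
	// goroutine only, and only after the outcome is known.
	close(finish)
	b.mu.Lock()
	delete(b.inProgress, h)
	b.mu.Unlock()
	return err
}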


@@ -0,0 +1,8 @@
Bugfix: Resolve deadlock
If the "scanning" process restic runs to determine how much data there is did
not finish before the backup itself was done, restic stopped doing anything.
This is resolved now.
https://github.com/restic/restic/issues/1834
https://github.com/restic/restic/pull/1835
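
A minimal sketch of the pattern behind the fix, with assumed channel and type names (the renamed finished channel appears in the ui/backup.go hunk at the very end of this diff): sends from the scanner are guarded by a select on finished, so a scanner that outlives the archiver can no longer block forever.

package ui

type scanStats struct {
	files, bytes uint64
}

type backup struct {
	totalCh  chan scanStats
	finished chan struct{} // closed once the archiver completes
}

// reportTotal delivers a scanner result unless the backup is already
// finished, in which case the result is dropped instead of blocking.
func (b *backup) reportTotal(s scanStats) {
	select {
	case b.totalCh <- s:
	case <-b.finished:
		// the archiver finished first; ignore the late scanner result
	}
}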


@@ -21,6 +21,7 @@ import (
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/config"
"github.com/restic/restic/internal/ui/termstatus"
)
@@ -43,6 +44,11 @@ given as the arguments.
},
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
err := config.ApplyFlags(&backupOptions.Config, cmd.Flags())
if err != nil {
return err
}
if backupOptions.Stdin && backupOptions.FilesFrom == "-" {
return errors.Fatal("cannot use both `--stdin` and `--files-from -`")
}
@@ -51,7 +57,7 @@ given as the arguments.
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil })
err := runBackup(backupOptions, globalOptions, term, args)
err = runBackup(backupOptions, globalOptions, term, args)
if err != nil {
return err
}
@@ -62,9 +68,10 @@ given as the arguments.
// BackupOptions bundles all options for the backup command.
type BackupOptions struct {
Config config.Backup
Parent string
Force bool
Excludes []string
ExcludeFiles []string
ExcludeOtherFS bool
ExcludeIfPresent []string
@@ -86,7 +93,9 @@ func init() {
f := cmdBackup.Flags()
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringArrayP("exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
@@ -188,12 +197,12 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, excludes []string, err error) {
// allowed devices
if opts.ExcludeOtherFS {
f, err := rejectByDevice(targets)
if err != nil {
return nil, err
return nil, nil, err
}
fs = append(fs, f)
}
@@ -202,19 +211,21 @@ func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets
if repo.Cache != nil {
f, err := rejectResticCache(repo)
if err != nil {
return nil, err
return nil, nil, err
}
fs = append(fs, f)
}
excludes = append(excludes, opts.Config.Excludes...)
// add patterns from file
if len(opts.ExcludeFiles) > 0 {
opts.Excludes = append(opts.Excludes, readExcludePatternsFromFiles(opts.ExcludeFiles)...)
excludes = append(excludes, readExcludePatternsFromFiles(opts.ExcludeFiles)...)
}
if len(opts.Excludes) > 0 {
fs = append(fs, rejectByPattern(opts.Excludes))
if len(excludes) > 0 {
fs = append(fs, rejectByPattern(excludes))
}
if opts.ExcludeCaches {
@@ -224,13 +235,13 @@ func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets
for _, spec := range opts.ExcludeIfPresent {
f, err := rejectIfPresent(spec)
if err != nil {
return nil, err
return nil, nil, err
}
fs = append(fs, f)
}
return fs, nil
return fs, excludes, nil
}
// readExcludePatternsFromFiles reads all exclude files and returns the list of
@@ -336,6 +347,14 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return err
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
var t tomb.Tomb
p := ui.NewBackup(term, gopts.verbosity)
@@ -373,7 +392,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
}
// rejectFuncs collect functions that can reject items from the backup
rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
rejectFuncs, excludes, err := collectRejectFuncs(opts, repo, targets)
if err != nil {
return err
}
@@ -402,14 +421,6 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return true
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
var targetFS fs.FS = fs.Local{}
if opts.Stdin {
p.V("read data from stdin")
@@ -443,7 +454,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
}
snapshotOpts := archiver.SnapshotOptions{
Excludes: opts.Excludes,
Excludes: excludes,
Tags: opts.Tags,
Time: timeStamp,
Hostname: opts.Hostname,


@@ -3,7 +3,6 @@ package main
import (
"context"
"encoding/json"
"path/filepath"
"strings"
"time"
@@ -11,7 +10,9 @@ import (
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/walker"
)
var cmdFind = &cobra.Command{
@@ -94,7 +95,7 @@ type statefulOutput struct {
hits int
}
func (s *statefulOutput) PrintJSON(prefix string, node *restic.Node) {
func (s *statefulOutput) PrintJSON(path string, node *restic.Node) {
type findNode restic.Node
b, err := json.Marshal(struct {
// Add these attributes
@@ -111,7 +112,7 @@ func (s *statefulOutput) PrintJSON(prefix string, node *restic.Node) {
Content byte `json:"content,omitempty"`
Subtree byte `json:"subtree,omitempty"`
}{
Path: filepath.Join(prefix, node.Name),
Path: path,
Permissions: node.Mode.String(),
findNode: (*findNode)(node),
})
@@ -138,22 +139,22 @@ func (s *statefulOutput) PrintJSON(prefix string, node *restic.Node) {
s.hits++
}
func (s *statefulOutput) PrintNormal(prefix string, node *restic.Node) {
func (s *statefulOutput) PrintNormal(path string, node *restic.Node) {
if s.newsn != s.oldsn {
if s.oldsn != nil {
Verbosef("\n")
}
s.oldsn = s.newsn
Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID())
Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID().Str())
}
Printf(formatNode(prefix, node, s.ListLong) + "\n")
Printf(formatNode(path, node, s.ListLong) + "\n")
}
func (s *statefulOutput) Print(prefix string, node *restic.Node) {
func (s *statefulOutput) Print(path string, node *restic.Node) {
if s.JSON {
s.PrintJSON(prefix, node)
s.PrintJSON(path, node)
} else {
s.PrintNormal(prefix, node)
s.PrintNormal(path, node)
}
}
@@ -174,74 +175,75 @@ func (s *statefulOutput) Finish() {
// Finder bundles information needed to find a file or directory.
type Finder struct {
repo restic.Repository
pat findPattern
out statefulOutput
notfound restic.IDSet
repo restic.Repository
pat findPattern
out statefulOutput
ignoreTrees restic.IDSet
}
func (f *Finder) findInTree(ctx context.Context, treeID restic.ID, prefix string) error {
if f.notfound.Has(treeID) {
debug.Log("%v skipping tree %v, has already been checked", prefix, treeID)
return nil
func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error {
debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), f.pat.oldest, f.pat.newest)
if sn.Tree == nil {
return errors.Errorf("snapshot %v has no tree", sn.ID().Str())
}
debug.Log("%v checking tree %v\n", prefix, treeID)
f.out.newsn = sn
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(nodepath string, node *restic.Node, err error) (bool, error) {
if err != nil {
return false, err
}
tree, err := f.repo.LoadTree(ctx, treeID)
if err != nil {
return err
}
var found bool
for _, node := range tree.Nodes {
debug.Log(" testing entry %q\n", node.Name)
if node == nil {
return false, nil
}
name := node.Name
if f.pat.ignoreCase {
name = strings.ToLower(name)
}
m, err := filepath.Match(f.pat.pattern, name)
foundMatch, err := filter.Match(f.pat.pattern, nodepath)
if err != nil {
return err
}
if m {
if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) {
debug.Log(" ModTime is older than %s\n", f.pat.oldest)
continue
}
if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) {
debug.Log(" ModTime is newer than %s\n", f.pat.newest)
continue
}
debug.Log(" found match\n")
found = true
f.out.Print(prefix, node)
return false, err
}
var (
ignoreIfNoMatch = true
errIfNoMatch error
)
if node.Type == "dir" {
if err := f.findInTree(ctx, *node.Subtree, filepath.Join(prefix, node.Name)); err != nil {
return err
childMayMatch, err := filter.ChildMatch(f.pat.pattern, nodepath)
if err != nil {
return false, err
}
if !childMayMatch {
ignoreIfNoMatch = true
errIfNoMatch = walker.SkipNode
} else {
ignoreIfNoMatch = false
}
}
}
if !found {
f.notfound.Insert(treeID)
}
if !foundMatch {
return ignoreIfNoMatch, errIfNoMatch
}
return nil
}
if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) {
debug.Log(" ModTime is older than %s\n", f.pat.oldest)
return ignoreIfNoMatch, errIfNoMatch
}
func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error {
debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), f.pat.oldest, f.pat.newest)
if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) {
debug.Log(" ModTime is newer than %s\n", f.pat.newest)
return ignoreIfNoMatch, errIfNoMatch
}
f.out.newsn = sn
return f.findInTree(ctx, *sn.Tree, string(filepath.Separator))
debug.Log(" found match\n")
f.out.Print(nodepath, node)
return false, nil
})
}
func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
@@ -289,10 +291,10 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
defer cancel()
f := &Finder{
repo: repo,
pat: pat,
out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
notfound: restic.NewIDSet(),
repo: repo,
pat: pat,
out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
ignoreTrees: restic.NewIDSet(),
}
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) {
if err = f.findInSnapshot(ctx, sn); err != nil {
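
A hedged usage sketch of the new walker as exercised above (the callback signature follows these hunks; listTree is illustrative): returning true as the first value marks the subtree as ignorable on later visits, and returning walker.SkipNode as the error prunes it immediately.

package walkexample

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/walker"
)

func listTree(ctx context.Context, repo restic.Repository, treeID restic.ID) error {
	return walker.Walk(ctx, repo, treeID, restic.NewIDSet(),
		func(nodepath string, node *restic.Node, err error) (bool, error) {
			if err != nil {
				return false, err
			}
			if node == nil {
				return false, nil // the walker may deliver nil nodes
			}
			fmt.Println(nodepath)
			return false, nil // keep descending, never prune
		})
}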


@@ -2,13 +2,12 @@ package main
import (
"context"
"path/filepath"
"github.com/spf13/cobra"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/walker"
)
var cmdLs = &cobra.Command{
@@ -46,26 +45,6 @@ func init() {
flags.StringArrayVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given")
}
func printTree(ctx context.Context, repo *repository.Repository, id *restic.ID, prefix string) error {
tree, err := repo.LoadTree(ctx, *id)
if err != nil {
return err
}
for _, entry := range tree.Nodes {
Printf("%s\n", formatNode(prefix, entry, lsOptions.ListLong))
if entry.Type == "dir" && entry.Subtree != nil {
entryPath := prefix + string(filepath.Separator) + entry.Name
if err = printTree(ctx, repo, entry.Subtree, entryPath); err != nil {
return err
}
}
}
return nil
}
func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 && opts.Host == "" && len(opts.Tags) == 0 && len(opts.Paths) == 0 {
return errors.Fatal("Invalid arguments, either give one or more snapshot IDs or set filters.")
@@ -85,7 +64,18 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
Verbosef("snapshot %s of %v at %s):\n", sn.ID().Str(), sn.Paths, sn.Time)
if err = printTree(gopts.ctx, repo, sn.Tree, ""); err != nil {
err := walker.Walk(ctx, repo, *sn.Tree, nil, func(nodepath string, node *restic.Node, err error) (bool, error) {
if err != nil {
return false, err
}
if node == nil {
return false, nil
}
Printf("%s\n", formatNode(nodepath, node, lsOptions.ListLong))
return false, nil
})
if err != nil {
return err
}
}


@@ -3,7 +3,7 @@ package main
import (
"fmt"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
"github.com/spf13/cobra"
)


@@ -3,7 +3,6 @@ package main
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/restic/restic/internal/restic"
@@ -63,10 +62,9 @@ func formatDuration(d time.Duration) string {
return formatSeconds(sec)
}
func formatNode(prefix string, n *restic.Node, long bool) string {
nodepath := prefix + string(filepath.Separator) + n.Name
func formatNode(path string, n *restic.Node, long bool) string {
if !long {
return nodepath
return path
}
var mode os.FileMode
@@ -92,6 +90,6 @@ func formatNode(prefix string, n *restic.Node, long bool) string {
return fmt.Sprintf("%s %5d %5d %6d %s %s%s",
mode|n.Mode, n.UID, n.GID, n.Size,
n.ModTime.Format(TimeFormat), nodepath,
n.ModTime.Format(TimeFormat), path,
target)
}


@@ -26,10 +26,11 @@ import (
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/limiter"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui/config"
"github.com/restic/restic/internal/ui/options"
"github.com/restic/restic/internal/errors"
@@ -40,8 +41,8 @@ var version = "compiled manually"
// GlobalOptions hold all global options for restic.
type GlobalOptions struct {
Repo string
PasswordFile string
config.Config
Quiet bool
Verbose int
NoLock bool
@@ -86,8 +87,11 @@ func init() {
})
f := cmdRoot.PersistentFlags()
f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)")
// these fields are embedded in config.Config and queried via f.Get[...]()
f.StringP("repo", "r", "", "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringP("password-file", "p", "", "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)")
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify --verbose multiple times or level `n`)")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
@@ -233,7 +237,11 @@ func Exitf(exitcode int, format string, args ...interface{}) {
}
// resolvePassword determines the password to be used for opening the repository.
func resolvePassword(opts GlobalOptions, env string) (string, error) {
func resolvePassword(opts GlobalOptions) (string, error) {
if opts.Password != "" {
return opts.Password, nil
}
if opts.PasswordFile != "" {
s, err := textfile.Read(opts.PasswordFile)
if os.IsNotExist(errors.Cause(err)) {
@@ -242,10 +250,6 @@ func resolvePassword(opts GlobalOptions, env string) (string, error) {
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
if pwd := os.Getenv(env); pwd != "" {
return pwd, nil
}
return "", nil
}
@@ -561,17 +565,18 @@ func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend,
}
// wrap the transport so that the throughput via HTTP is limited
rt = limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb).Transport(rt)
lim := limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb)
rt = lim.Transport(rt)
switch loc.Scheme {
case "local":
be, err = local.Open(cfg.(local.Config))
// wrap the backend in a LimitBackend so that the throughput is limited
be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb))
be = limiter.LimitBackend(be, lim)
case "sftp":
be, err = sftp.Open(cfg.(sftp.Config))
// wrap the backend in a LimitBackend so that the throughput is limited
be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb))
be = limiter.LimitBackend(be, lim)
case "s3":
be, err = s3.Open(cfg.(s3.Config), rt)
case "gs":
@@ -585,7 +590,7 @@ func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend,
case "rest":
be, err = rest.Open(cfg.(rest.Config), rt)
case "rclone":
be, err = rclone.Open(cfg.(rclone.Config))
be, err = rclone.Open(cfg.(rclone.Config), lim)
default:
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
@@ -648,7 +653,7 @@ func create(s string, opts options.Options) (restic.Backend, error) {
case "rest":
return rest.Create(cfg.(rest.Config), rt)
case "rclone":
return rclone.Open(cfg.(rclone.Config))
return rclone.Open(cfg.(rclone.Config), nil)
}
debug.Log("invalid repository scheme: %v", s)


@@ -9,10 +9,11 @@ import (
"runtime"
"testing"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/config"
"github.com/restic/restic/internal/ui/options"
)
type dirEntry struct {
@@ -209,7 +210,9 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
rtest.OK(t, os.MkdirAll(env.repo, 0700))
env.gopts = GlobalOptions{
Repo: env.repo,
Config: config.Config{
Repo: env.repo,
},
Quiet: true,
CacheDir: env.cache,
ctx: context.Background(),


@@ -387,23 +387,23 @@ func TestBackupExclude(t *testing.T) {
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files := testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz"}
opts.Config.Excludes = []string{"*.tar.gz"}
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz", "private/secret"}
opts.Config.Excludes = []string{"*.tar.gz", "private/secret"}
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
"expected file %q not in first snapshot, but it's included", "passwords.txt")
}


@@ -8,9 +8,11 @@ import (
"os"
"runtime"
"github.com/davecgh/go-spew/spew"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/config"
"github.com/restic/restic/internal/ui/options"
"github.com/spf13/cobra"
@@ -29,7 +31,24 @@ directories in an encrypted repository stored on different backends.
SilenceUsage: true,
DisableAutoGenTag: true,
PersistentPreRunE: func(c *cobra.Command, args []string) error {
PersistentPreRunE: func(c *cobra.Command, args []string) (err error) {
globalOptions.Config, err = config.Load("restic.conf")
if err != nil {
return err
}
err = config.ApplyEnv(&globalOptions.Config, os.Environ())
if err != nil {
return err
}
err = config.ApplyFlags(&globalOptions.Config, c.Flags())
if err != nil {
return err
}
spew.Dump(globalOptions.Config)
// set verbosity, default is one
globalOptions.verbosity = 1
if globalOptions.Quiet && (globalOptions.Verbose > 1) {
@@ -54,7 +73,7 @@ directories in an encrypted repository stored on different backends.
if c.Name() == "version" {
return nil
}
pwd, err := resolvePassword(globalOptions, "RESTIC_PASSWORD")
pwd, err := resolvePassword(globalOptions)
if err != nil {
fmt.Fprintf(os.Stderr, "Resolving password failed: %v\n", err)
Exit(1)
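
The PersistentPreRunE hunk above layers configuration from three sources. A sketch of that order, following the call shapes shown (this config package is clearly work in progress, so treat this as a snapshot, not a stable API): file first, then environment, then flags, so later sources override earlier ones.

package configexample

import (
	"os"

	"github.com/restic/restic/internal/ui/config"
)

// loadConfig applies the sources in increasing priority: the configuration
// file first, then environment variables; in the real code, command-line
// flags are applied last via config.ApplyFlags(&cfg, cmd.Flags()).
func loadConfig() (config.Config, error) {
	cfg, err := config.Load("restic.conf")
	if err != nil {
		return cfg, err
	}
	if err := config.ApplyEnv(&cfg, os.Environ()); err != nil {
		return cfg, err
	}
	return cfg, nil
}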


@@ -74,7 +74,7 @@ installed from the official repos, e.g. with ``apt-get``:
RHEL & CentOS
=============
restic can be installed via copr repository.
restic can be installed via copr repository, for RHEL7/CentOS you can try the following:
.. code-block:: console
@@ -82,6 +82,18 @@ restic can be installed via copr repository.
$ yum copr enable copart/restic
$ yum install restic
If that doesn't work, you can try adding the repository directly, for CentOS6 use:
.. code-block:: console
$ yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/copart/restic/repo/epel-6/copart-restic-epel-6.repo
For CentOS7 use:
.. code-block:: console
$ yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/copart/restic/repo/epel-7/copart-restic-epel-7.repo
Fedora
======


@@ -164,6 +164,9 @@ The ``forget`` command accepts the following parameters:
years, months, and days, e.g. ``2y5m7d`` will keep all snapshots made in the
two years, five months, and seven days before the latest snapshot.
Multiple policies will be ORed together so as to be as inclusive as possible
for keeping snapshots.
Additionally, you can restrict removing snapshots to those which have a
particular hostname with the ``--hostname`` parameter, or tags with the
``--tag`` option. When multiple tags are specified, only the snapshots


@@ -58,6 +58,9 @@ func (s *Scanner) Scan(ctx context.Context, targets []string) error {
}
}
if ctx.Err() != nil {
return ctx.Err()
}
s.Result("", stats)
return nil
}
@@ -107,6 +110,9 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca
stats.Others++
}
if ctx.Err() != nil {
return stats, ctx.Err()
}
s.Result(target, stats)
return stats, nil
}


@@ -2,6 +2,7 @@ package azure
import (
"context"
"encoding/base64"
"io"
"io/ioutil"
"net/http"
@@ -64,13 +65,13 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
}
// Open opens the Azure backend at specified container.
func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
return open(cfg, rt)
}
// Create opens the Azure backend at specified container and creates the container if
// it does not exist yet.
func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
func Create(cfg Config, rt http.RoundTripper) (*Backend, error) {
be, err := open(cfg, rt)
if err != nil {
@@ -129,8 +130,18 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
debug.Log("InsertObject(%v, %v)", be.container.Name, objName)
// wrap the reader so that net/http client cannot close the reader
err := be.container.GetBlobReference(objName).CreateBlockBlobFromReader(ioutil.NopCloser(rd), nil)
var err error
if rd.Length() < 256*1024*1024 {
// wrap the reader so that net/http client cannot close the reader
dataReader := ioutil.NopCloser(rd)
// if it's smaller than 256miB, then just create the file directly from the reader
err = be.container.GetBlobReference(objName).CreateBlockBlobFromReader(dataReader, nil)
} else {
// otherwise use the more complicated method
err = be.saveLarge(ctx, objName, rd)
}
be.sem.ReleaseToken()
debug.Log("%v, err %#v", objName, err)
@@ -138,6 +149,55 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
return errors.Wrap(err, "CreateBlockBlobFromReader")
}
func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.RewindReader) error {
// create the file on the server
file := be.container.GetBlobReference(objName)
err := file.CreateBlockBlob(nil)
if err != nil {
return errors.Wrap(err, "CreateBlockBlob")
}
// read the data, in 100 MiB chunks
buf := make([]byte, 100*1024*1024)
var blocks []storage.Block
for {
n, err := io.ReadFull(rd, buf)
if err == io.ErrUnexpectedEOF {
err = nil
}
if err == io.EOF {
// end of file reached, no bytes have been read at all
break
}
if err != nil {
return errors.Wrap(err, "ReadFull")
}
buf = buf[:n]
// upload it as a new "block", use the base64 hash for the ID
h := restic.Hash(buf)
id := base64.StdEncoding.EncodeToString(h[:])
debug.Log("PutBlock %v with %d bytes", id, len(buf))
err = file.PutBlock(id, buf, nil)
if err != nil {
return errors.Wrap(err, "PutBlock")
}
blocks = append(blocks, storage.Block{
ID: id,
Status: "Uncommitted",
})
}
debug.Log("uploaded %d parts: %v", len(blocks), blocks)
err = file.PutBlockList(blocks, nil)
debug.Log("PutBlockList returned %v", err)
return errors.Wrap(err, "PutBlockList")
}
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
io.ReadCloser


@@ -1,8 +1,10 @@
package azure_test
import (
"bytes"
"context"
"fmt"
"io"
"os"
"testing"
"time"
@@ -122,3 +124,95 @@ func BenchmarkBackendAzure(t *testing.B) {
t.Logf("run tests")
newAzureTestSuite(t).RunBenchmarks(t)
}
func TestUploadLargeFile(t *testing.T) {
if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" {
t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads")
return
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if os.Getenv("RESTIC_TEST_AZURE_REPOSITORY") == "" {
t.Skipf("environment variables not available")
return
}
azcfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
t.Fatal(err)
}
cfg := azcfg.(azure.Config)
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
cfg.AccountKey = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY")
cfg.Prefix = fmt.Sprintf("test-upload-large-%d", time.Now().UnixNano())
tr, err := backend.Transport(backend.TransportOptions{})
if err != nil {
t.Fatal(err)
}
be, err := azure.Create(cfg, tr)
if err != nil {
t.Fatal(err)
}
defer func() {
err := be.Delete(ctx)
if err != nil {
t.Fatal(err)
}
}()
data := rtest.Random(23, 300*1024*1024)
id := restic.Hash(data)
h := restic.Handle{Name: id.String(), Type: restic.DataFile}
t.Logf("hash of %d bytes: %v", len(data), id)
err = be.Save(ctx, h, restic.NewByteReader(data))
if err != nil {
t.Fatal(err)
}
defer func() {
err := be.Remove(ctx, h)
if err != nil {
t.Fatal(err)
}
}()
var tests = []struct {
offset, length int
}{
{0, len(data)},
{23, 1024},
{23 + 100*1024, 500},
{888 + 200*1024, 89999},
{888 + 100*1024*1024, 120 * 1024 * 1024},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
want := data[test.offset : test.offset+test.length]
buf := make([]byte, test.length)
err = be.Load(ctx, h, test.length, int64(test.offset), func(rd io.Reader) error {
_, err = io.ReadFull(rd, buf)
return err
})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, want) {
t.Fatalf("wrong bytes returned")
}
})
}
}


@@ -5,7 +5,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to connect to an azure compatible


@@ -295,10 +295,6 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic
return ctx.Err()
}
if ctx.Err() != nil {
return ctx.Err()
}
attrs, err := obj.Attrs(ctx)
if err != nil {
return err


@@ -6,7 +6,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to connect to an b2 compatible


@@ -5,7 +5,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to connect to a Google Cloud Storage


@@ -4,7 +4,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config holds all information needed to open a local repository.


@@ -5,6 +5,7 @@ import (
"context"
"crypto/tls"
"fmt"
"io"
"math/rand"
"net"
"net/http"
@@ -18,6 +19,7 @@ import (
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/limiter"
"golang.org/x/net/context/ctxhttp"
"golang.org/x/net/http2"
)
@@ -81,8 +83,38 @@ func run(command string, args ...string) (*StdioConn, *exec.Cmd, *sync.WaitGroup
return c, cmd, &wg, bg, nil
}
// wrappedConn adds bandwidth limiting capabilities to the StdioConn by
// wrapping the Read/Write methods.
type wrappedConn struct {
*StdioConn
io.Reader
io.Writer
}
func (c wrappedConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
func (c wrappedConn) Write(p []byte) (int, error) {
return c.Writer.Write(p)
}
func wrapConn(c *StdioConn, lim limiter.Limiter) wrappedConn {
wc := wrappedConn{
StdioConn: c,
Reader: c,
Writer: c,
}
if lim != nil {
wc.Reader = lim.Downstream(c)
wc.Writer = lim.UpstreamWriter(c)
}
return wc
}
// New initializes a Backend and starts the process.
func New(cfg Config) (*Backend, error) {
func New(cfg Config, lim limiter.Limiter) (*Backend, error) {
var (
args []string
err error
@@ -118,11 +150,16 @@ func New(cfg Config) (*Backend, error) {
arg0, args := args[0], args[1:]
debug.Log("running command: %v %v", arg0, args)
conn, cmd, wg, bg, err := run(arg0, args...)
stdioConn, cmd, wg, bg, err := run(arg0, args...)
if err != nil {
return nil, err
}
var conn net.Conn = stdioConn
if lim != nil {
conn = wrapConn(stdioConn, lim)
}
dialCount := 0
tr := &http2.Transport{
AllowHTTP: true, // this is not really HTTP, just stdin/stdout
@@ -141,7 +178,7 @@ func New(cfg Config) (*Backend, error) {
tr: tr,
cmd: cmd,
waitCh: waitCh,
conn: conn,
conn: stdioConn,
wg: wg,
}
@@ -202,8 +239,8 @@ func New(cfg Config) (*Backend, error) {
}
// Open starts an rclone process with the given config.
func Open(cfg Config) (*Backend, error) {
be, err := New(cfg)
func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
be, err := New(cfg, lim)
if err != nil {
return nil, err
}
@@ -229,7 +266,7 @@ func Open(cfg Config) (*Backend, error) {
// Create initializes a new restic repo with clone.
func Create(cfg Config) (*Backend, error) {
be, err := New(cfg)
be, err := New(cfg, nil)
if err != nil {
return nil, err
}


@@ -39,7 +39,7 @@ func newTestSuite(t testing.TB) *test.Suite {
Open: func(config interface{}) (restic.Backend, error) {
t.Logf("Open()")
cfg := config.(rclone.Config)
return rclone.Open(cfg)
return rclone.Open(cfg, nil)
},
// CleanupFn removes data created during the tests.


@@ -4,7 +4,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to start rclone.


@@ -5,7 +5,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to connect to a REST server.


@@ -6,7 +6,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains all configuration necessary to connect to an s3 compatible


@@ -188,6 +188,10 @@ func (be *Backend) ReadDir(dir string) (list []os.FileInfo, err error) {
defer close(done)
for obj := range be.client.ListObjects(be.cfg.Bucket, dir, false, done) {
if obj.Err != nil {
return nil, err
}
if obj.Key == "" {
continue
}
@@ -424,6 +428,10 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
listresp := be.client.ListObjects(be.cfg.Bucket, prefix, recursive, ctx.Done())
for obj := range listresp {
if obj.Err != nil {
return obj.Err
}
m := strings.TrimPrefix(obj.Key, prefix)
if m == "" {
continue


@@ -2,8 +2,9 @@ package backend
import (
"context"
"github.com/restic/restic/internal/errors"
"io"
"github.com/restic/restic/internal/errors"
)
// Semaphore limits access to a restricted resource.


@@ -6,7 +6,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config collects all information required to connect to an sftp server.


@@ -75,7 +75,9 @@ func startClient(program string, args ...string) (*SFTP, error) {
go func() {
err := cmd.Wait()
debug.Log("ssh command exited, err %v", err)
ch <- errors.Wrap(err, "cmd.Wait")
for {
ch <- errors.Wrap(err, "ssh command exited")
}
}()
// open the SFTP session
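
The endless send loop above is the whole fix. As a hedged restatement in isolation (startWaiter is an illustrative name): once cmd.Wait returns, the goroutine re-sends the exit error forever, so every later clientError() call reports "ssh command exited" instead of a one-off, generic write error.

package sftp

import (
	"os/exec"

	"github.com/pkg/errors"
)

// startWaiter waits for the ssh child process, then keeps its exit error
// available on the channel indefinitely instead of sending it only once.
func startWaiter(cmd *exec.Cmd) <-chan error {
	ch := make(chan error)
	go func() {
		err := cmd.Wait()
		for {
			ch <- errors.Wrap(err, "ssh command exited")
		}
	}()
	return ch
}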


@@ -5,7 +5,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/ui/options"
)
// Config contains basic configuration needed to specify swift location for a swift server


@@ -91,14 +91,6 @@ var autoCacheFiles = map[restic.FileType]bool{
func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
finish := make(chan struct{})
defer func() {
close(finish)
// remove the finish channel from the map
b.inProgressMutex.Lock()
delete(b.inProgress, h)
b.inProgressMutex.Unlock()
}()
b.inProgressMutex.Lock()
other, alreadyDownloading := b.inProgress[h]
@@ -120,10 +112,17 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
if err != nil {
// try to remove from the cache, ignore errors
_ = b.Cache.Remove(h)
return err
}
return nil
// signal other waiting goroutines that the file may now be cached
close(finish)
// remove the finish channel from the map
b.inProgressMutex.Lock()
delete(b.inProgress, h)
b.inProgressMutex.Unlock()
return err
}
// loadFromCacheOrDelegate will try to load the file from the cache, and fall
@@ -131,12 +130,13 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
func (b *Backend) loadFromCacheOrDelegate(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
rd, err := b.Cache.Load(h, length, offset)
if err != nil {
debug.Log("error caching %v: %v, falling back to backend", h, err)
return b.Backend.Load(ctx, h, length, offset, consumer)
}
err = consumer(rd)
if err != nil {
rd.Close() // ignore secondary errors
_ = rd.Close() // ignore secondary errors
return err
}
return rd.Close()
@@ -193,19 +193,8 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
debug.Log("auto-store %v in the cache", h)
err := b.cacheFile(ctx, h)
if err == nil {
// load the cached version
rd, err := b.Cache.Load(h, 0, 0)
if err != nil {
return err
}
err = consumer(rd)
if err != nil {
rd.Close() // ignore secondary errors
return err
}
return rd.Close()
return b.loadFromCacheOrDelegate(ctx, h, length, offset, consumer)
}
debug.Log("error caching %v: %v, falling back to backend", h, err)


@@ -3,9 +3,13 @@ package cache
import (
"bytes"
"context"
"io"
"math/rand"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/restic"
@@ -112,3 +116,59 @@ func TestBackend(t *testing.T) {
t.Errorf("removed file still in cache after stat")
}
}
type loadErrorBackend struct {
restic.Backend
loadError error
}
func (be loadErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
time.Sleep(10 * time.Millisecond)
return be.loadError
}
func TestErrorBackend(t *testing.T) {
be := mem.New()
c, cleanup := TestNewCache(t)
defer cleanup()
h, data := randomData(5234142)
// save directly in backend
save(t, be, h, data)
testErr := errors.New("test error")
errBackend := loadErrorBackend{
Backend: be,
loadError: testErr,
}
loadTest := func(wg *sync.WaitGroup, be restic.Backend) {
defer wg.Done()
buf, err := backend.LoadAll(context.TODO(), be, h)
if err == testErr {
return
}
if err != nil {
t.Error(err)
return
}
if !bytes.Equal(buf, data) {
t.Errorf("data does not match")
}
time.Sleep(time.Millisecond)
}
wrappedBE := c.Wrap(errBackend)
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(1)
go loadTest(&wg, wrappedBE)
}
wg.Wait()
}


@@ -83,6 +83,12 @@ func childMatch(patterns, strs []string) (matched bool, err error) {
return true, nil
}
ok, pos := hasDoubleWildcard(patterns)
if ok && len(strs) >= pos {
// cut off at the double wildcard
strs = strs[:pos]
}
// match path against absolute pattern prefix
l := 0
if len(strs) > len(patterns) {


@@ -83,6 +83,8 @@ var matchTests = []struct {
{"foo/**/bar/*.go", "bar/main.go", false},
{"foo/**/bar", "/home/user/foo/x/y/bar", true},
{"foo/**/bar", "/home/user/foo/x/y/bar/main.go", true},
{"foo/**/bar/**/x", "/home/user/foo/bar/x", true},
{"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true},
{"user/**/important*", "/home/user/work/x/y/hidden/x", false},
{"user/**/hidden*/**/c", "/home/user/work/x/y/hidden/z/a/b/c", true},
{"c:/foo/*test.*", "c:/foo/bar/test.go", false},
@@ -107,20 +109,28 @@ func testpattern(t *testing.T, pattern, path string, shouldMatch bool) {
func TestMatch(t *testing.T) {
for _, test := range matchTests {
testpattern(t, test.pattern, test.path, test.match)
t.Run("", func(t *testing.T) {
testpattern(t, test.pattern, test.path, test.match)
})
// Test with native path separator
if filepath.Separator != '/' {
// Test with pattern as native
pattern := strings.Replace(test.pattern, "/", string(filepath.Separator), -1)
testpattern(t, pattern, test.path, test.match)
// Test with pattern as native
t.Run("pattern-native", func(t *testing.T) {
testpattern(t, pattern, test.path, test.match)
})
// Test with path as native
path := strings.Replace(test.path, "/", string(filepath.Separator), -1)
testpattern(t, test.pattern, path, test.match)
t.Run("path-native", func(t *testing.T) {
// Test with path as native
testpattern(t, test.pattern, path, test.match)
})
// Test with both pattern and path as native
testpattern(t, pattern, path, test.match)
t.Run("both-native", func(t *testing.T) {
// Test with both pattern and path as native
testpattern(t, pattern, path, test.match)
})
}
}
}
@@ -147,6 +157,16 @@ var childMatchTests = []struct {
{"/foo/**/baz", "/foo/bar/baz", true},
{"/foo/**/baz", "/foo/bar/baz/blah", true},
{"/foo/**/qux", "/foo/bar/baz/qux", true},
{"/foo/**/qux", "/foo/bar/baz", true},
{"/foo/**/qux", "/foo/bar/baz/boo", true},
{"/foo/**", "/foo/bar/baz", true},
{"/foo/**", "/foo/bar", true},
{"foo/**/bar/**/x", "/home/user/foo", true},
{"foo/**/bar/**/x", "/home/user/foo/bar", true},
{"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true},
{"/foo/*/qux", "/foo/bar", true},
{"/foo/*/qux", "/foo/bar/boo", false},
{"/foo/*/qux", "/foo/bar/boo/xx", false},
{"/baz/bar", "/foo", false},
{"/foo", "/foo/bar", true},
{"/*", "/foo", true},
@@ -179,20 +199,28 @@ func testchildpattern(t *testing.T, pattern, path string, shouldMatch bool) {
func TestChildMatch(t *testing.T) {
for _, test := range childMatchTests {
testchildpattern(t, test.pattern, test.path, test.match)
t.Run("", func(t *testing.T) {
testchildpattern(t, test.pattern, test.path, test.match)
})
// Test with native path separator
if filepath.Separator != '/' {
// Test with pattern as native
pattern := strings.Replace(test.pattern, "/", string(filepath.Separator), -1)
testchildpattern(t, pattern, test.path, test.match)
// Test with pattern as native
t.Run("pattern-native", func(t *testing.T) {
testchildpattern(t, pattern, test.path, test.match)
})
// Test with path as native
path := strings.Replace(test.path, "/", string(filepath.Separator), -1)
testchildpattern(t, test.pattern, path, test.match)
t.Run("path-native", func(t *testing.T) {
// Test with path as native
testchildpattern(t, test.pattern, path, test.match)
})
// Test with both pattern and path as native
testchildpattern(t, pattern, path, test.match)
t.Run("both-native", func(t *testing.T) {
// Test with both pattern and path as native
testchildpattern(t, pattern, path, test.match)
})
}
}
}


@@ -12,6 +12,10 @@ type Limiter interface {
// uploads.
Upstream(r io.Reader) io.Reader
// UpstreamWriter returns a rate limited writer that is intended to be used
// in uploads.
UpstreamWriter(w io.Writer) io.Writer
// Downstream returns a rate limited reader that is intended to be used
// for downloads.
Downstream(r io.Reader) io.Reader


@@ -35,11 +35,15 @@ func NewStaticLimiter(uploadKb, downloadKb int) Limiter {
}
func (l staticLimiter) Upstream(r io.Reader) io.Reader {
return l.limit(r, l.upstream)
return l.limitReader(r, l.upstream)
}
func (l staticLimiter) UpstreamWriter(w io.Writer) io.Writer {
return l.limitWriter(w, l.upstream)
}
func (l staticLimiter) Downstream(r io.Reader) io.Reader {
return l.limit(r, l.downstream)
return l.limitReader(r, l.downstream)
}
type roundTripper func(*http.Request) (*http.Response, error)
@@ -75,13 +79,20 @@ func (l staticLimiter) Transport(rt http.RoundTripper) http.RoundTripper {
})
}
func (l staticLimiter) limit(r io.Reader, b *ratelimit.Bucket) io.Reader {
func (l staticLimiter) limitReader(r io.Reader, b *ratelimit.Bucket) io.Reader {
if b == nil {
return r
}
return ratelimit.Reader(r, b)
}
func (l staticLimiter) limitWriter(w io.Writer, b *ratelimit.Bucket) io.Writer {
if b == nil {
return w
}
return ratelimit.Writer(w, b)
}
func toByteRate(val int) float64 {
return float64(val) * 1024.
}
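
For reference, a short usage sketch of the token-bucket API these wrappers delegate to. The ratelimit.Bucket, Reader and Writer names above suggest github.com/juju/ratelimit; treat the import path as an assumption:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/juju/ratelimit"
)

func main() {
	// A token bucket refilled at ~1 KiB/s with a burst capacity of 4 KiB.
	bucket := ratelimit.NewBucketWithRate(1024, 4096)

	// Reads are throttled against the bucket; ratelimit.Writer wraps a
	// writer the same way, which is what UpstreamWriter needs for uploads.
	r := ratelimit.Reader(strings.NewReader("example data"), bucket)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```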


@@ -40,7 +40,7 @@ type Backup struct {
processedCh chan counter
errCh chan struct{}
workerCh chan fileWorkerMessage
clearStatus chan struct{}
finished chan struct{}
summary struct {
sync.Mutex
@@ -69,7 +69,7 @@ func NewBackup(term *termstatus.Terminal, verbosity uint) *Backup {
processedCh: make(chan counter),
errCh: make(chan struct{}),
workerCh: make(chan fileWorkerMessage),
clearStatus: make(chan struct{}),
finished: make(chan struct{}),
}
}
@@ -92,7 +92,7 @@ func (b *Backup) Run(ctx context.Context) error {
select {
case <-ctx.Done():
return nil
case <-b.clearStatus:
case <-b.finished:
started = false
b.term.SetStatus([]string{""})
case t, ok := <-b.totalCh:
@@ -333,7 +333,10 @@ func (b *Backup) CompleteItemFn(item string, previous, current *restic.Node, s a
// ReportTotal sets the total stats up to now
func (b *Backup) ReportTotal(item string, s archiver.ScanStats) {
b.totalCh <- counter{Files: s.Files, Dirs: s.Dirs, Bytes: s.Bytes}
select {
case b.totalCh <- counter{Files: s.Files, Dirs: s.Dirs, Bytes: s.Bytes}:
case <-b.finished:
}
if item == "" {
b.V("scan finished in %.3fs: %v files, %s",
@@ -347,7 +350,7 @@ func (b *Backup) ReportTotal(item string, s archiver.ScanStats) {
// Finish prints the finishing messages.
func (b *Backup) Finish() {
b.clearStatus <- struct{}{}
close(b.finished)
b.P("\n")
b.P("Files: %5d new, %5d changed, %5d unmodified\n", b.summary.Files.New, b.summary.Files.Changed, b.summary.Files.Unchanged)

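The switch from a one-shot send on clearStatus to close(finished) matters because a closed channel is ready for every receiver, forever: senders like ReportTotal can select against it and bail out instead of blocking once the status loop has stopped. A minimal sketch of the pattern:

```go
package main

import "fmt"

type reporter struct {
	totalCh  chan int
	finished chan struct{}
}

func (r *reporter) report(n int) {
	select {
	case r.totalCh <- n: // the status goroutine is still consuming
	case <-r.finished: // Finish ran; drop the update instead of blocking
	}
}

func (r *reporter) finish() {
	close(r.finished) // a closed channel is always ready for receive
}

func main() {
	r := &reporter{totalCh: make(chan int), finished: make(chan struct{})}
	r.finish()
	r.report(42) // does not block even though nobody reads totalCh
	fmt.Println("no deadlock")
}
```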

@@ -0,0 +1,365 @@
package config
import (
"fmt"
"io/ioutil"
"reflect"
"strings"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/spf13/pflag"
)
// Config contains configuration items read from a file.
type Config struct {
Repo string `hcl:"repo" flag:"repo" env:"RESTIC_REPOSITORY"`
Password string `hcl:"password" env:"RESTIC_PASSWORD"`
PasswordFile string `hcl:"password_file" flag:"password-file" env:"RESTIC_PASSWORD_FILE"`
Backends map[string]Backend
Backup Backup `hcl:"backup"`
}
// Backend configures a backend.
type Backend struct {
Type string `hcl:"type"`
*BackendLocal `hcl:"-" json:"local"`
*BackendSFTP `hcl:"-" json:"sftp"`
}
// BackendLocal configures a local backend.
type BackendLocal struct {
Type string `hcl:"type"`
Path string `hcl:"path"`
}
// BackendSFTP configures an sftp backend.
type BackendSFTP struct {
Type string `hcl:"type"`
User string `hcl:"user"`
Host string `hcl:"host"`
Path string `hcl:"path"`
}
// Backup sets the options for the "backup" command.
type Backup struct {
Target []string `hcl:"target"`
Excludes []string `hcl:"exclude" flag:"exclude"`
}
// listTags returns all the top-level tags with the name tagname of obj.
func listTags(obj interface{}, tagname string) map[string]struct{} {
list := make(map[string]struct{})
// resolve indirection if obj is a pointer
v := reflect.Indirect(reflect.ValueOf(obj))
for i := 0; i < v.NumField(); i++ {
f := v.Type().Field(i)
val := f.Tag.Get(tagname)
list[val] = struct{}{}
}
return list
}
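listTags works purely through struct-tag reflection. A tiny standalone illustration of the mechanism (the options type below is made up; its tag names mirror the Config type above):

```go
package main

import (
	"fmt"
	"reflect"
)

type options struct {
	Repo     string `hcl:"repo"`
	Password string `hcl:"password"`
}

func main() {
	t := reflect.TypeOf(options{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(t.Field(i).Tag.Get("hcl")) // repo, then password
	}
}
```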
func validateObjects(list *ast.ObjectList, validNames map[string]struct{}) error {
for _, item := range list.Items {
ident := item.Keys[0].Token.Value().(string)
if _, ok := validNames[ident]; !ok {
return errors.Errorf("unknown option %q found at line %v, column %v",
ident, item.Pos().Line, item.Pos().Column)
}
}
return nil
}
// Parse parses a config file from buf.
func Parse(buf []byte) (cfg Config, err error) {
parsed, err := hcl.ParseBytes(buf)
if err != nil {
return Config{}, err
}
err = hcl.DecodeObject(&cfg, parsed)
if err != nil {
return Config{}, err
}
root := parsed.Node.(*ast.ObjectList)
// load all 'backend' sections
cfg.Backends, err = parseBackends(root)
if err != nil {
return Config{}, err
}
// check for additional unknown items
rootTags := listTags(cfg, "hcl")
rootTags["backend"] = struct{}{}
checks := map[string]map[string]struct{}{
"": rootTags,
"backup": listTags(Backup{}, "hcl"),
}
for name, valid := range checks {
list := root
if name != "" {
if len(root.Filter(name).Items) == 0 {
continue
}
val := root.Filter(name).Items[0].Val
obj, ok := val.(*ast.ObjectType)
if !ok {
return Config{}, errors.Errorf("error in line %v, column %v: %q must be an object", val.Pos().Line, val.Pos().Column, name)
}
list = obj.List
}
err = validateObjects(list, valid)
if err != nil {
return Config{}, err
}
}
return cfg, nil
}
// parseBackends parses the backend configuration sections.
func parseBackends(root *ast.ObjectList) (map[string]Backend, error) {
backends := make(map[string]Backend)
// find top-level backend objects
for _, obj := range root.Items {
// is not an object block
if len(obj.Keys) == 0 {
continue
}
// does not start with an identifier
if obj.Keys[0].Token.Type != token.IDENT {
continue
}
// something other than a backend section
if s, ok := obj.Keys[0].Token.Value().(string); !ok || s != "backend" {
continue
}
// missing name
if len(obj.Keys) != 2 {
return nil, errors.Errorf("backend has no name at line %v, column %v",
obj.Pos().Line, obj.Pos().Column)
}
// check that the name is not empty
name := obj.Keys[1].Token.Value().(string)
if len(name) == 0 {
return nil, errors.Errorf("backend name is empty at line %v, column %v",
obj.Pos().Line, obj.Pos().Column)
}
// decode object
var be Backend
err := hcl.DecodeObject(&be, obj)
if err != nil {
return nil, err
}
if be.Type == "" {
be.Type = "local"
}
var target interface{}
switch be.Type {
case "local":
be.BackendLocal = &BackendLocal{}
target = be.BackendLocal
case "sftp":
be.BackendSFTP = &BackendSFTP{}
target = be.BackendSFTP
default:
return nil, errors.Errorf("unknown backend type %q at line %v, column %v",
be.Type, obj.Pos().Line, obj.Pos().Column)
}
// check structure of the backend object
innerBlock, ok := obj.Val.(*ast.ObjectType)
if !ok {
return nil, errors.Errorf("unable to verify structure of backend %q at line %v, column %v",
name, obj.Pos().Line, obj.Pos().Column)
}
// check allowed types
err = validateObjects(innerBlock.List, listTags(target, "hcl"))
if err != nil {
return nil, err
}
err = hcl.DecodeObject(target, innerBlock)
if err != nil {
return nil, errors.Errorf("parsing backend %q (type %s) at line %v, column %v failed: %v",
name, be.Type, obj.Pos().Line, obj.Pos().Column, err)
}
if _, ok := backends[name]; ok {
return nil, errors.Errorf("backend %q at line %v, column %v already configured",
name, obj.Pos().Line, obj.Pos().Column)
}
backends[name] = be
}
return backends, nil
}
// Load loads a config from a file.
func Load(filename string) (Config, error) {
buf, err := ioutil.ReadFile(filename)
if err != nil {
return Config{}, err
}
return Parse(buf)
}
func getFieldsForTag(tagname string, target interface{}) map[string]reflect.Value {
v := reflect.ValueOf(target).Elem()
// resolve indirection
vi := reflect.Indirect(reflect.ValueOf(target))
attr := make(map[string]reflect.Value)
for i := 0; i < vi.NumField(); i++ {
typeField := vi.Type().Field(i)
tag := typeField.Tag.Get(tagname)
if tag == "" {
continue
}
field := v.FieldByName(typeField.Name)
if !field.CanSet() {
continue
}
attr[tag] = field
}
return attr
}
// ApplyFlags takes the values from the flag set and applies them to cfg.
func ApplyFlags(cfg interface{}, fset *pflag.FlagSet) error {
if reflect.TypeOf(cfg).Kind() != reflect.Ptr {
panic("target config is not a pointer")
}
debug.Log("apply flags")
attr := getFieldsForTag("flag", cfg)
var visitError error
fset.VisitAll(func(flag *pflag.Flag) {
if visitError != nil {
return
}
field, ok := attr[flag.Name]
if !ok {
return
}
if !flag.Changed {
return
}
debug.Log("apply flag %v, to field %v\n", flag.Name, field.Type().Name())
switch flag.Value.Type() {
case "count":
v, err := fset.GetCount(flag.Name)
if err != nil {
visitError = err
return
}
field.SetUint(uint64(v))
case "bool":
v, err := fset.GetBool(flag.Name)
if err != nil {
visitError = err
return
}
field.SetBool(v)
case "string":
v, err := fset.GetString(flag.Name)
if err != nil {
visitError = err
return
}
field.SetString(v)
case "stringArray":
v, err := fset.GetStringArray(flag.Name)
if err != nil {
visitError = err
return
}
slice := reflect.MakeSlice(reflect.TypeOf(v), len(v), len(v))
field.Set(slice)
for i, s := range v {
slice.Index(i).SetString(s)
}
default:
visitError = errors.Errorf("flag %v has unknown type %v", flag.Name, flag.Value.Type())
return
}
})
return visitError
}
// ApplyEnv takes the list of environment variables and applies them to the
// config.
func ApplyEnv(cfg interface{}, env []string) error {
attr := getFieldsForTag("env", cfg)
for _, s := range env {
data := strings.SplitN(s, "=", 2)
if len(data) != 2 {
continue
}
name, value := data[0], data[1]
field, ok := attr[name]
if !ok {
continue
}
if field.Kind() != reflect.String {
panic(fmt.Sprintf("unsupported field type %v", field.Kind()))
}
debug.Log("apply env %v (%q) to %v\n", name, value, field.Type().Name())
field.SetString(value)
}
return nil
}
// ApplyOptions takes a list of Options and applies them to the config.
func ApplyOptions(cfg interface{}, opts map[string]string) error {
return errors.New("not implemented")
}
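
A hypothetical end-to-end sketch of how these pieces compose, mirroring the precedence exercised by the tests below (values from the file, then environment variables, then flags that were actually set). The config file name is invented, and the import path is restic-internal, so this only compiles inside the repository; it is shown for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/internal/ui/config"
	"github.com/spf13/pflag"
)

func main() {
	// Load a config file (the name is invented for this example).
	cfg, err := config.Load("restic.conf")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Environment variables override values from the file.
	if err := config.ApplyEnv(&cfg, os.Environ()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Flags that were actually set on the command line override both.
	fs := pflag.NewFlagSet("restic", pflag.ContinueOnError)
	fs.StringP("repo", "r", "", "repository location")
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := config.ApplyFlags(&cfg, fs); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Printf("repository: %v\n", cfg.Repo)
}
```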


@@ -0,0 +1,250 @@
package config
import (
"encoding/json"
"flag"
"io/ioutil"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/spf13/pflag"
)
var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/")
func saveGoldenFile(t testing.TB, base string, cfg Config) {
buf, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
t.Fatalf("error marshaling result: %v", err)
}
buf = append(buf, '\n')
if err = ioutil.WriteFile(filepath.Join("testdata", base+".golden"), buf, 0644); err != nil {
t.Fatalf("unable to update golden file: %v", err)
}
}
func loadGoldenFile(t testing.TB, base string) Config {
buf, err := ioutil.ReadFile(filepath.Join("testdata", base+".golden"))
if err != nil {
t.Fatal(err)
}
var cfg Config
err = json.Unmarshal(buf, &cfg)
if err != nil {
t.Fatal(err)
}
return cfg
}
func TestConfigLoad(t *testing.T) {
entries, err := ioutil.ReadDir("testdata")
if err != nil {
t.Fatal(err)
}
for _, entry := range entries {
filename := entry.Name()
if filepath.Ext(filename) != ".conf" {
continue
}
base := strings.TrimSuffix(filename, ".conf")
t.Run(base, func(t *testing.T) {
cfg, err := Load(filepath.Join("testdata", filename))
if err != nil {
t.Fatal(err)
}
if *updateGoldenFiles {
saveGoldenFile(t, base, cfg)
}
want := loadGoldenFile(t, base)
if !cmp.Equal(want, cfg) {
t.Errorf("wrong config: %v", cmp.Diff(want, cfg))
}
})
}
}
func TestInvalidConfigs(t *testing.T) {
var tests = []struct {
config string
err string
}{
{
config: `backend ""`,
err: "expected start of object",
},
{
config: `backend "" {}`,
err: "name is empty",
},
{
config: `backend "foo" {
type = ""
user = "xxx"
}`,
err: `unknown option "user"`,
},
{
config: `backend "foo" {
type = "local"
user = "xxx"
}`,
err: `unknown option "user"`,
},
{
config: `backend "foo" {
path = "/foo"
}
backend "foo" {
path = "/bar"
}`,
err: `backend "foo" already configured`,
},
{
config: `backend "foo" {
type = "xxx"
}`,
err: `unknown backend type "xxx"`,
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
_, err := Parse([]byte(test.config))
if err == nil {
t.Fatalf("expected error not found, got nil")
}
if !strings.Contains(err.Error(), test.err) {
t.Fatalf("returned error does not contain substring %q: %q", test.err, err.Error())
}
})
}
}
func TestConfigApplyFlags(t *testing.T) {
var tests = []struct {
filename string
applyFlags func(cfg *Config) error
want Config
}{
{
filename: "backup.conf",
applyFlags: func(cfg *Config) error {
args := []string{"--exclude", "foo/*.go"}
s := pflag.NewFlagSet("", pflag.ContinueOnError)
s.StringArrayP("exclude", "e", nil, "exclude files")
err := s.Parse(args)
if err != nil {
return err
}
return ApplyFlags(&cfg.Backup, s)
},
want: Config{
Backup: Backup{
Target: []string{"foo", "/home/user"},
Excludes: []string{"foo/*.go"},
},
Backends: map[string]Backend{},
},
},
{
filename: "backup.conf",
applyFlags: func(cfg *Config) error {
args := []string{"--repo", "sftp:user@server:/srv/backup/repo"}
s := pflag.NewFlagSet("", pflag.ContinueOnError)
s.StringP("repo", "r", "", "repository to backup to or restore from")
err := s.Parse(args)
if err != nil {
return err
}
return ApplyFlags(cfg, s)
},
want: Config{
Backup: Backup{
Target: []string{"foo", "/home/user"},
},
Repo: "sftp:user@server:/srv/backup/repo",
Backends: map[string]Backend{},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
cfg, err := Load(filepath.Join("testdata", test.filename))
if err != nil {
t.Fatal(err)
}
err = test.applyFlags(&cfg)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(test.want, cfg) {
t.Error(cmp.Diff(test.want, cfg))
}
})
}
}
func TestConfigApplyEnv(t *testing.T) {
var tests = []struct {
filename string
env []string
want Config
}{
{
filename: "backup.conf",
env: []string{
"RESTIC_REPOSITORY=/tmp/repo",
"RESTIC_PASSWORD=foobar",
"RESTIC_PASSWORD_FILE=/root/secret.txt",
},
want: Config{
Password: "foobar",
PasswordFile: "/root/secret.txt",
Repo: "/tmp/repo",
Backup: Backup{
Target: []string{"foo", "/home/user"},
},
Backends: map[string]Backend{},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
cfg, err := Load(filepath.Join("testdata", test.filename))
if err != nil {
t.Fatal(err)
}
err = ApplyEnv(&cfg, test.env)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(test.want, cfg) {
t.Error(cmp.Diff(test.want, cfg))
}
})
}
}

internal/ui/config/testdata/all.conf

@@ -0,0 +1,28 @@
repo = "sftp:user@server:/srv/repo"
password = "secret"
password_file = "/root/secret.txt"
backup {
target = [
"/home/user/",
"/home/otheruser",
]
exclude = ["*.c"]
}
backend "local" {
type = "local"
path = "/foo/bar"
}
backend "local2" {
path = "/foo/bar"
}
backend "sftp" {
type = "sftp"
user = "foo"
host = "bar"
path = "/foo/bar"
}

internal/ui/config/testdata/all.golden

@@ -0,0 +1,42 @@
{
"Repo": "sftp:user@server:/srv/repo",
"Password": "secret",
"PasswordFile": "/root/secret.txt",
"Backends": {
"local": {
"Type": "local",
"local": {
"Type": "local",
"Path": "/foo/bar"
},
"sftp": null
},
"local2": {
"Type": "local",
"local": {
"Type": "",
"Path": "/foo/bar"
},
"sftp": null
},
"sftp": {
"Type": "sftp",
"local": null,
"sftp": {
"Type": "sftp",
"User": "foo",
"Host": "bar",
"Path": "/foo/bar"
}
}
},
"Backup": {
"Target": [
"/home/user/",
"/home/otheruser"
],
"Excludes": [
"*.c"
]
}
}


@@ -0,0 +1,10 @@
password = "geheim"
backend "foo" {
type = "local"
path = "/srv/data/repo"
}
backend "bar" {
path = "/srv/data/repo"
}


@@ -0,0 +1,27 @@
{
"Repo": "",
"Password": "geheim",
"PasswordFile": "",
"Backends": {
"bar": {
"Type": "local",
"local": {
"Type": "",
"Path": "/srv/data/repo"
},
"sftp": null
},
"foo": {
"Type": "local",
"local": {
"Type": "local",
"Path": "/srv/data/repo"
},
"sftp": null
}
},
"Backup": {
"Target": null,
"Excludes": null
}
}


@@ -0,0 +1,6 @@
backup {
target = [
"foo",
"/home/user",
]
}


@@ -0,0 +1,13 @@
{
"Repo": "",
"Password": "",
"PasswordFile": "",
"Backends": {},
"Backup": {
"Target": [
"foo",
"/home/user"
],
"Excludes": null
}
}


@@ -0,0 +1,6 @@
backend "test" {
type = "local"
path = "/foo/bar/baz"
}
repo = "test"


@@ -0,0 +1,19 @@
{
"Repo": "test",
"Password": "",
"PasswordFile": "",
"Backends": {
"test": {
"Type": "local",
"local": {
"Type": "local",
"Path": "/foo/bar/baz"
},
"sftp": null
}
},
"Backup": {
"Target": null,
"Excludes": null
}
}


@@ -290,6 +290,20 @@ func (t *Terminal) Errorf(msg string, args ...interface{}) {
t.Error(s)
}
// truncate returns a string that has at most maxlen characters. If maxlen is
// negative, the empty string is returned.
func truncate(s string, maxlen int) string {
if maxlen < 0 {
return ""
}
if len(s) < maxlen {
return s
}
return s[:maxlen]
}
// SetStatus updates the status lines.
func (t *Terminal) SetStatus(lines []string) {
if len(lines) == 0 {
@@ -297,7 +311,7 @@ func (t *Terminal) SetStatus(lines []string) {
}
width, _, err := getTermSize(t.fd)
if err != nil || width < 0 {
if err != nil || width <= 0 {
// use 80 columns by default
width = 80
}
@@ -305,11 +319,7 @@ func (t *Terminal) SetStatus(lines []string) {
// make sure that all lines have a line break and are not too long
for i, line := range lines {
line = strings.TrimRight(line, "\n")
if len(line) >= width-2 {
line = line[:width-2]
}
line += "\n"
line = truncate(line, width-2) + "\n"
lines[i] = line
}


@@ -0,0 +1,32 @@
package termstatus
import "testing"
func TestTruncate(t *testing.T) {
var tests = []struct {
input string
maxlen int
output string
}{
{"", 80, ""},
{"", 0, ""},
{"", -1, ""},
{"foo", 80, "foo"},
{"foo", 4, "foo"},
{"foo", 3, "foo"},
{"foo", 2, "fo"},
{"foo", 1, "f"},
{"foo", 0, ""},
{"foo", -1, ""},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
out := truncate(test.input, test.maxlen)
if out != test.output {
t.Fatalf("wrong output for input %v, maxlen %d: want %q, got %q",
test.input, test.maxlen, test.output, out)
}
})
}
}


@@ -4,8 +4,8 @@ package termstatus
import (
"io"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
isatty "github.com/mattn/go-isatty"
)
@@ -30,10 +30,9 @@ func canUpdateStatus(fd uintptr) bool {
// getTermSize returns the dimensions of the given terminal.
// The code is taken from "golang.org/x/crypto/ssh/terminal"
func getTermSize(fd uintptr) (width, height int, err error) {
var dimensions [4]uint16
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
if err != nil {
return -1, -1, err
}
return int(dimensions[1]), int(dimensions[0]), nil
return int(ws.Col), int(ws.Row), nil
}
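
A standalone sketch of the replacement call on a Unix system; unix.IoctlGetWinsize performs the same TIOCGWINSZ ioctl without unsafe or raw syscall plumbing:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Fails with an error if stdout is not a terminal.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("%d columns, %d rows\n", ws.Col, ws.Row)
}
```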


@@ -0,0 +1 @@
package walker

internal/walker/walker.go

@@ -0,0 +1,138 @@
package walker
import (
"context"
"path"
"sort"
"github.com/pkg/errors"
"github.com/restic/restic/internal/restic"
)
// TreeLoader loads a tree from a repository.
type TreeLoader interface {
LoadTree(context.Context, restic.ID) (*restic.Tree, error)
}
// SkipNode is returned by WalkFunc when a dir node should not be walked.
var SkipNode = errors.New("skip this node")
// WalkFunc is the type of the function called for each node visited by Walk.
// Path is the slash-separated path from the root node. If there was a problem
// loading a node, err is set to a non-nil error. WalkFunc can choose to ignore
// it by returning nil.
//
// When the special value SkipNode is returned and node is a dir node, it is
// not walked. When the node is not a dir node, the remaining items in this
// tree are skipped.
//
// Setting ignore to true tells Walk that it should not visit the node again.
// For tree nodes, this means that the function is not called for the
// referenced tree. If the node is not a tree, and all nodes in the current
// tree have ignore set to true, the current tree will not be visited again.
// When err is not nil and different from SkipNode, the value returned for
// ignore is ignored.
type WalkFunc func(path string, node *restic.Node, nodeErr error) (ignore bool, err error)
// Walk calls walkFn recursively for each node in root. If walkFn returns an
// error, it is passed up the call stack. The trees in ignoreTrees are not
// walked. If walkFn ignores trees, these are added to the set.
func Walk(ctx context.Context, repo TreeLoader, root restic.ID, ignoreTrees restic.IDSet, walkFn WalkFunc) error {
tree, err := repo.LoadTree(ctx, root)
_, err = walkFn("/", nil, err)
if err != nil {
if err == SkipNode {
err = nil
}
return err
}
if ignoreTrees == nil {
ignoreTrees = restic.NewIDSet()
}
_, err = walk(ctx, repo, "/", tree, ignoreTrees, walkFn)
return err
}
// walk recursively traverses the tree, ignoring subtrees when the ID of the
// subtree is in ignoreTrees. If err is nil and ignore is true, the subtree ID
// will be added to ignoreTrees by walk.
func walk(ctx context.Context, repo TreeLoader, prefix string, tree *restic.Tree, ignoreTrees restic.IDSet, walkFn WalkFunc) (ignore bool, err error) {
var allNodesIgnored = true
sort.Slice(tree.Nodes, func(i, j int) bool {
return tree.Nodes[i].Name < tree.Nodes[j].Name
})
for _, node := range tree.Nodes {
p := path.Join(prefix, node.Name)
if node.Type == "" {
return false, errors.Errorf("node type is empty for node %q", node.Name)
}
if node.Type != "dir" {
ignore, err := walkFn(p, node, nil)
if err != nil {
if err == SkipNode {
// skip the remaining entries in this tree
return allNodesIgnored, nil
}
return false, err
}
if !ignore {
allNodesIgnored = false
}
continue
}
if node.Subtree == nil {
return false, errors.Errorf("subtree for node %v in tree %v is nil", node.Name, p)
}
if ignoreTrees.Has(*node.Subtree) {
continue
}
subtree, err := repo.LoadTree(ctx, *node.Subtree)
ignore, err := walkFn(p, node, err)
if err != nil {
if err == SkipNode {
if ignore {
ignoreTrees.Insert(*node.Subtree)
}
continue
}
return false, err
}
if ignore {
ignoreTrees.Insert(*node.Subtree)
}
if !ignore {
allNodesIgnored = false
}
ignore, err = walk(ctx, repo, p, subtree, ignoreTrees, walkFn)
if err != nil {
return false, err
}
if ignore {
ignoreTrees.Insert(*node.Subtree)
}
if !ignore {
allNodesIgnored = false
}
}
return allNodesIgnored, nil
}
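
A hypothetical usage sketch of the walker (listFiles and the .git pruning are made up; TreeLoader, SkipNode and the nil node for the root follow the code above):

```go
package example

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/walker"
)

// listFiles prints every path below root, pruning .git directories.
// Anything implementing TreeLoader works, e.g. a repository.
func listFiles(ctx context.Context, repo walker.TreeLoader, root restic.ID) error {
	return walker.Walk(ctx, repo, root, restic.NewIDSet(),
		func(path string, node *restic.Node, err error) (bool, error) {
			if err != nil {
				return false, err
			}
			if node == nil {
				return false, nil // the root itself arrives with a nil node
			}
			if node.Type == "dir" && node.Name == ".git" {
				return true, walker.SkipNode // prune and never revisit
			}
			fmt.Println(path)
			return false, nil
		})
}
```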


@@ -0,0 +1,423 @@
package walker
import (
"context"
"encoding/json"
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/restic/restic/internal/restic"
)
// TestTree is used to construct a list of trees for testing the walker.
type TestTree map[string]interface{}
// TestFile is used to test the walker.
type TestFile struct{}
func BuildTreeMap(tree TestTree) (m TreeMap, root restic.ID) {
m = TreeMap{}
id := buildTreeMap(tree, m)
return m, id
}
func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
res := restic.NewTree()
for name, item := range tree {
switch elem := item.(type) {
case TestFile:
res.Insert(&restic.Node{
Name: name,
Type: "file",
})
case TestTree:
id := buildTreeMap(elem, m)
res.Insert(&restic.Node{
Name: name,
Subtree: &id,
Type: "dir",
})
default:
panic(fmt.Sprintf("invalid type %T", elem))
}
}
buf, err := json.Marshal(res)
if err != nil {
panic(err)
}
id := restic.Hash(buf)
if _, ok := m[id]; !ok {
m[id] = res
}
return id
}
// TreeMap returns the trees from the map on LoadTree.
type TreeMap map[restic.ID]*restic.Tree
func (t TreeMap) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) {
tree, ok := t[id]
if !ok {
return nil, errors.New("tree not found")
}
return tree, nil
}
// checkFunc returns a function suitable for walking the tree to check
// something, and a function which will check the final result.
type checkFunc func(t testing.TB) (walker WalkFunc, final func(testing.TB))
// checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'.
func checkItemOrder(want []string) checkFunc {
pos := 0
return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) {
walker = func(path string, node *restic.Node, err error) (bool, error) {
if err != nil {
t.Errorf("error walking %v: %v", path, err)
return false, err
}
if pos >= len(want) {
t.Errorf("additional unexpected path found: %v", path)
return false, nil
}
if path != want[pos] {
t.Errorf("wrong path found, want %q, got %q", want[pos], path)
}
pos++
return false, nil
}
final = func(t testing.TB) {
if pos != len(want) {
t.Errorf("not enough items returned, want %d, got %d", len(want), pos)
}
}
return walker, final
}
}
// checkSkipFor returns SkipNode if path is in skipFor and checks that the
// paths the walk func is called for are exactly the ones in wantPaths.
func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
var pos int
return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) {
walker = func(path string, node *restic.Node, err error) (bool, error) {
if err != nil {
t.Errorf("error walking %v: %v", path, err)
return false, err
}
if pos >= len(wantPaths) {
t.Errorf("additional unexpected path found: %v", path)
return false, nil
}
if path != wantPaths[pos] {
t.Errorf("wrong path found, want %q, got %q", wantPaths[pos], path)
}
pos++
if _, ok := skipFor[path]; ok {
return false, SkipNode
}
return false, nil
}
final = func(t testing.TB) {
if pos != len(wantPaths) {
t.Errorf("wrong number of paths returned, want %d, got %d", len(wantPaths), pos)
}
}
return walker, final
}
}
// checkIgnore returns SkipNode if path is in skipFor and sets ignore according
// to ignoreFor. It checks that the paths the walk func is called for are exactly
// the ones in wantPaths.
func checkIgnore(skipFor map[string]struct{}, ignoreFor map[string]bool, wantPaths []string) checkFunc {
var pos int
return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) {
walker = func(path string, node *restic.Node, err error) (bool, error) {
if err != nil {
t.Errorf("error walking %v: %v", path, err)
return false, err
}
if pos >= len(wantPaths) {
t.Errorf("additional unexpected path found: %v", path)
return ignoreFor[path], nil
}
if path != wantPaths[pos] {
t.Errorf("wrong path found, want %q, got %q", wantPaths[pos], path)
}
pos++
if _, ok := skipFor[path]; ok {
return ignoreFor[path], SkipNode
}
return ignoreFor[path], nil
}
final = func(t testing.TB) {
if pos != len(wantPaths) {
t.Errorf("wrong number of paths returned, want %d, got %d", len(wantPaths), pos)
}
}
return walker, final
}
}
func TestWalker(t *testing.T) {
var tests = []struct {
tree TestTree
checks []checkFunc
}{
{
tree: TestTree{
"foo": TestFile{},
"subdir": TestTree{
"subfile": TestFile{},
},
},
checks: []checkFunc{
checkItemOrder([]string{
"/",
"/foo",
"/subdir",
"/subdir/subfile",
}),
checkSkipFor(
map[string]struct{}{
"/subdir": struct{}{},
}, []string{
"/",
"/foo",
"/subdir",
},
),
checkIgnore(
map[string]struct{}{}, map[string]bool{
"/subdir": true,
}, []string{
"/",
"/foo",
"/subdir",
"/subdir/subfile",
},
),
},
},
{
tree: TestTree{
"foo": TestFile{},
"subdir1": TestTree{
"subfile1": TestFile{},
},
"subdir2": TestTree{
"subfile2": TestFile{},
"subsubdir2": TestTree{
"subsubfile3": TestFile{},
},
},
},
checks: []checkFunc{
checkItemOrder([]string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir2",
"/subdir2/subfile2",
"/subdir2/subsubdir2",
"/subdir2/subsubdir2/subsubfile3",
}),
checkSkipFor(
map[string]struct{}{
"/subdir1": struct{}{},
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir2",
"/subdir2/subfile2",
"/subdir2/subsubdir2",
"/subdir2/subsubdir2/subsubfile3",
},
),
checkSkipFor(
map[string]struct{}{
"/subdir1": struct{}{},
"/subdir2/subsubdir2": struct{}{},
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir2",
"/subdir2/subfile2",
"/subdir2/subsubdir2",
},
),
checkSkipFor(
map[string]struct{}{
"/foo": struct{}{},
}, []string{
"/",
"/foo",
},
),
},
},
{
tree: TestTree{
"foo": TestFile{},
"subdir1": TestTree{
"subfile1": TestFile{},
"subfile2": TestFile{},
"subfile3": TestFile{},
},
"subdir2": TestTree{
"subfile1": TestFile{},
"subfile2": TestFile{},
"subfile3": TestFile{},
},
"subdir3": TestTree{
"subfile1": TestFile{},
"subfile2": TestFile{},
"subfile3": TestFile{},
},
"zzz other": TestFile{},
},
checks: []checkFunc{
checkItemOrder([]string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir1/subfile2",
"/subdir1/subfile3",
"/subdir2",
"/subdir2/subfile1",
"/subdir2/subfile2",
"/subdir2/subfile3",
"/subdir3",
"/subdir3/subfile1",
"/subdir3/subfile2",
"/subdir3/subfile3",
"/zzz other",
}),
checkIgnore(
map[string]struct{}{
"/subdir1": struct{}{},
}, map[string]bool{
"/subdir1": true,
}, []string{
"/",
"/foo",
"/subdir1",
"/zzz other",
},
),
checkIgnore(
map[string]struct{}{}, map[string]bool{
"/subdir1": true,
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir1/subfile2",
"/subdir1/subfile3",
"/zzz other",
},
),
checkIgnore(
map[string]struct{}{
"/subdir2": struct{}{},
}, map[string]bool{
"/subdir2": true,
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir1/subfile2",
"/subdir1/subfile3",
"/subdir2",
"/zzz other",
},
),
checkIgnore(
map[string]struct{}{}, map[string]bool{
"/subdir1/subfile1": true,
"/subdir1/subfile2": true,
"/subdir1/subfile3": true,
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir1/subfile2",
"/subdir1/subfile3",
"/zzz other",
},
),
checkIgnore(
map[string]struct{}{}, map[string]bool{
"/subdir2/subfile1": true,
"/subdir2/subfile2": true,
"/subdir2/subfile3": true,
}, []string{
"/",
"/foo",
"/subdir1",
"/subdir1/subfile1",
"/subdir1/subfile2",
"/subdir1/subfile3",
"/subdir2",
"/subdir2/subfile1",
"/subdir2/subfile2",
"/subdir2/subfile3",
"/zzz other",
},
),
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
repo, root := BuildTreeMap(test.tree)
for _, check := range test.checks {
t.Run("", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
fn, last := check(t)
err := Walk(ctx, repo, root, restic.NewIDSet(), fn)
if err != nil {
t.Error(err)
}
last(t)
})
}
})
}
}


@@ -136,6 +136,12 @@ func (env *TravisEnvironment) Prepare() error {
"openbsd/386", "openbsd/amd64",
"linux/arm", "freebsd/arm",
}
if os.Getenv("RESTIC_BUILD_SOLARIS") == "0" {
msg("Skipping Solaris build\n")
} else {
env.goxOSArch = append(env.goxOSArch, "solaris/amd64")
}
} else {
env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
}


@@ -97,20 +97,11 @@ func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, ds
```go
func printObjects(ctx context.Context, bucket *b2.Bucket) error {
var cur *b2.Cursor
for {
objs, c, err := bucket.ListObjects(ctx, 1000, cur)
if err != nil && err != io.EOF {
return err
}
for _, obj := range objs {
fmt.Println(obj)
}
if err == io.EOF {
return
}
cur = c
iterator := bucket.List(ctx)
for iterator.Next() {
fmt.Println(iterator.Object())
}
return iterator.Err()
}
```


@@ -501,7 +501,7 @@ const (
Hider
// Folder is a special state given to non-objects that are returned during a
// List*Objects call with a non-empty Delimiter.
// List call with a ListDelimiter option.
Folder
)
@@ -574,6 +574,8 @@ func (o *Object) Delete(ctx context.Context) error {
}
// Cursor is passed to ListObjects to return subsequent pages.
//
// DEPRECATED. Will be removed in a future release.
type Cursor struct {
// Prefix limits the listed objects to those that begin with this string.
Prefix string
@@ -602,6 +604,8 @@ type Cursor struct {
//
// ListObjects will return io.EOF when there are no objects left in the bucket,
// however it may do so concurrently with the last objects.
//
// DEPRECATED. Will be removed in a future release.
func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
if c == nil {
c = &Cursor{}
@@ -636,6 +640,8 @@ func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Obje
// ListCurrentObjects is similar to ListObjects, except that it returns only
// current, unhidden objects in the bucket.
//
// DEPRECATED. Will be removed in a future release.
func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
if c == nil {
c = &Cursor{}
@@ -669,6 +675,8 @@ func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) (
// ListUnfinishedLargeFiles lists any objects that correspond to large file uploads that haven't been completed.
// This can happen for example when an upload is interrupted.
//
// DEPRECATED. Will be removed in a future release.
func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
if c == nil {
c = &Cursor{}


@@ -64,21 +64,14 @@ func TestReadWriteLive(t *testing.T) {
t.Error(err)
}
var cur *Cursor
for {
objs, c, err := bucket.ListObjects(ctx, 100, cur)
if err != nil && err != io.EOF {
t.Fatal(err)
iter := bucket.List(ctx, ListHidden())
for iter.Next() {
if err := iter.Object().Delete(ctx); err != nil {
t.Error(err)
}
for _, o := range objs {
if err := o.Delete(ctx); err != nil {
t.Error(err)
}
}
if err == io.EOF {
break
}
cur = c
}
if err := iter.Err(); err != nil {
t.Error(err)
}
}
@@ -175,7 +168,7 @@ func TestHideShowLive(t *testing.T) {
t.Fatal(err)
}
got, err := countObjects(ctx, bucket.ListCurrentObjects)
got, err := countObjects(bucket.List(ctx))
if err != nil {
t.Error(err)
}
@@ -193,7 +186,7 @@ func TestHideShowLive(t *testing.T) {
t.Fatal(err)
}
got, err = countObjects(ctx, bucket.ListCurrentObjects)
got, err = countObjects(bucket.List(ctx))
if err != nil {
t.Error(err)
}
@@ -207,7 +200,7 @@ func TestHideShowLive(t *testing.T) {
}
// count sees the object again
got, err = countObjects(ctx, bucket.ListCurrentObjects)
got, err = countObjects(bucket.List(ctx))
if err != nil {
t.Error(err)
}
@@ -542,33 +535,37 @@ func TestListObjectsWithPrefix(t *testing.T) {
t.Fatal(err)
}
// This is kind of a hack, but
type lfun func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)
table := []struct {
opts []ListOption
}{
{
opts: []ListOption{
ListPrefix("baz/"),
},
},
{
opts: []ListOption{
ListPrefix("baz/"),
ListHidden(),
},
},
}
for _, f := range []lfun{bucket.ListObjects, bucket.ListCurrentObjects} {
c := &Cursor{
Prefix: "baz/",
}
for _, entry := range table {
iter := bucket.List(ctx, entry.opts...)
var res []string
for {
objs, cur, err := f(ctx, 10, c)
if err != nil && err != io.EOF {
t.Fatalf("bucket.ListObjects: %v", err)
for iter.Next() {
o := iter.Object()
attrs, err := o.Attrs(ctx)
if err != nil {
t.Errorf("(%v).Attrs: %v", o, err)
continue
}
for _, o := range objs {
attrs, err := o.Attrs(ctx)
if err != nil {
t.Errorf("(%v).Attrs: %v", o, err)
continue
}
res = append(res, attrs.Name)
}
if err == io.EOF {
break
}
c = cur
res = append(res, attrs.Name)
}
if iter.Err() != nil {
t.Errorf("iter.Err(): %v", iter.Err())
}
want := []string{"baz/bar"}
if !reflect.DeepEqual(res, want) {
t.Errorf("got %v, want %v", res, want)
@@ -746,19 +743,15 @@ func TestAttrsNoRoundtrip(t *testing.T) {
t.Fatal(err)
}
objs, _, err := bucket.ListObjects(ctx, 1, nil)
if err != nil {
t.Fatal(err)
}
if len(objs) != 1 {
t.Fatalf("unexpected objects: got %d, want 1", len(objs))
}
iter := bucket.List(ctx)
iter.Next()
obj := iter.Object()
var trips int
for range bucket.c.Status().table()["1m"] {
trips += 1
trips++
}
attrs, err := objs[0].Attrs(ctx)
attrs, err := obj.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
@@ -768,7 +761,7 @@ func TestAttrsNoRoundtrip(t *testing.T) {
var newTrips int
for range bucket.c.Status().table()["1m"] {
newTrips += 1
newTrips++
}
if trips != newTrips {
t.Errorf("Attrs() should not have caused any net traffic, but it did: old %d, new %d", trips, newTrips)
@@ -859,13 +852,9 @@ func TestListUnfinishedLargeFiles(t *testing.T) {
if _, err := io.Copy(w, io.LimitReader(zReader{}, 1e6)); err != nil {
t.Fatal(err)
}
// Don't close the writer.
fs, _, err := bucket.ListUnfinishedLargeFiles(ctx, 10, nil)
if err != io.EOF && err != nil {
t.Fatal(err)
}
if len(fs) != 1 {
t.Errorf("ListUnfinishedLargeFiles: got %d, want 1", len(fs))
iter := bucket.List(ctx, ListUnfinished())
if !iter.Next() {
t.Errorf("ListUnfinishedLargeFiles: got none, want 1 (error %v)", iter.Err())
}
}
@@ -905,39 +894,12 @@ type object struct {
err error
}
func countObjects(ctx context.Context, f func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)) (int, error) {
func countObjects(iter *ObjectIterator) (int, error) {
var got int
ch := listObjects(ctx, f)
for c := range ch {
if c.err != nil {
return 0, c.err
}
for iter.Next() {
got++
}
return got, nil
}
func listObjects(ctx context.Context, f func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)) <-chan object {
ch := make(chan object)
go func() {
defer close(ch)
var cur *Cursor
for {
objs, c, err := f(ctx, 100, cur)
if err != nil && err != io.EOF {
ch <- object{err: err}
return
}
for _, o := range objs {
ch <- object{o: o}
}
if err == io.EOF {
return
}
cur = c
}
}()
return ch
return got, iter.Err()
}
var defaultTransport = http.DefaultTransport
@@ -1042,14 +1004,15 @@ func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) {
}
f := func() {
defer ccport.done()
for c := range listObjects(ctx, bucket.ListObjects) {
if c.err != nil {
continue
}
if err := c.o.Delete(ctx); err != nil {
iter := bucket.List(ctx, ListHidden())
for iter.Next() {
if err := iter.Object().Delete(ctx); err != nil {
t.Error(err)
}
}
if err := iter.Err(); err != nil && !IsNotExist(err) {
t.Errorf("%#v", err)
}
if err := bucket.Delete(ctx); err != nil && !IsNotExist(err) {
t.Error(err)
}

vendor/github.com/kurin/blazer/b2/iterator.go

@@ -0,0 +1,217 @@
// Copyright 2018, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"io"
"sync"
)
// List returns an iterator for selecting objects in a bucket. The default
// behavior, with no options, is to list all currently un-hidden objects.
func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator {
o := &ObjectIterator{
bucket: b,
ctx: ctx,
}
for _, opt := range opts {
opt(&o.opts)
}
return o
}
// ObjectIterator abstracts away the tricky bits of iterating over a bucket's
// contents.
//
// It is intended to be called in a loop:
// for iter.Next() {
// obj := iter.Object()
// // act on obj
// }
// if err := iter.Err(); err != nil {
// // handle err
// }
type ObjectIterator struct {
bucket *Bucket
ctx context.Context
final bool
err error
idx int
c *Cursor
opts objectIteratorOptions
objs []*Object
init sync.Once
l lister
count int
}
type lister func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)
func (o *ObjectIterator) page(ctx context.Context) error {
if o.opts.locker != nil {
o.opts.locker.Lock()
defer o.opts.locker.Unlock()
}
objs, c, err := o.l(ctx, o.count, o.c)
if err != nil && err != io.EOF {
if bNotExist.MatchString(err.Error()) {
return b2err{
err: err,
notFoundErr: true,
}
}
return err
}
o.c = c
o.objs = objs
o.idx = 0
if err == io.EOF {
o.final = true
}
return nil
}
// Next advances the iterator to the next object. It should be called before
// any calls to Object(). If Next returns true, then the next call to Object()
// will be valid. Once Next returns false, it is important to check the return
// value of Err().
func (o *ObjectIterator) Next() bool {
o.init.Do(func() {
o.count = o.opts.pageSize
if o.count < 0 || o.count > 1000 {
o.count = 1000
}
switch {
case o.opts.unfinished:
o.l = o.bucket.ListUnfinishedLargeFiles
if o.count > 100 {
o.count = 100
}
case o.opts.hidden:
o.l = o.bucket.ListObjects
default:
o.l = o.bucket.ListCurrentObjects
}
o.c = &Cursor{
Prefix: o.opts.prefix,
Delimiter: o.opts.delimiter,
}
})
if o.err != nil {
return false
}
if o.ctx.Err() != nil {
o.err = o.ctx.Err()
return false
}
if o.idx >= len(o.objs) {
if o.final {
o.err = io.EOF
return false
}
if err := o.page(o.ctx); err != nil {
o.err = err
return false
}
return o.Next()
}
o.idx++
return true
}
// Object returns the current object.
func (o *ObjectIterator) Object() *Object {
return o.objs[o.idx-1]
}
// Err returns the current error or nil. If Next() returns false and Err() is
// nil, then all objects have been seen.
func (o *ObjectIterator) Err() error {
if o.err == io.EOF {
return nil
}
return o.err
}
type objectIteratorOptions struct {
hidden bool
unfinished bool
prefix string
delimiter string
pageSize int
locker sync.Locker
}
// A ListOption alters the default behavior of List.
type ListOption func(*objectIteratorOptions)
// ListHidden will include hidden objects in the output.
func ListHidden() ListOption {
return func(o *objectIteratorOptions) {
o.hidden = true
}
}
// ListUnfinished will list unfinished large file operations instead of
// existing objects.
func ListUnfinished() ListOption {
return func(o *objectIteratorOptions) {
o.unfinished = true
}
}
// ListPrefix will restrict the output to objects whose names begin with
// prefix.
func ListPrefix(pfx string) ListOption {
return func(o *objectIteratorOptions) {
o.prefix = pfx
}
}
// ListDelimiter denotes the path separator. If set, object listings will be
// truncated at this character.
//
// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
// Otherwise, the listing would have returned all object names.
//
// Note that objects returned that end in the delimiter may not be actual
// objects, e.g. you cannot read from (or write to, or delete) an object
// "foo/", both because no actual object exists and because B2 disallows object
// names that end with "/". If you want to ensure that all objects returned
// are actual objects, leave this unset.
func ListDelimiter(delimiter string) ListOption {
return func(o *objectIteratorOptions) {
o.delimiter = delimiter
}
}
// ListPageSize configures the iterator to request the given number of objects
// per network round-trip. The default (and maximum) is 1000 objects, except
// for unfinished large files, which is 100.
func ListPageSize(count int) ListOption {
return func(o *objectIteratorOptions) {
o.pageSize = count
}
}
// ListLocker passes the iterator a lock which will be held during network
// round-trips.
func ListLocker(l sync.Locker) ListOption {
return func(o *objectIteratorOptions) {
o.locker = l
}
}
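
A short usage sketch of the new iterator (the "backups/" prefix and the function name are invented):

```go
package example

import (
	"context"
	"fmt"

	"github.com/kurin/blazer/b2"
)

// listBackups prints every object under "backups/", hidden versions
// included, using the iterator defined above.
func listBackups(ctx context.Context, bucket *b2.Bucket) error {
	iter := bucket.List(ctx, b2.ListPrefix("backups/"), b2.ListHidden())
	for iter.Next() {
		fmt.Println(iter.Object())
	}
	return iter.Err() // nil when iteration simply reached the end
}
```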


@@ -42,7 +42,7 @@ import (
const (
APIBase = "https://api.backblazeb2.com"
DefaultUserAgent = "blazer/0.3.1"
DefaultUserAgent = "blazer/0.4.4"
)
type b2err struct {


@@ -3,8 +3,8 @@ package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"sync"
"github.com/kurin/blazer/b2"
@@ -24,12 +24,27 @@ func main() {
fmt.Println(err)
return
}
buckets, err := client.ListBuckets(ctx)
if err != nil {
fmt.Println(err)
return
}
var kill []string
for _, bucket := range buckets {
if strings.HasPrefix(bucket.Name(), fmt.Sprintf("%s-b2-tests-", id)) {
kill = append(kill, bucket.Name())
}
if bucket.Name() == fmt.Sprintf("%s-consistobucket", id) || bucket.Name() == fmt.Sprintf("%s-base-tests", id) {
kill = append(kill, bucket.Name())
}
}
var wg sync.WaitGroup
for _, name := range []string{"consistobucket", "base-tests"} {
for _, name := range kill {
wg.Add(1)
go func(name string) {
defer wg.Done()
if err := killBucket(ctx, client, id, name); err != nil {
fmt.Println("removing", name)
if err := killBucket(ctx, client, name); err != nil {
fmt.Println(err)
}
}(name)
@@ -37,8 +52,8 @@ func main() {
wg.Wait()
}
func killBucket(ctx context.Context, client *b2.Client, id, name string) error {
bucket, err := client.NewBucket(ctx, id+"-"+name, nil)
func killBucket(ctx context.Context, client *b2.Client, name string) error {
bucket, err := client.NewBucket(ctx, name, nil)
if b2.IsNotExist(err) {
return nil
}
@@ -46,18 +61,11 @@ func killBucket(ctx context.Context, client *b2.Client, id, name string) error {
return err
}
defer bucket.Delete(ctx)
cur := &b2.Cursor{}
for {
os, c, err := bucket.ListObjects(ctx, 1000, cur)
if err != nil && err != io.EOF {
return err
iter := bucket.List(ctx, b2.ListHidden())
for iter.Next() {
if err := iter.Object().Delete(ctx); err != nil {
fmt.Println(err)
}
for _, o := range os {
o.Delete(ctx)
}
if err == io.EOF {
return nil
}
cur = c
}
return iter.Err()
}


@@ -2,7 +2,6 @@ package consistent
import (
"context"
"io"
"io/ioutil"
"os"
"strconv"
@@ -66,7 +65,7 @@ func TestOperationLive(t *testing.T) {
t.Fatal(err)
}
if n != 100 {
t.Errorf("result: got %d, want 10", n)
t.Errorf("result: got %d, want 100", n)
}
}
@@ -142,14 +141,15 @@ func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) {
return nil, nil
}
f := func() {
for c := range listObjects(ctx, bucket.ListObjects) {
if c.err != nil {
continue
}
if err := c.o.Delete(ctx); err != nil {
iter := bucket.List(ctx, b2.ListHidden())
for iter.Next() {
if err := iter.Object().Delete(ctx); err != nil {
t.Error(err)
}
}
if err := iter.Err(); err != nil && !b2.IsNotExist(err) {
t.Error(err)
}
if err := bucket.Delete(ctx); err != nil && !b2.IsNotExist(err) {
t.Error(err)
}
@@ -157,29 +157,6 @@ func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) {
return bucket, f
}
func listObjects(ctx context.Context, f func(context.Context, int, *b2.Cursor) ([]*b2.Object, *b2.Cursor, error)) <-chan object {
ch := make(chan object)
go func() {
defer close(ch)
var cur *b2.Cursor
for {
objs, c, err := f(ctx, 100, cur)
if err != nil && err != io.EOF {
ch <- object{err: err}
return
}
for _, o := range objs {
ch <- object{o: o}
}
if err == io.EOF {
return
}
cur = c
}
}()
return ch
}
type object struct {
o *b2.Object
err error


@@ -24,7 +24,7 @@ import (
// A Window efficiently records events that have occurred over a span of time
// extending from some fixed interval ago to now. Events that pass beyond this
// horizon effectively "fall off" the back of the window.
// horizon are discarded.
type Window struct {
mu sync.Mutex
events []interface{}
@@ -81,16 +81,27 @@ func (w *Window) sweep(now time.Time) {
w.last = now
}()
b := w.bucket(now)
p := w.bucket(w.last)
// This compares now and w.last's monotonic clocks.
diff := now.Sub(w.last)
if diff < 0 {
// time went backwards somehow; zero events and return
for i := range w.events {
w.events[i] = nil
}
return
}
last := now.Add(-diff)
if b == p && now.Sub(w.last) <= w.res {
b := w.bucket(now)
p := w.bucket(last)
if b == p && diff <= w.res {
// We're in the same bucket as the previous sweep, so all buckets are
// valid.
return
}
if now.Sub(w.last) > w.res*time.Duration(len(w.events)) {
if diff > w.res*time.Duration(len(w.events)) {
// We've gone longer than this window measures since the last sweep, just
// zero the thing and have done.
for i := range w.events {
@@ -102,10 +113,10 @@ func (w *Window) sweep(now time.Time) {
// Expire all invalid buckets. This means buckets not seen since the
// previous sweep and now, including the current bucket but not including the
// previous bucket.
old := int(w.last.UnixNano()) / int(w.res)
new := int(now.UnixNano()) / int(w.res)
old := int64(last.UnixNano()) / int64(w.res)
new := int64(now.UnixNano()) / int64(w.res)
for i := old + 1; i <= new; i++ {
b := i % len(w.events)
b := int(i) % len(w.events)
w.events[b] = nil
}
}
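
The guard works because of how Go's monotonic clock interacts with Sub: two time.Now values compare by their monotonic readings and can never yield a negative difference, but times built with time.Date (as in the new test case below) carry no monotonic reading, so a wall-clock step backwards produces diff < 0. A small demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	a := time.Now() // carries a monotonic clock reading
	b := time.Now()

	// Sub uses the monotonic readings when both Times have them, so the
	// difference is non-negative even if the wall clock was set back.
	fmt.Println(b.Sub(a) >= 0) // true

	// Round(0) strips the monotonic reading; Times built with time.Date
	// never have one, and their difference can be negative, which is the
	// case the sweep above now guards against.
	fmt.Println(b.Round(0).Sub(a.Round(0)))
}
```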


@@ -73,6 +73,21 @@ func TestWindows(t *testing.T) {
want: 6,
reduce: adder,
},
{ // what happens if time goes backwards?
size: time.Minute,
dur: time.Second,
incs: []epair{
{t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
{t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1},
{t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1},
{t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1},
{t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1},
{t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
},
look: time.Date(2000, 1, 1, 0, 0, 30, 0, time.UTC),
want: 1,
reduce: adder,
},
}
for _, e := range table {